2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_platform.h>
36 #include <linux/dma-mapping.h>
38 #include <linux/spinlock.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/slab.h>
42 #include <crypto/algapi.h>
43 #include <crypto/aes.h>
44 #include <crypto/des.h>
45 #include <crypto/sha.h>
46 #include <crypto/md5.h>
47 #include <crypto/aead.h>
48 #include <crypto/authenc.h>
49 #include <crypto/skcipher.h>
50 #include <crypto/hash.h>
51 #include <crypto/internal/hash.h>
52 #include <crypto/scatterwalk.h>
56 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
58 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
59 talitos_ptr->eptr = upper_32_bits(dma_addr);
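/*
 * Example of the split above (illustrative value): a 36-bit bus address
 * such as 0x9_2345_6780 is stored as ptr = cpu_to_be32(0x23456780) with
 * eptr = 0x9, matching the 36-bit extended addressing mode enabled in
 * reset_channel() below.
 */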
63 * map virtual single (contiguous) pointer to h/w descriptor pointer
65 static void map_single_talitos_ptr(struct device *dev,
66 struct talitos_ptr *talitos_ptr,
67 unsigned short len, void *data,
68 unsigned char extent,
69 enum dma_data_direction dir)
71 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
73 talitos_ptr->len = cpu_to_be16(len);
74 to_talitos_ptr(talitos_ptr, dma_addr);
75 talitos_ptr->j_extent = extent;
79 * unmap bus single (contiguous) h/w descriptor pointer
81 static void unmap_single_talitos_ptr(struct device *dev,
82 struct talitos_ptr *talitos_ptr,
83 enum dma_data_direction dir)
85 dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
86 be16_to_cpu(talitos_ptr->len), dir);
89 static int reset_channel(struct device *dev, int ch)
91 struct talitos_private *priv = dev_get_drvdata(dev);
92 unsigned int timeout = TALITOS_TIMEOUT;
94 setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
96 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
101 dev_err(dev, "failed to reset channel %d\n", ch);
105 /* set 36-bit addressing, done writeback enable and done IRQ enable */
106 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
107 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
109 /* and ICCR writeback, if available */
110 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
111 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
112 TALITOS_CCCR_LO_IWSE);
117 static int reset_device(struct device *dev)
119 struct talitos_private *priv = dev_get_drvdata(dev);
120 unsigned int timeout = TALITOS_TIMEOUT;
121 u32 mcr = TALITOS_MCR_SWR;
123 setbits32(priv->reg + TALITOS_MCR, mcr);
125 while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
130 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
131 setbits32(priv->reg + TALITOS_MCR, mcr);
135 dev_err(dev, "failed to reset device\n");
143 * Reset and initialize the device
145 static int init_device(struct device *dev)
147 struct talitos_private *priv = dev_get_drvdata(dev);
152 * errata documentation: warning: certain SEC interrupts
153 * are not fully cleared by writing the MCR:SWR bit,
154 * set bit twice to completely reset
156 err = reset_device(dev);
160 err = reset_device(dev);
165 for (ch = 0; ch < priv->num_channels; ch++) {
166 err = reset_channel(dev, ch);
171 /* enable channel done and error interrupts */
172 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
173 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
175 /* disable integrity check error interrupts (use writeback instead) */
176 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
177 setbits32(priv->reg + TALITOS_MDEUICR_LO,
178 TALITOS_MDEUICR_LO_ICE);
184 * talitos_submit - submits a descriptor to the device for processing
185 * @dev: the SEC device to be used
186 * @ch: the SEC device channel to be used
187 * @desc: the descriptor to be processed by the device
188 * @callback: whom to call when processing is complete
189 * @context: a handle for use by caller (optional)
191 * desc must contain valid dma-mapped (bus physical) address pointers.
192 * callback must check err and feedback in descriptor header
193 * for device processing status.
195 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
196 void (*callback)(struct device *dev,
197 struct talitos_desc *desc,
198 void *context, int error),
201 struct talitos_private *priv = dev_get_drvdata(dev);
202 struct talitos_request *request;
206 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
208 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
209 /* h/w fifo is full */
210 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
214 head = priv->chan[ch].head;
215 request = &priv->chan[ch].fifo[head];
217 /* map descriptor and save caller data */
218 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
220 request->callback = callback;
221 request->context = context;
223 /* increment fifo head */
224 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
227 request->desc = desc;
231 out_be32(priv->chan[ch].reg + TALITOS_FF,
232 upper_32_bits(request->dma_desc));
233 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
234 lower_32_bits(request->dma_desc));
236 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
240 EXPORT_SYMBOL(talitos_submit);
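/*
 * Illustrative use of talitos_submit() (a sketch, not part of the driver;
 * my_done and my_req are hypothetical -- the real callers are ipsec_esp(),
 * common_nonsnoop() and common_nonsnoop_hash() below):
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// inspect error and desc->hdr_lo, then complete my_req
 *	}
 *
 *	err = talitos_submit(dev, ch, &edesc->desc, my_done, my_req);
 *	if (err != -EINPROGRESS)
 *		// not queued (e.g. channel fifo full): unmap and bail out
 *
 * On success the descriptor is owned by the hardware until my_done() runs
 * from the done tasklet; -EINPROGRESS is the expected "accepted" return.
 */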
243 * process what was done, and notify the callback of the error for any request that was not completed
245 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
247 struct talitos_private *priv = dev_get_drvdata(dev);
248 struct talitos_request *request, saved_req;
252 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
254 tail = priv->chan[ch].tail;
255 while (priv->chan[ch].fifo[tail].desc) {
256 request = &priv->chan[ch].fifo[tail];
258 /* descriptors with their done bits set don't get the error */
260 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
268 dma_unmap_single(dev, request->dma_desc,
269 sizeof(struct talitos_desc),
272 /* copy entries so we can call callback outside lock */
273 saved_req.desc = request->desc;
274 saved_req.callback = request->callback;
275 saved_req.context = request->context;
277 /* release request entry in fifo */
279 request->desc = NULL;
281 /* increment fifo tail */
282 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
284 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
286 atomic_dec(&priv->chan[ch].submit_count);
288 saved_req.callback(dev, saved_req.desc, saved_req.context,
290 /* channel may resume processing in single desc error case */
291 if (error && !reset_ch && status == error)
293 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
294 tail = priv->chan[ch].tail;
297 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
301 * process completed requests for channels that have done status
303 #define DEF_TALITOS_DONE(name, ch_done_mask) \
304 static void talitos_done_##name(unsigned long data) \
306 struct device *dev = (struct device *)data; \
307 struct talitos_private *priv = dev_get_drvdata(dev); \
308 unsigned long flags; \
310 if (ch_done_mask & 1) \
311 flush_channel(dev, 0, 0, 0); \
312 if (priv->num_channels == 1) \
314 if (ch_done_mask & (1 << 2)) \
315 flush_channel(dev, 1, 0, 0); \
316 if (ch_done_mask & (1 << 4)) \
317 flush_channel(dev, 2, 0, 0); \
318 if (ch_done_mask & (1 << 6)) \
319 flush_channel(dev, 3, 0, 0); \
322 /* At this point, all completed channels have been processed */ \
323 /* Unmask done interrupts for channels completed later on. */ \
324 spin_lock_irqsave(&priv->reg_lock, flags); \
325 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
326 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
327 spin_unlock_irqrestore(&priv->reg_lock, flags); \
329 DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
330 DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
331 DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
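/*
 * The masks used above follow the ISR layout also assumed by
 * talitos_error(): each channel owns an even/odd bit pair, bit (2 * ch)
 * for "done" and bit (2 * ch + 1) for "error".  A minimal sketch:
 *
 *	static inline u32 ch_done_bit(int ch) { return 1 << (2 * ch); }
 *	static inline u32 ch_err_bit(int ch)  { return 1 << (2 * ch + 1); }
 *
 * (ch_done_bit/ch_err_bit are illustrative helpers, not driver symbols.)
 * Hence TALITOS_ISR_4CHDONE covers bits 0/2/4/6, matching the per-channel
 * flush_channel() dispatch in the tasklets above.
 */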
334 * locate current (offending) descriptor
336 static u32 current_desc_hdr(struct device *dev, int ch)
338 struct talitos_private *priv = dev_get_drvdata(dev);
339 int tail = priv->chan[ch].tail;
342 cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
344 while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
345 tail = (tail + 1) & (priv->fifo_len - 1);
346 if (tail == priv->chan[ch].tail) {
347 dev_err(dev, "couldn't locate current descriptor\n");
352 return priv->chan[ch].fifo[tail].desc->hdr;
356 * user diagnostics; report root cause of error based on execution unit status
358 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
360 struct talitos_private *priv = dev_get_drvdata(dev);
364 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
366 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
367 case DESC_HDR_SEL0_AFEU:
368 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
369 in_be32(priv->reg + TALITOS_AFEUISR),
370 in_be32(priv->reg + TALITOS_AFEUISR_LO));
372 case DESC_HDR_SEL0_DEU:
373 dev_err(dev, "DEUISR 0x%08x_%08x\n",
374 in_be32(priv->reg + TALITOS_DEUISR),
375 in_be32(priv->reg + TALITOS_DEUISR_LO));
377 case DESC_HDR_SEL0_MDEUA:
378 case DESC_HDR_SEL0_MDEUB:
379 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
380 in_be32(priv->reg + TALITOS_MDEUISR),
381 in_be32(priv->reg + TALITOS_MDEUISR_LO));
383 case DESC_HDR_SEL0_RNG:
384 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
385 in_be32(priv->reg + TALITOS_RNGUISR),
386 in_be32(priv->reg + TALITOS_RNGUISR_LO));
388 case DESC_HDR_SEL0_PKEU:
389 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
390 in_be32(priv->reg + TALITOS_PKEUISR),
391 in_be32(priv->reg + TALITOS_PKEUISR_LO));
393 case DESC_HDR_SEL0_AESU:
394 dev_err(dev, "AESUISR 0x%08x_%08x\n",
395 in_be32(priv->reg + TALITOS_AESUISR),
396 in_be32(priv->reg + TALITOS_AESUISR_LO));
398 case DESC_HDR_SEL0_CRCU:
399 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
400 in_be32(priv->reg + TALITOS_CRCUISR),
401 in_be32(priv->reg + TALITOS_CRCUISR_LO));
403 case DESC_HDR_SEL0_KEU:
404 dev_err(dev, "KEUISR 0x%08x_%08x\n",
405 in_be32(priv->reg + TALITOS_KEUISR),
406 in_be32(priv->reg + TALITOS_KEUISR_LO));
410 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
411 case DESC_HDR_SEL1_MDEUA:
412 case DESC_HDR_SEL1_MDEUB:
413 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
414 in_be32(priv->reg + TALITOS_MDEUISR),
415 in_be32(priv->reg + TALITOS_MDEUISR_LO));
417 case DESC_HDR_SEL1_CRCU:
418 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
419 in_be32(priv->reg + TALITOS_CRCUISR),
420 in_be32(priv->reg + TALITOS_CRCUISR_LO));
424 for (i = 0; i < 8; i++)
425 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
426 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
427 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
431 * recover from error interrupts
433 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
435 struct talitos_private *priv = dev_get_drvdata(dev);
436 unsigned int timeout = TALITOS_TIMEOUT;
437 int ch, error, reset_dev = 0, reset_ch = 0;
440 for (ch = 0; ch < priv->num_channels; ch++) {
441 /* skip channels without errors */
442 if (!(isr & (1 << (ch * 2 + 1))))
447 v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
448 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
450 if (v_lo & TALITOS_CCPSR_LO_DOF) {
451 dev_err(dev, "double fetch fifo overflow error\n");
455 if (v_lo & TALITOS_CCPSR_LO_SOF) {
456 /* h/w dropped descriptor */
457 dev_err(dev, "single fetch fifo overflow error\n");
460 if (v_lo & TALITOS_CCPSR_LO_MDTE)
461 dev_err(dev, "master data transfer error\n");
462 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
463 dev_err(dev, "s/g data length zero error\n");
464 if (v_lo & TALITOS_CCPSR_LO_FPZ)
465 dev_err(dev, "fetch pointer zero error\n");
466 if (v_lo & TALITOS_CCPSR_LO_IDH)
467 dev_err(dev, "illegal descriptor header error\n");
468 if (v_lo & TALITOS_CCPSR_LO_IEU)
469 dev_err(dev, "invalid execution unit error\n");
470 if (v_lo & TALITOS_CCPSR_LO_EU)
471 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
472 if (v_lo & TALITOS_CCPSR_LO_GB)
473 dev_err(dev, "gather boundary error\n");
474 if (v_lo & TALITOS_CCPSR_LO_GRL)
475 dev_err(dev, "gather return/length error\n");
476 if (v_lo & TALITOS_CCPSR_LO_SB)
477 dev_err(dev, "scatter boundary error\n");
478 if (v_lo & TALITOS_CCPSR_LO_SRL)
479 dev_err(dev, "scatter return/length error\n");
481 flush_channel(dev, ch, error, reset_ch);
484 reset_channel(dev, ch);
486 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
488 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
489 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
490 TALITOS_CCCR_CONT) && --timeout)
493 dev_err(dev, "failed to restart channel %d\n",
499 if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
500 dev_err(dev, "done overflow, internal time out, or rngu error: "
501 "ISR 0x%08x_%08x\n", isr, isr_lo);
503 /* purge request queues */
504 for (ch = 0; ch < priv->num_channels; ch++)
505 flush_channel(dev, ch, -EIO, 1);
507 /* reset and reinitialize the device */
512 #define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
513 static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
515 struct device *dev = data; \
516 struct talitos_private *priv = dev_get_drvdata(dev); \
518 unsigned long flags; \
520 spin_lock_irqsave(&priv->reg_lock, flags); \
521 isr = in_be32(priv->reg + TALITOS_ISR); \
522 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
523 /* Acknowledge interrupt */ \
524 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
525 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
527 if (unlikely(isr & ch_err_mask || isr_lo)) { \
528 spin_unlock_irqrestore(&priv->reg_lock, flags); \
529 talitos_error(dev, isr & ch_err_mask, isr_lo); \
532 if (likely(isr & ch_done_mask)) { \
533 /* mask further done interrupts. */ \
534 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
535 /* done_task will unmask done interrupts at exit */ \
536 tasklet_schedule(&priv->done_task[tlet]); \
538 spin_unlock_irqrestore(&priv->reg_lock, flags); \
541 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
542 IRQ_NONE; \
544 DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
545 DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
546 DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
551 static int talitos_rng_data_present(struct hwrng *rng, int wait)
553 struct device *dev = (struct device *)rng->priv;
554 struct talitos_private *priv = dev_get_drvdata(dev);
558 for (i = 0; i < 20; i++) {
559 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
560 TALITOS_RNGUSR_LO_OFL;
569 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
571 struct device *dev = (struct device *)rng->priv;
572 struct talitos_private *priv = dev_get_drvdata(dev);
574 /* rng fifo requires 64-bit accesses */
575 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
576 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
581 static int talitos_rng_init(struct hwrng *rng)
583 struct device *dev = (struct device *)rng->priv;
584 struct talitos_private *priv = dev_get_drvdata(dev);
585 unsigned int timeout = TALITOS_TIMEOUT;
587 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
588 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
592 dev_err(dev, "failed to reset rng hw\n");
596 /* start generating */
597 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
602 static int talitos_register_rng(struct device *dev)
604 struct talitos_private *priv = dev_get_drvdata(dev);
606 priv->rng.name = dev_driver_string(dev);
607 priv->rng.init = talitos_rng_init;
608 priv->rng.data_present = talitos_rng_data_present;
609 priv->rng.data_read = talitos_rng_data_read;
610 priv->rng.priv = (unsigned long)dev;
612 return hwrng_register(&priv->rng);
615 static void talitos_unregister_rng(struct device *dev)
617 struct talitos_private *priv = dev_get_drvdata(dev);
619 hwrng_unregister(&priv->rng);
625 #define TALITOS_CRA_PRIORITY 3000
626 #define TALITOS_MAX_KEY_SIZE 96
627 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
629 #define MD5_BLOCK_SIZE 64
634 __be32 desc_hdr_template;
635 u8 key[TALITOS_MAX_KEY_SIZE];
636 u8 iv[TALITOS_MAX_IV_LENGTH];
638 unsigned int enckeylen;
639 unsigned int authkeylen;
640 unsigned int authsize;
643 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
644 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
646 struct talitos_ahash_req_ctx {
647 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
648 unsigned int hw_context_size;
649 u8 buf[HASH_MAX_BLOCK_SIZE];
650 u8 bufnext[HASH_MAX_BLOCK_SIZE];
654 unsigned int to_hash_later;
656 struct scatterlist bufsl[2];
657 struct scatterlist *psrc;
660 static int aead_setauthsize(struct crypto_aead *authenc,
661 unsigned int authsize)
663 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
665 ctx->authsize = authsize;
670 static int aead_setkey(struct crypto_aead *authenc,
671 const u8 *key, unsigned int keylen)
673 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
674 struct rtattr *rta = (void *)key;
675 struct crypto_authenc_key_param *param;
676 unsigned int authkeylen;
677 unsigned int enckeylen;
679 if (!RTA_OK(rta, keylen))
682 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
685 if (RTA_PAYLOAD(rta) < sizeof(*param))
688 param = RTA_DATA(rta);
689 enckeylen = be32_to_cpu(param->enckeylen);
691 key += RTA_ALIGN(rta->rta_len);
692 keylen -= RTA_ALIGN(rta->rta_len);
694 if (keylen < enckeylen)
697 authkeylen = keylen - enckeylen;
699 if (keylen > TALITOS_MAX_KEY_SIZE)
702 memcpy(&ctx->key, key, keylen);
704 ctx->keylen = keylen;
705 ctx->enckeylen = enckeylen;
706 ctx->authkeylen = authkeylen;
711 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
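/*
 * For reference, the key blob that aead_setkey() parses is the standard
 * crypto_authenc layout (sketch only; the field names are illustrative):
 *
 *	struct rtattr hdr;                  // rta_type == CRYPTO_AUTHENC_KEYA_PARAM
 *	struct crypto_authenc_key_param p;  // p.enckeylen = cpu_to_be32(enckeylen)
 *	u8 authkey[keylen - enckeylen];     // authentication (HMAC) key
 *	u8 enckey[enckeylen];               // cipher key
 *
 * The code above skips RTA_ALIGN(hdr.rta_len) bytes, treats the trailing
 * enckeylen bytes as the cipher key and the rest as the auth key, and
 * stores both back-to-back in ctx->key (at most TALITOS_MAX_KEY_SIZE).
 */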
716 * talitos_edesc - s/w-extended descriptor
717 * @src_nents: number of segments in input scatterlist
718 * @dst_nents: number of segments in output scatterlist
719 * @dma_len: length of dma mapped link_tbl space
720 * @dma_link_tbl: bus physical address of link_tbl
721 * @desc: h/w descriptor
722 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
724 * if decrypting (with authcheck), or either one of src_nents or dst_nents
725 is greater than 1, an integrity check value is concatenated to the end
726 of link_tbl data
728 struct talitos_edesc {
734 dma_addr_t dma_link_tbl;
735 struct talitos_desc desc;
736 struct talitos_ptr link_tbl[0];
739 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
740 unsigned int nents, enum dma_data_direction dir,
743 if (unlikely(chained))
745 dma_map_sg(dev, sg, 1, dir);
746 sg = scatterwalk_sg_next(sg);
749 dma_map_sg(dev, sg, nents, dir);
753 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
754 enum dma_data_direction dir)
757 dma_unmap_sg(dev, sg, 1, dir);
758 sg = scatterwalk_sg_next(sg);
762 static void talitos_sg_unmap(struct device *dev,
763 struct talitos_edesc *edesc,
764 struct scatterlist *src,
765 struct scatterlist *dst)
767 unsigned int src_nents = edesc->src_nents ? : 1;
768 unsigned int dst_nents = edesc->dst_nents ? : 1;
771 if (edesc->src_is_chained)
772 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
774 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
777 if (edesc->dst_is_chained)
778 talitos_unmap_sg_chain(dev, dst,
781 dma_unmap_sg(dev, dst, dst_nents,
785 if (edesc->src_is_chained)
786 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
788 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
791 static void ipsec_esp_unmap(struct device *dev,
792 struct talitos_edesc *edesc,
793 struct aead_request *areq)
795 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
796 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
797 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
798 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
800 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
802 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
805 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
810 * ipsec_esp descriptor callbacks
812 static void ipsec_esp_encrypt_done(struct device *dev,
813 struct talitos_desc *desc, void *context,
816 struct aead_request *areq = context;
817 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
818 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
819 struct talitos_edesc *edesc;
820 struct scatterlist *sg;
823 edesc = container_of(desc, struct talitos_edesc, desc);
825 ipsec_esp_unmap(dev, edesc, areq);
827 /* copy the generated ICV to dst */
828 if (edesc->dma_len) {
829 icvdata = &edesc->link_tbl[edesc->src_nents +
830 edesc->dst_nents + 2];
831 sg = sg_last(areq->dst, edesc->dst_nents);
832 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
833 icvdata, ctx->authsize);
838 aead_request_complete(areq, err);
841 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
842 struct talitos_desc *desc,
843 void *context, int err)
845 struct aead_request *req = context;
846 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
847 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
848 struct talitos_edesc *edesc;
849 struct scatterlist *sg;
852 edesc = container_of(desc, struct talitos_edesc, desc);
854 ipsec_esp_unmap(dev, edesc, req);
859 icvdata = &edesc->link_tbl[edesc->src_nents +
860 edesc->dst_nents + 2];
862 icvdata = &edesc->link_tbl[0];
864 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
865 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
866 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
871 aead_request_complete(req, err);
874 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
875 struct talitos_desc *desc,
876 void *context, int err)
878 struct aead_request *req = context;
879 struct talitos_edesc *edesc;
881 edesc = container_of(desc, struct talitos_edesc, desc);
883 ipsec_esp_unmap(dev, edesc, req);
885 /* check ICV auth status */
886 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
887 DESC_HDR_LO_ICCR1_PASS))
892 aead_request_complete(req, err);
896 * convert scatterlist to SEC h/w link table format
897 * stop at cryptlen bytes
899 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
900 int cryptlen, struct talitos_ptr *link_tbl_ptr)
905 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
906 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
907 link_tbl_ptr->j_extent = 0;
909 cryptlen -= sg_dma_len(sg);
910 sg = scatterwalk_sg_next(sg);
913 /* adjust (decrease) the last one (or two) entries' len so the total equals cryptlen */
915 while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
916 /* Empty this entry, and move to previous one */
917 cryptlen += be16_to_cpu(link_tbl_ptr->len);
918 link_tbl_ptr->len = 0;
922 link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
925 /* tag end of link table */
926 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
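/*
 * Worked example of the fix-up above (illustrative numbers): with
 * cryptlen = 100 and three mapped segments of 64 bytes each, the forward
 * walk leaves cryptlen at 100 - 192 = -92.  The backward walk then zeroes
 * the third entry (64 <= 92, cryptlen becomes -28) and shortens the second
 * entry to 64 - 28 = 36, so the table describes exactly 64 + 36 = 100
 * bytes and the RETURN tag ends up on that last non-empty entry.
 */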
932 * fill in and submit ipsec_esp descriptor
934 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
936 void (*callback) (struct device *dev,
937 struct talitos_desc *desc,
938 void *context, int error))
940 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
941 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
942 struct device *dev = ctx->dev;
943 struct talitos_desc *desc = &edesc->desc;
944 unsigned int cryptlen = areq->cryptlen;
945 unsigned int authsize = ctx->authsize;
946 unsigned int ivsize = crypto_aead_ivsize(aead);
951 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
954 map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
955 sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
957 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
961 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
962 (char *)&ctx->key + ctx->authkeylen, 0,
967 * map and adjust cipher len to aead request cryptlen.
968 * extent is bytes of HMAC appended to the ciphertext,
969 * typically 12 for ipsec
971 desc->ptr[4].len = cpu_to_be16(cryptlen);
972 desc->ptr[4].j_extent = authsize;
974 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
975 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
977 edesc->src_is_chained);
980 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
982 sg_link_tbl_len = cryptlen;
984 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
985 sg_link_tbl_len = cryptlen + authsize;
987 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
988 &edesc->link_tbl[0]);
990 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
991 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
992 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
996 /* Only one segment now, so no link tbl needed */
997 to_talitos_ptr(&desc->ptr[4],
998 sg_dma_address(areq->src));
1003 desc->ptr[5].len = cpu_to_be16(cryptlen);
1004 desc->ptr[5].j_extent = authsize;
1006 if (areq->src != areq->dst)
1007 sg_count = talitos_map_sg(dev, areq->dst,
1008 edesc->dst_nents ? : 1,
1010 edesc->dst_is_chained);
1012 if (sg_count == 1) {
1013 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1015 struct talitos_ptr *link_tbl_ptr =
1016 &edesc->link_tbl[edesc->src_nents + 1];
1018 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1019 (edesc->src_nents + 1) *
1020 sizeof(struct talitos_ptr));
1021 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1024 /* Add an entry to the link table for ICV data */
1025 link_tbl_ptr += sg_count - 1;
1026 link_tbl_ptr->j_extent = 0;
1029 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1030 link_tbl_ptr->len = cpu_to_be16(authsize);
1032 /* icv data follows link tables */
1033 to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1034 (edesc->src_nents + edesc->dst_nents + 2) *
1035 sizeof(struct talitos_ptr));
1036 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1037 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1038 edesc->dma_len, DMA_BIDIRECTIONAL);
1042 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1045 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1046 if (ret != -EINPROGRESS) {
1047 ipsec_esp_unmap(dev, edesc, areq);
1054 * derive number of elements in scatterlist
1056 static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1058 struct scatterlist *sg = sg_list;
1062 while (nbytes > 0) {
1064 nbytes -= sg->length;
1065 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1067 sg = scatterwalk_sg_next(sg);
1074 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1076 * @nents: Number of SG entries
1077 * @buf: Where to copy to
1078 * @buflen: The number of bytes to copy
1079 * @skip: The number of bytes to skip before copying.
1080 * Note: skip + buflen should equal SG total size.
1082 * Returns the number of copied bytes.
1085 static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1086 void *buf, size_t buflen, unsigned int skip)
1088 unsigned int offset = 0;
1089 unsigned int boffset = 0;
1090 struct sg_mapping_iter miter;
1091 unsigned long flags;
1092 unsigned int sg_flags = SG_MITER_ATOMIC;
1093 size_t total_buffer = buflen + skip;
1095 sg_flags |= SG_MITER_FROM_SG;
1097 sg_miter_start(&miter, sgl, nents, sg_flags);
1099 local_irq_save(flags);
1101 while (sg_miter_next(&miter) && offset < total_buffer) {
1103 unsigned int ignore;
1105 if ((offset + miter.length) > skip) {
1106 if (offset < skip) {
1107 /* Copy part of this segment */
1108 ignore = skip - offset;
1109 len = miter.length - ignore;
1110 if (boffset + len > buflen)
1111 len = buflen - boffset;
1112 memcpy(buf + boffset, miter.addr + ignore, len);
1114 /* Copy all of this segment (up to buflen) */
1116 if (boffset + len > buflen)
1117 len = buflen - boffset;
1118 memcpy(buf + boffset, miter.addr, len);
1122 offset += miter.length;
1125 sg_miter_stop(&miter);
1127 local_irq_restore(flags);
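/*
 * Usage sketch for sg_copy_end_to_buffer() (illustrative values): to save
 * the last 32 bytes of a scatterlist holding 96 bytes in total, skip the
 * first 64:
 *
 *	u8 tail[32];
 *	sg_copy_end_to_buffer(sgl, nents, tail, sizeof(tail), 96 - 32);
 *
 * ahash_process_req() below uses it this way to stash the trailing
 * to_hash_later bytes of the request into req_ctx->bufnext so they can be
 * prepended to the next update/final.
 */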
1132 * allocate and map the extended descriptor
1134 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1135 struct scatterlist *src,
1136 struct scatterlist *dst,
1138 unsigned int cryptlen,
1139 unsigned int authsize,
1143 struct talitos_edesc *edesc;
1144 int src_nents, dst_nents, alloc_len, dma_len;
1145 int src_chained, dst_chained = 0;
1146 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1149 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1150 dev_err(dev, "length exceeds h/w max limit\n");
1151 return ERR_PTR(-EINVAL);
1154 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1155 src_nents = (src_nents == 1) ? 0 : src_nents;
1161 dst_nents = src_nents;
1163 dst_nents = sg_count(dst, cryptlen + authsize,
1165 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1170 * allocate space for base edesc plus the link tables,
1171 * allowing for two separate entries for ICV and generated ICV (+ 2),
1172 * and the ICV data itself
1174 alloc_len = sizeof(struct talitos_edesc);
1175 if (src_nents || dst_nents) {
1176 dma_len = (src_nents + dst_nents + 2) *
1177 sizeof(struct talitos_ptr) + authsize;
1178 alloc_len += dma_len;
1181 alloc_len += icv_stashing ? authsize : 0;
1184 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1185 if (!edesc) {
1186 dev_err(dev, "could not allocate edescriptor\n");
1187 return ERR_PTR(-ENOMEM);
1190 edesc->src_nents = src_nents;
1191 edesc->dst_nents = dst_nents;
1192 edesc->src_is_chained = src_chained;
1193 edesc->dst_is_chained = dst_chained;
1194 edesc->dma_len = dma_len;
1196 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
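/*
 * Sizing example for the allocation above (illustrative values): with
 * src_nents = 3, dst_nents = 2 and authsize = 12, dma_len is
 * (3 + 2 + 2) * sizeof(struct talitos_ptr) + 12 -- the "+ 2" being the
 * extra link table entries for the ICV and generated ICV, and the final
 * 12 bytes the ICV data itself -- and alloc_len is
 * sizeof(struct talitos_edesc) + dma_len (+ authsize again when
 * icv_stashing is set on the decrypt path).
 */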
1203 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1206 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1207 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1209 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1210 areq->cryptlen, ctx->authsize, icv_stashing,
1214 static int aead_encrypt(struct aead_request *req)
1216 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1217 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1218 struct talitos_edesc *edesc;
1220 /* allocate extended descriptor */
1221 edesc = aead_edesc_alloc(req, 0);
1222 if (IS_ERR(edesc))
1223 return PTR_ERR(edesc);
1226 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1228 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1231 static int aead_decrypt(struct aead_request *req)
1233 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1234 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1235 unsigned int authsize = ctx->authsize;
1236 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1237 struct talitos_edesc *edesc;
1238 struct scatterlist *sg;
1241 req->cryptlen -= authsize;
1243 /* allocate extended descriptor */
1244 edesc = aead_edesc_alloc(req, 1);
1245 if (IS_ERR(edesc))
1246 return PTR_ERR(edesc);
1248 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1249 ((!edesc->src_nents && !edesc->dst_nents) ||
1250 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1252 /* decrypt and check the ICV */
1253 edesc->desc.hdr = ctx->desc_hdr_template |
1254 DESC_HDR_DIR_INBOUND |
1255 DESC_HDR_MODE1_MDEU_CICV;
1257 /* reset integrity check result bits */
1258 edesc->desc.hdr_lo = 0;
1260 return ipsec_esp(edesc, req, NULL, 0,
1261 ipsec_esp_decrypt_hwauth_done);
1265 /* Have to check the ICV with software */
1266 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1268 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1270 icvdata = &edesc->link_tbl[edesc->src_nents +
1271 edesc->dst_nents + 2];
1273 icvdata = &edesc->link_tbl[0];
1275 sg = sg_last(req->src, edesc->src_nents ? : 1);
1277 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1280 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1283 static int aead_givencrypt(struct aead_givcrypt_request *req)
1285 struct aead_request *areq = &req->areq;
1286 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1287 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1288 struct talitos_edesc *edesc;
1290 /* allocate extended descriptor */
1291 edesc = aead_edesc_alloc(areq, 0);
1292 if (IS_ERR(edesc))
1293 return PTR_ERR(edesc);
1296 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1298 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1299 /* avoid consecutive packets going out with same IV */
1300 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1302 return ipsec_esp(edesc, areq, req->giv, req->seq,
1303 ipsec_esp_encrypt_done);
1306 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1307 const u8 *key, unsigned int keylen)
1309 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1311 memcpy(&ctx->key, key, keylen);
1312 ctx->keylen = keylen;
1317 static void common_nonsnoop_unmap(struct device *dev,
1318 struct talitos_edesc *edesc,
1319 struct ablkcipher_request *areq)
1321 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1322 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1323 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1325 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1328 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1332 static void ablkcipher_done(struct device *dev,
1333 struct talitos_desc *desc, void *context,
1336 struct ablkcipher_request *areq = context;
1337 struct talitos_edesc *edesc;
1339 edesc = container_of(desc, struct talitos_edesc, desc);
1341 common_nonsnoop_unmap(dev, edesc, areq);
1345 areq->base.complete(&areq->base, err);
1348 static int common_nonsnoop(struct talitos_edesc *edesc,
1349 struct ablkcipher_request *areq,
1350 void (*callback) (struct device *dev,
1351 struct talitos_desc *desc,
1352 void *context, int error))
1354 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1355 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1356 struct device *dev = ctx->dev;
1357 struct talitos_desc *desc = &edesc->desc;
1358 unsigned int cryptlen = areq->nbytes;
1359 unsigned int ivsize;
1362 /* first DWORD empty */
1363 desc->ptr[0].len = 0;
1364 to_talitos_ptr(&desc->ptr[0], 0);
1365 desc->ptr[0].j_extent = 0;
1368 ivsize = crypto_ablkcipher_ivsize(cipher);
1369 map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
1373 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1374 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1379 desc->ptr[3].len = cpu_to_be16(cryptlen);
1380 desc->ptr[3].j_extent = 0;
1382 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1383 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1385 edesc->src_is_chained);
1387 if (sg_count == 1) {
1388 to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1390 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1391 &edesc->link_tbl[0]);
1393 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1394 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1395 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1399 /* Only one segment now, so no link tbl needed */
1400 to_talitos_ptr(&desc->ptr[3],
1401 sg_dma_address(areq->src));
1406 desc->ptr[4].len = cpu_to_be16(cryptlen);
1407 desc->ptr[4].j_extent = 0;
1409 if (areq->src != areq->dst)
1410 sg_count = talitos_map_sg(dev, areq->dst,
1411 edesc->dst_nents ? : 1,
1413 edesc->dst_is_chained);
1415 if (sg_count == 1) {
1416 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1418 struct talitos_ptr *link_tbl_ptr =
1419 &edesc->link_tbl[edesc->src_nents + 1];
1421 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1422 (edesc->src_nents + 1) *
1423 sizeof(struct talitos_ptr));
1424 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1425 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1427 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1428 edesc->dma_len, DMA_BIDIRECTIONAL);
1432 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1435 /* last DWORD empty */
1436 desc->ptr[6].len = 0;
1437 to_talitos_ptr(&desc->ptr[6], 0);
1438 desc->ptr[6].j_extent = 0;
1440 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1441 if (ret != -EINPROGRESS) {
1442 common_nonsnoop_unmap(dev, edesc, areq);
1448 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1451 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1452 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1454 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1455 areq->nbytes, 0, 0, areq->base.flags);
1458 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1460 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1461 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1462 struct talitos_edesc *edesc;
1464 /* allocate extended descriptor */
1465 edesc = ablkcipher_edesc_alloc(areq);
1466 if (IS_ERR(edesc))
1467 return PTR_ERR(edesc);
1470 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1472 return common_nonsnoop(edesc, areq, ablkcipher_done);
1475 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1477 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1478 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1479 struct talitos_edesc *edesc;
1481 /* allocate extended descriptor */
1482 edesc = ablkcipher_edesc_alloc(areq);
1483 if (IS_ERR(edesc))
1484 return PTR_ERR(edesc);
1486 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1488 return common_nonsnoop(edesc, areq, ablkcipher_done);
1491 static void common_nonsnoop_hash_unmap(struct device *dev,
1492 struct talitos_edesc *edesc,
1493 struct ahash_request *areq)
1495 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1497 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1499 /* When using hashctx-in, must unmap it. */
1500 if (edesc->desc.ptr[1].len)
1501 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1504 if (edesc->desc.ptr[2].len)
1505 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1508 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1511 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1516 static void ahash_done(struct device *dev,
1517 struct talitos_desc *desc, void *context,
1520 struct ahash_request *areq = context;
1521 struct talitos_edesc *edesc =
1522 container_of(desc, struct talitos_edesc, desc);
1523 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1525 if (!req_ctx->last && req_ctx->to_hash_later) {
1526 /* Position any partial block for next update/final/finup */
1527 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1528 req_ctx->nbuf = req_ctx->to_hash_later;
1530 common_nonsnoop_hash_unmap(dev, edesc, areq);
1534 areq->base.complete(&areq->base, err);
1537 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1538 struct ahash_request *areq, unsigned int length,
1539 void (*callback) (struct device *dev,
1540 struct talitos_desc *desc,
1541 void *context, int error))
1543 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1544 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1545 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1546 struct device *dev = ctx->dev;
1547 struct talitos_desc *desc = &edesc->desc;
1550 /* first DWORD empty */
1551 desc->ptr[0] = zero_entry;
1553 /* hash context in */
1554 if (!req_ctx->first || req_ctx->swinit) {
1555 map_single_talitos_ptr(dev, &desc->ptr[1],
1556 req_ctx->hw_context_size,
1557 (char *)req_ctx->hw_context, 0,
1559 req_ctx->swinit = 0;
1561 desc->ptr[1] = zero_entry;
1562 /* Indicate next op is not the first. */
1568 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1569 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1571 desc->ptr[2] = zero_entry;
1576 desc->ptr[3].len = cpu_to_be16(length);
1577 desc->ptr[3].j_extent = 0;
1579 sg_count = talitos_map_sg(dev, req_ctx->psrc,
1580 edesc->src_nents ? : 1,
1582 edesc->src_is_chained);
1584 if (sg_count == 1) {
1585 to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1587 sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1588 &edesc->link_tbl[0]);
1590 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1591 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1592 dma_sync_single_for_device(ctx->dev,
1593 edesc->dma_link_tbl,
1597 /* Only one segment now, so no link tbl needed */
1598 to_talitos_ptr(&desc->ptr[3],
1599 sg_dma_address(req_ctx->psrc));
1603 /* fifth DWORD empty */
1604 desc->ptr[4] = zero_entry;
1606 /* hash/HMAC out -or- hash context out */
1608 map_single_talitos_ptr(dev, &desc->ptr[5],
1609 crypto_ahash_digestsize(tfm),
1610 areq->result, 0, DMA_FROM_DEVICE);
1612 map_single_talitos_ptr(dev, &desc->ptr[5],
1613 req_ctx->hw_context_size,
1614 req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1616 /* last DWORD empty */
1617 desc->ptr[6] = zero_entry;
1619 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1620 if (ret != -EINPROGRESS) {
1621 common_nonsnoop_hash_unmap(dev, edesc, areq);
1627 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1628 unsigned int nbytes)
1630 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1631 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1632 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1634 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1635 nbytes, 0, 0, areq->base.flags);
1638 static int ahash_init(struct ahash_request *areq)
1640 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1641 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1643 /* Initialize the context */
1645 req_ctx->first = 1; /* first indicates h/w must init its context */
1646 req_ctx->swinit = 0; /* assume h/w init of context */
1647 req_ctx->hw_context_size =
1648 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1649 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1650 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1656 * on h/w without explicit sha224 support, we initialize h/w context
1657 * manually with sha224 constants, and tell it to run sha256.
1659 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1661 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1664 req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */
1666 req_ctx->hw_context[0] = SHA224_H0;
1667 req_ctx->hw_context[1] = SHA224_H1;
1668 req_ctx->hw_context[2] = SHA224_H2;
1669 req_ctx->hw_context[3] = SHA224_H3;
1670 req_ctx->hw_context[4] = SHA224_H4;
1671 req_ctx->hw_context[5] = SHA224_H5;
1672 req_ctx->hw_context[6] = SHA224_H6;
1673 req_ctx->hw_context[7] = SHA224_H7;
1675 /* init 64-bit count */
1676 req_ctx->hw_context[8] = 0;
1677 req_ctx->hw_context[9] = 0;
1682 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1684 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1685 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1686 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1687 struct talitos_edesc *edesc;
1688 unsigned int blocksize =
1689 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1690 unsigned int nbytes_to_hash;
1691 unsigned int to_hash_later;
1695 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1696 /* Buffer up to one whole block */
1697 sg_copy_to_buffer(areq->src,
1698 sg_count(areq->src, nbytes, &chained),
1699 req_ctx->buf + req_ctx->nbuf, nbytes);
1700 req_ctx->nbuf += nbytes;
1704 /* At least (blocksize + 1) bytes are available to hash */
1705 nbytes_to_hash = nbytes + req_ctx->nbuf;
1706 to_hash_later = nbytes_to_hash & (blocksize - 1);
1710 else if (to_hash_later)
1711 /* There is a partial block. Hash the full block(s) now */
1712 nbytes_to_hash -= to_hash_later;
1714 /* Keep one block buffered */
1715 nbytes_to_hash -= blocksize;
1716 to_hash_later = blocksize;
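/*
 * Worked example (illustrative, blocksize = 64): with 10 bytes already in
 * req_ctx->buf and a 150-byte update, nbytes_to_hash = 160 and
 * to_hash_later = 160 & 63 = 32, so 128 bytes are hashed now and 32 are
 * carried over.  If the total were an exact multiple of the block size
 * (and this is not the final request), one whole block is held back
 * instead so a later final()/finup() still has data to pad and hash.
 */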
1719 /* Chain in any previously buffered data */
1720 if (req_ctx->nbuf) {
1721 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1722 sg_init_table(req_ctx->bufsl, nsg);
1723 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1725 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1726 req_ctx->psrc = req_ctx->bufsl;
1728 req_ctx->psrc = areq->src;
1730 if (to_hash_later) {
1731 int nents = sg_count(areq->src, nbytes, &chained);
1732 sg_copy_end_to_buffer(areq->src, nents,
1735 nbytes - to_hash_later);
1737 req_ctx->to_hash_later = to_hash_later;
1739 /* Allocate extended descriptor */
1740 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1741 if (IS_ERR(edesc))
1742 return PTR_ERR(edesc);
1744 edesc->desc.hdr = ctx->desc_hdr_template;
1746 /* On last one, request SEC to pad; otherwise continue */
1748 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1750 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1752 /* request SEC to INIT hash. */
1753 if (req_ctx->first && !req_ctx->swinit)
1754 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1756 /* When the tfm context has a keylen, it's an HMAC.
1757 * A first or last (i.e. not middle) descriptor must request HMAC.
1759 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1760 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1762 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1766 static int ahash_update(struct ahash_request *areq)
1768 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1772 return ahash_process_req(areq, areq->nbytes);
1775 static int ahash_final(struct ahash_request *areq)
1777 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1781 return ahash_process_req(areq, 0);
1784 static int ahash_finup(struct ahash_request *areq)
1786 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1790 return ahash_process_req(areq, areq->nbytes);
1793 static int ahash_digest(struct ahash_request *areq)
1795 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1796 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1801 return ahash_process_req(areq, areq->nbytes);
1804 struct keyhash_result {
1805 struct completion completion;
1809 static void keyhash_complete(struct crypto_async_request *req, int err)
1811 struct keyhash_result *res = req->data;
1813 if (err == -EINPROGRESS)
1817 complete(&res->completion);
1820 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1823 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1825 struct scatterlist sg[1];
1826 struct ahash_request *req;
1827 struct keyhash_result hresult;
1830 init_completion(&hresult.completion);
1832 req = ahash_request_alloc(tfm, GFP_KERNEL);
1836 /* Keep tfm keylen == 0 during hash of the long key */
1838 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1839 keyhash_complete, &hresult);
1841 sg_init_one(&sg[0], key, keylen);
1843 ahash_request_set_crypt(req, sg, hash, keylen);
1844 ret = crypto_ahash_digest(req);
1850 ret = wait_for_completion_interruptible(
1851 &hresult.completion);
1858 ahash_request_free(req);
1863 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1864 unsigned int keylen)
1866 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1867 unsigned int blocksize =
1868 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1869 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1870 unsigned int keysize = keylen;
1871 u8 hash[SHA512_DIGEST_SIZE];
1874 if (keylen <= blocksize)
1875 memcpy(ctx->key, key, keysize);
1877 /* Must get the hash of the long key */
1878 ret = keyhash(tfm, key, keylen, hash);
1881 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1885 keysize = digestsize;
1886 memcpy(ctx->key, hash, digestsize);
1889 ctx->keylen = keysize;
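/*
 * Example of the rule above (standard HMAC key handling): for a SHA-256
 * based HMAC the block size is 64 bytes, so a 100-byte key is first
 * digested by keyhash() and the 32-byte result becomes ctx->key, while a
 * 48-byte key is copied verbatim and keylen stays 48.
 */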
1895 struct talitos_alg_template {
1898 struct crypto_alg crypto;
1899 struct ahash_alg hash;
1901 __be32 desc_hdr_template;
1904 static struct talitos_alg_template driver_algs[] = {
1905 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1906 { .type = CRYPTO_ALG_TYPE_AEAD,
1908 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1909 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1910 .cra_blocksize = AES_BLOCK_SIZE,
1911 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1912 .cra_type = &crypto_aead_type,
1914 .setkey = aead_setkey,
1915 .setauthsize = aead_setauthsize,
1916 .encrypt = aead_encrypt,
1917 .decrypt = aead_decrypt,
1918 .givencrypt = aead_givencrypt,
1919 .geniv = "<built-in>",
1920 .ivsize = AES_BLOCK_SIZE,
1921 .maxauthsize = SHA1_DIGEST_SIZE,
1924 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1925 DESC_HDR_SEL0_AESU |
1926 DESC_HDR_MODE0_AESU_CBC |
1927 DESC_HDR_SEL1_MDEUA |
1928 DESC_HDR_MODE1_MDEU_INIT |
1929 DESC_HDR_MODE1_MDEU_PAD |
1930 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1932 { .type = CRYPTO_ALG_TYPE_AEAD,
1934 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1935 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1936 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1937 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1938 .cra_type = &crypto_aead_type,
1940 .setkey = aead_setkey,
1941 .setauthsize = aead_setauthsize,
1942 .encrypt = aead_encrypt,
1943 .decrypt = aead_decrypt,
1944 .givencrypt = aead_givencrypt,
1945 .geniv = "<built-in>",
1946 .ivsize = DES3_EDE_BLOCK_SIZE,
1947 .maxauthsize = SHA1_DIGEST_SIZE,
1950 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1952 DESC_HDR_MODE0_DEU_CBC |
1953 DESC_HDR_MODE0_DEU_3DES |
1954 DESC_HDR_SEL1_MDEUA |
1955 DESC_HDR_MODE1_MDEU_INIT |
1956 DESC_HDR_MODE1_MDEU_PAD |
1957 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1959 { .type = CRYPTO_ALG_TYPE_AEAD,
1961 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1962 .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1963 .cra_blocksize = AES_BLOCK_SIZE,
1964 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1965 .cra_type = &crypto_aead_type,
1967 .setkey = aead_setkey,
1968 .setauthsize = aead_setauthsize,
1969 .encrypt = aead_encrypt,
1970 .decrypt = aead_decrypt,
1971 .givencrypt = aead_givencrypt,
1972 .geniv = "<built-in>",
1973 .ivsize = AES_BLOCK_SIZE,
1974 .maxauthsize = SHA224_DIGEST_SIZE,
1977 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1978 DESC_HDR_SEL0_AESU |
1979 DESC_HDR_MODE0_AESU_CBC |
1980 DESC_HDR_SEL1_MDEUA |
1981 DESC_HDR_MODE1_MDEU_INIT |
1982 DESC_HDR_MODE1_MDEU_PAD |
1983 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1985 { .type = CRYPTO_ALG_TYPE_AEAD,
1987 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1988 .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1989 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1990 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1991 .cra_type = &crypto_aead_type,
1993 .setkey = aead_setkey,
1994 .setauthsize = aead_setauthsize,
1995 .encrypt = aead_encrypt,
1996 .decrypt = aead_decrypt,
1997 .givencrypt = aead_givencrypt,
1998 .geniv = "<built-in>",
1999 .ivsize = DES3_EDE_BLOCK_SIZE,
2000 .maxauthsize = SHA224_DIGEST_SIZE,
2003 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2005 DESC_HDR_MODE0_DEU_CBC |
2006 DESC_HDR_MODE0_DEU_3DES |
2007 DESC_HDR_SEL1_MDEUA |
2008 DESC_HDR_MODE1_MDEU_INIT |
2009 DESC_HDR_MODE1_MDEU_PAD |
2010 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2012 { .type = CRYPTO_ALG_TYPE_AEAD,
2014 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2015 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2016 .cra_blocksize = AES_BLOCK_SIZE,
2017 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2018 .cra_type = &crypto_aead_type,
2020 .setkey = aead_setkey,
2021 .setauthsize = aead_setauthsize,
2022 .encrypt = aead_encrypt,
2023 .decrypt = aead_decrypt,
2024 .givencrypt = aead_givencrypt,
2025 .geniv = "<built-in>",
2026 .ivsize = AES_BLOCK_SIZE,
2027 .maxauthsize = SHA256_DIGEST_SIZE,
2030 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2031 DESC_HDR_SEL0_AESU |
2032 DESC_HDR_MODE0_AESU_CBC |
2033 DESC_HDR_SEL1_MDEUA |
2034 DESC_HDR_MODE1_MDEU_INIT |
2035 DESC_HDR_MODE1_MDEU_PAD |
2036 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2038 { .type = CRYPTO_ALG_TYPE_AEAD,
2040 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2041 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2042 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2043 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2044 .cra_type = &crypto_aead_type,
2046 .setkey = aead_setkey,
2047 .setauthsize = aead_setauthsize,
2048 .encrypt = aead_encrypt,
2049 .decrypt = aead_decrypt,
2050 .givencrypt = aead_givencrypt,
2051 .geniv = "<built-in>",
2052 .ivsize = DES3_EDE_BLOCK_SIZE,
2053 .maxauthsize = SHA256_DIGEST_SIZE,
2056 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2058 DESC_HDR_MODE0_DEU_CBC |
2059 DESC_HDR_MODE0_DEU_3DES |
2060 DESC_HDR_SEL1_MDEUA |
2061 DESC_HDR_MODE1_MDEU_INIT |
2062 DESC_HDR_MODE1_MDEU_PAD |
2063 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2065 { .type = CRYPTO_ALG_TYPE_AEAD,
2067 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2068 .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2069 .cra_blocksize = AES_BLOCK_SIZE,
2070 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2071 .cra_type = &crypto_aead_type,
2073 .setkey = aead_setkey,
2074 .setauthsize = aead_setauthsize,
2075 .encrypt = aead_encrypt,
2076 .decrypt = aead_decrypt,
2077 .givencrypt = aead_givencrypt,
2078 .geniv = "<built-in>",
2079 .ivsize = AES_BLOCK_SIZE,
2080 .maxauthsize = SHA384_DIGEST_SIZE,
2083 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2084 DESC_HDR_SEL0_AESU |
2085 DESC_HDR_MODE0_AESU_CBC |
2086 DESC_HDR_SEL1_MDEUB |
2087 DESC_HDR_MODE1_MDEU_INIT |
2088 DESC_HDR_MODE1_MDEU_PAD |
2089 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2091 { .type = CRYPTO_ALG_TYPE_AEAD,
2093 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2094 .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2095 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2096 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2097 .cra_type = &crypto_aead_type,
2099 .setkey = aead_setkey,
2100 .setauthsize = aead_setauthsize,
2101 .encrypt = aead_encrypt,
2102 .decrypt = aead_decrypt,
2103 .givencrypt = aead_givencrypt,
2104 .geniv = "<built-in>",
2105 .ivsize = DES3_EDE_BLOCK_SIZE,
2106 .maxauthsize = SHA384_DIGEST_SIZE,
2109 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2111 DESC_HDR_MODE0_DEU_CBC |
2112 DESC_HDR_MODE0_DEU_3DES |
2113 DESC_HDR_SEL1_MDEUB |
2114 DESC_HDR_MODE1_MDEU_INIT |
2115 DESC_HDR_MODE1_MDEU_PAD |
2116 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2118 { .type = CRYPTO_ALG_TYPE_AEAD,
2120 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2121 .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2122 .cra_blocksize = AES_BLOCK_SIZE,
2123 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2124 .cra_type = &crypto_aead_type,
2126 .setkey = aead_setkey,
2127 .setauthsize = aead_setauthsize,
2128 .encrypt = aead_encrypt,
2129 .decrypt = aead_decrypt,
2130 .givencrypt = aead_givencrypt,
2131 .geniv = "<built-in>",
2132 .ivsize = AES_BLOCK_SIZE,
2133 .maxauthsize = SHA512_DIGEST_SIZE,
2136 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2137 DESC_HDR_SEL0_AESU |
2138 DESC_HDR_MODE0_AESU_CBC |
2139 DESC_HDR_SEL1_MDEUB |
2140 DESC_HDR_MODE1_MDEU_INIT |
2141 DESC_HDR_MODE1_MDEU_PAD |
2142 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2144 { .type = CRYPTO_ALG_TYPE_AEAD,
2146 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2147 .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2148 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2149 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2150 .cra_type = &crypto_aead_type,
2152 .setkey = aead_setkey,
2153 .setauthsize = aead_setauthsize,
2154 .encrypt = aead_encrypt,
2155 .decrypt = aead_decrypt,
2156 .givencrypt = aead_givencrypt,
2157 .geniv = "<built-in>",
2158 .ivsize = DES3_EDE_BLOCK_SIZE,
2159 .maxauthsize = SHA512_DIGEST_SIZE,
2162 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2164 DESC_HDR_MODE0_DEU_CBC |
2165 DESC_HDR_MODE0_DEU_3DES |
2166 DESC_HDR_SEL1_MDEUB |
2167 DESC_HDR_MODE1_MDEU_INIT |
2168 DESC_HDR_MODE1_MDEU_PAD |
2169 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2171 { .type = CRYPTO_ALG_TYPE_AEAD,
2173 .cra_name = "authenc(hmac(md5),cbc(aes))",
2174 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2175 .cra_blocksize = AES_BLOCK_SIZE,
2176 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2177 .cra_type = &crypto_aead_type,
2179 .setkey = aead_setkey,
2180 .setauthsize = aead_setauthsize,
2181 .encrypt = aead_encrypt,
2182 .decrypt = aead_decrypt,
2183 .givencrypt = aead_givencrypt,
2184 .geniv = "<built-in>",
2185 .ivsize = AES_BLOCK_SIZE,
2186 .maxauthsize = MD5_DIGEST_SIZE,
2189 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2190 DESC_HDR_SEL0_AESU |
2191 DESC_HDR_MODE0_AESU_CBC |
2192 DESC_HDR_SEL1_MDEUA |
2193 DESC_HDR_MODE1_MDEU_INIT |
2194 DESC_HDR_MODE1_MDEU_PAD |
2195 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2197 { .type = CRYPTO_ALG_TYPE_AEAD,
2199 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2200 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2201 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2202 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2203 .cra_type = &crypto_aead_type,
2205 .setkey = aead_setkey,
2206 .setauthsize = aead_setauthsize,
2207 .encrypt = aead_encrypt,
2208 .decrypt = aead_decrypt,
2209 .givencrypt = aead_givencrypt,
2210 .geniv = "<built-in>",
2211 .ivsize = DES3_EDE_BLOCK_SIZE,
2212 .maxauthsize = MD5_DIGEST_SIZE,
2215 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2216 DESC_HDR_SEL0_DEU |
2217 DESC_HDR_MODE0_DEU_CBC |
2218 DESC_HDR_MODE0_DEU_3DES |
2219 DESC_HDR_SEL1_MDEUA |
2220 DESC_HDR_MODE1_MDEU_INIT |
2221 DESC_HDR_MODE1_MDEU_PAD |
2222 DESC_HDR_MODE1_MDEU_MD5_HMAC,
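/*
 * Each IPSEC_ESP template above pairs a cipher unit and mode (SEL0/MODE0)
 * with an MDEU HMAC mode (SEL1/MODE1), so a single descriptor performs
 * both encryption and authentication.
 */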
2224 /* ABLKCIPHER algorithms. */
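/*
 * These are plain (unauthenticated) CBC templates: they use the common
 * non-snooping descriptor type and select only a primary execution unit,
 * with no MDEU pass.
 */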
2225 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2227 .cra_name = "cbc(aes)",
2228 .cra_driver_name = "cbc-aes-talitos",
2229 .cra_blocksize = AES_BLOCK_SIZE,
2230 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2232 .cra_type = &crypto_ablkcipher_type,
2234 .setkey = ablkcipher_setkey,
2235 .encrypt = ablkcipher_encrypt,
2236 .decrypt = ablkcipher_decrypt,
2238 .min_keysize = AES_MIN_KEY_SIZE,
2239 .max_keysize = AES_MAX_KEY_SIZE,
2240 .ivsize = AES_BLOCK_SIZE,
2243 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2244 DESC_HDR_SEL0_AESU |
2245 DESC_HDR_MODE0_AESU_CBC,
2247 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2249 .cra_name = "cbc(des3_ede)",
2250 .cra_driver_name = "cbc-3des-talitos",
2251 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2252 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2254 .cra_type = &crypto_ablkcipher_type,
2256 .setkey = ablkcipher_setkey,
2257 .encrypt = ablkcipher_encrypt,
2258 .decrypt = ablkcipher_decrypt,
2260 .min_keysize = DES3_EDE_KEY_SIZE,
2261 .max_keysize = DES3_EDE_KEY_SIZE,
2262 .ivsize = DES3_EDE_BLOCK_SIZE,
2265 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2266 DESC_HDR_SEL0_DEU |
2267 DESC_HDR_MODE0_DEU_CBC |
2268 DESC_HDR_MODE0_DEU_3DES,
2270 /* AHASH algorithms. */
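/*
 * Unkeyed digests come first, followed by the hmac() variants, which
 * additionally provide .setkey.  MD5/SHA-1/SHA-224/SHA-256 run on MDEU-A;
 * SHA-384/SHA-512 require MDEU-B.
 */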
2271 { .type = CRYPTO_ALG_TYPE_AHASH,
2274 .update = ahash_update,
2275 .final = ahash_final,
2276 .finup = ahash_finup,
2277 .digest = ahash_digest,
2278 .halg.digestsize = MD5_DIGEST_SIZE,
2280 .cra_name = "md5",
2281 .cra_driver_name = "md5-talitos",
2282 .cra_blocksize = MD5_BLOCK_SIZE,
2283 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2285 .cra_type = &crypto_ahash_type
2288 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2289 DESC_HDR_SEL0_MDEUA |
2290 DESC_HDR_MODE0_MDEU_MD5,
2292 { .type = CRYPTO_ALG_TYPE_AHASH,
2295 .update = ahash_update,
2296 .final = ahash_final,
2297 .finup = ahash_finup,
2298 .digest = ahash_digest,
2299 .halg.digestsize = SHA1_DIGEST_SIZE,
2301 .cra_name = "sha1",
2302 .cra_driver_name = "sha1-talitos",
2303 .cra_blocksize = SHA1_BLOCK_SIZE,
2304 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2306 .cra_type = &crypto_ahash_type
2309 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2310 DESC_HDR_SEL0_MDEUA |
2311 DESC_HDR_MODE0_MDEU_SHA1,
2313 { .type = CRYPTO_ALG_TYPE_AHASH,
2316 .update = ahash_update,
2317 .final = ahash_final,
2318 .finup = ahash_finup,
2319 .digest = ahash_digest,
2320 .halg.digestsize = SHA224_DIGEST_SIZE,
2322 .cra_name = "sha224",
2323 .cra_driver_name = "sha224-talitos",
2324 .cra_blocksize = SHA224_BLOCK_SIZE,
2325 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2327 .cra_type = &crypto_ahash_type
2330 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2331 DESC_HDR_SEL0_MDEUA |
2332 DESC_HDR_MODE0_MDEU_SHA224,
2334 { .type = CRYPTO_ALG_TYPE_AHASH,
2337 .update = ahash_update,
2338 .final = ahash_final,
2339 .finup = ahash_finup,
2340 .digest = ahash_digest,
2341 .halg.digestsize = SHA256_DIGEST_SIZE,
2343 .cra_name = "sha256",
2344 .cra_driver_name = "sha256-talitos",
2345 .cra_blocksize = SHA256_BLOCK_SIZE,
2346 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2348 .cra_type = &crypto_ahash_type
2351 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2352 DESC_HDR_SEL0_MDEUA |
2353 DESC_HDR_MODE0_MDEU_SHA256,
2355 { .type = CRYPTO_ALG_TYPE_AHASH,
2358 .update = ahash_update,
2359 .final = ahash_final,
2360 .finup = ahash_finup,
2361 .digest = ahash_digest,
2362 .halg.digestsize = SHA384_DIGEST_SIZE,
2364 .cra_name = "sha384",
2365 .cra_driver_name = "sha384-talitos",
2366 .cra_blocksize = SHA384_BLOCK_SIZE,
2367 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2369 .cra_type = &crypto_ahash_type
2372 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2373 DESC_HDR_SEL0_MDEUB |
2374 DESC_HDR_MODE0_MDEUB_SHA384,
2376 { .type = CRYPTO_ALG_TYPE_AHASH,
2379 .update = ahash_update,
2380 .final = ahash_final,
2381 .finup = ahash_finup,
2382 .digest = ahash_digest,
2383 .halg.digestsize = SHA512_DIGEST_SIZE,
2385 .cra_name = "sha512",
2386 .cra_driver_name = "sha512-talitos",
2387 .cra_blocksize = SHA512_BLOCK_SIZE,
2388 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2390 .cra_type = &crypto_ahash_type
2393 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2394 DESC_HDR_SEL0_MDEUB |
2395 DESC_HDR_MODE0_MDEUB_SHA512,
2397 { .type = CRYPTO_ALG_TYPE_AHASH,
2400 .update = ahash_update,
2401 .final = ahash_final,
2402 .finup = ahash_finup,
2403 .digest = ahash_digest,
2404 .setkey = ahash_setkey,
2405 .halg.digestsize = MD5_DIGEST_SIZE,
2407 .cra_name = "hmac(md5)",
2408 .cra_driver_name = "hmac-md5-talitos",
2409 .cra_blocksize = MD5_BLOCK_SIZE,
2410 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2412 .cra_type = &crypto_ahash_type
2415 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2416 DESC_HDR_SEL0_MDEUA |
2417 DESC_HDR_MODE0_MDEU_MD5,
2419 { .type = CRYPTO_ALG_TYPE_AHASH,
2422 .update = ahash_update,
2423 .final = ahash_final,
2424 .finup = ahash_finup,
2425 .digest = ahash_digest,
2426 .setkey = ahash_setkey,
2427 .halg.digestsize = SHA1_DIGEST_SIZE,
2429 .cra_name = "hmac(sha1)",
2430 .cra_driver_name = "hmac-sha1-talitos",
2431 .cra_blocksize = SHA1_BLOCK_SIZE,
2432 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2434 .cra_type = &crypto_ahash_type
2437 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2438 DESC_HDR_SEL0_MDEUA |
2439 DESC_HDR_MODE0_MDEU_SHA1,
2441 { .type = CRYPTO_ALG_TYPE_AHASH,
2444 .update = ahash_update,
2445 .final = ahash_final,
2446 .finup = ahash_finup,
2447 .digest = ahash_digest,
2448 .setkey = ahash_setkey,
2449 .halg.digestsize = SHA224_DIGEST_SIZE,
2451 .cra_name = "hmac(sha224)",
2452 .cra_driver_name = "hmac-sha224-talitos",
2453 .cra_blocksize = SHA224_BLOCK_SIZE,
2454 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2456 .cra_type = &crypto_ahash_type
2459 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2460 DESC_HDR_SEL0_MDEUA |
2461 DESC_HDR_MODE0_MDEU_SHA224,
2463 { .type = CRYPTO_ALG_TYPE_AHASH,
2466 .update = ahash_update,
2467 .final = ahash_final,
2468 .finup = ahash_finup,
2469 .digest = ahash_digest,
2470 .setkey = ahash_setkey,
2471 .halg.digestsize = SHA256_DIGEST_SIZE,
2473 .cra_name = "hmac(sha256)",
2474 .cra_driver_name = "hmac-sha256-talitos",
2475 .cra_blocksize = SHA256_BLOCK_SIZE,
2476 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2478 .cra_type = &crypto_ahash_type
2481 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2482 DESC_HDR_SEL0_MDEUA |
2483 DESC_HDR_MODE0_MDEU_SHA256,
2485 { .type = CRYPTO_ALG_TYPE_AHASH,
2488 .update = ahash_update,
2489 .final = ahash_final,
2490 .finup = ahash_finup,
2491 .digest = ahash_digest,
2492 .setkey = ahash_setkey,
2493 .halg.digestsize = SHA384_DIGEST_SIZE,
2495 .cra_name = "hmac(sha384)",
2496 .cra_driver_name = "hmac-sha384-talitos",
2497 .cra_blocksize = SHA384_BLOCK_SIZE,
2498 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2500 .cra_type = &crypto_ahash_type
2503 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2504 DESC_HDR_SEL0_MDEUB |
2505 DESC_HDR_MODE0_MDEUB_SHA384,
2507 { .type = CRYPTO_ALG_TYPE_AHASH,
2510 .update = ahash_update,
2511 .final = ahash_final,
2512 .finup = ahash_finup,
2513 .digest = ahash_digest,
2514 .setkey = ahash_setkey,
2515 .halg.digestsize = SHA512_DIGEST_SIZE,
2517 .cra_name = "hmac(sha512)",
2518 .cra_driver_name = "hmac-sha512-talitos",
2519 .cra_blocksize = SHA512_BLOCK_SIZE,
2520 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2522 .cra_type = &crypto_ahash_type
2525 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2526 DESC_HDR_SEL0_MDEUB |
2527 DESC_HDR_MODE0_MDEUB_SHA512,
2531 struct talitos_crypto_alg {
2532 struct list_head entry;
2533 struct device *dev;
2534 struct talitos_alg_template algt;
2535 };
2537 static int talitos_cra_init(struct crypto_tfm *tfm)
2539 struct crypto_alg *alg = tfm->__crt_alg;
2540 struct talitos_crypto_alg *talitos_alg;
2541 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2542 struct talitos_private *priv;
2544 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2545 talitos_alg = container_of(__crypto_ahash_alg(alg),
2546 struct talitos_crypto_alg,
2547 algt.alg.hash);
2548 else
2549 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2550 algt.alg.crypto);
2552 /* update context with ptr to dev */
2553 ctx->dev = talitos_alg->dev;
2555 /* assign SEC channel to tfm in round-robin fashion */
2556 priv = dev_get_drvdata(ctx->dev);
2557 ctx->ch = atomic_inc_return(&priv->last_chan) &
2558 (priv->num_channels - 1);
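/*
 * num_channels is validated to be a power of two at probe time, so the
 * mask above is a cheap modulo that spreads tfms evenly across channels.
 */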
2560 /* copy descriptor header template value */
2561 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2563 /* select done notification */
2564 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2566 return 0;
2567 }
2569 static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2571 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2573 talitos_cra_init(tfm);
2575 /* random first IV */
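/*
 * The AEAD templates advertise geniv "<built-in>", i.e. the driver
 * generates IVs itself (aead_givencrypt); the random per-tfm value seeded
 * here gives that generator an unpredictable starting point.
 */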
2576 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2578 return 0;
2579 }
2581 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2583 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2585 talitos_cra_init(tfm);
2588 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2589 sizeof(struct talitos_ahash_req_ctx));
2591 return 0;
2592 }
2595 * given the alg's descriptor header template, determine whether descriptor
2596 * type and primary/secondary execution units required match the hw
2597 * capabilities description provided in the device tree node.
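 *
 * The masks consulted here are populated at probe time from the
 * "fsl,descriptor-types-mask" and "fsl,exec-units-mask" properties.
 */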
2599 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2601 struct talitos_private *priv = dev_get_drvdata(dev);
2604 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2605 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2607 if (SECONDARY_EU(desc_hdr_template))
2608 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2609 & priv->exec_units);
2611 return ret;
2612 }
2614 static int talitos_remove(struct platform_device *ofdev)
2616 struct device *dev = &ofdev->dev;
2617 struct talitos_private *priv = dev_get_drvdata(dev);
2618 struct talitos_crypto_alg *t_alg, *n;
2621 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2622 switch (t_alg->algt.type) {
2623 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2624 case CRYPTO_ALG_TYPE_AEAD:
2625 crypto_unregister_alg(&t_alg->algt.alg.crypto);
2626 break;
2627 case CRYPTO_ALG_TYPE_AHASH:
2628 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2629 break;
2630 }
2631 list_del(&t_alg->entry);
2632 kfree(t_alg);
2633 }
2635 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2636 talitos_unregister_rng(dev);
2638 for (i = 0; i < priv->num_channels; i++)
2639 kfree(priv->chan[i].fifo);
2643 for (i = 0; i < 2; i++)
2644 if (priv->irq[i]) {
2645 free_irq(priv->irq[i], dev);
2646 irq_dispose_mapping(priv->irq[i]);
2647 }
2649 tasklet_kill(&priv->done_task[0]);
2650 if (priv->irq[1])
2651 tasklet_kill(&priv->done_task[1]);
2653 iounmap(priv->reg);
2655 dev_set_drvdata(dev, NULL);
2657 kfree(priv);
2659 return 0;
2660 }
2662 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2663 struct talitos_alg_template
2666 struct talitos_private *priv = dev_get_drvdata(dev);
2667 struct talitos_crypto_alg *t_alg;
2668 struct crypto_alg *alg;
2670 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2671 if (!t_alg)
2672 return ERR_PTR(-ENOMEM);
2674 t_alg->algt = *template;
2676 switch (t_alg->algt.type) {
2677 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2678 alg = &t_alg->algt.alg.crypto;
2679 alg->cra_init = talitos_cra_init;
2680 break;
2681 case CRYPTO_ALG_TYPE_AEAD:
2682 alg = &t_alg->algt.alg.crypto;
2683 alg->cra_init = talitos_cra_init_aead;
2684 break;
2685 case CRYPTO_ALG_TYPE_AHASH:
2686 alg = &t_alg->algt.alg.hash.halg.base;
2687 alg->cra_init = talitos_cra_init_ahash;
2688 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2689 !strncmp(alg->cra_name, "hmac", 4)) {
2690 kfree(t_alg);
2691 return ERR_PTR(-ENOTSUPP);
2692 }
2693 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2694 (!strcmp(alg->cra_name, "sha224") ||
2695 !strcmp(alg->cra_name, "hmac(sha224)"))) {
2696 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2697 t_alg->algt.desc_hdr_template =
2698 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2699 DESC_HDR_SEL0_MDEUA |
2700 DESC_HDR_MODE0_MDEU_SHA256;
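/*
 * SHA-224 is SHA-256 with a different initial state and a truncated
 * digest, so on parts without hardware SHA-224 init the initial state
 * is loaded by software and the MDEU runs in SHA-256 mode.
 */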
2701 }
2702 break;
2703 default:
2704 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2705 return ERR_PTR(-EINVAL);
2708 alg->cra_module = THIS_MODULE;
2709 alg->cra_priority = TALITOS_CRA_PRIORITY;
2710 alg->cra_alignmask = 0;
2711 alg->cra_ctxsize = sizeof(struct talitos_ctx);
2712 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2719 static int talitos_probe_irq(struct platform_device *ofdev)
2721 struct device *dev = &ofdev->dev;
2722 struct device_node *np = ofdev->dev.of_node;
2723 struct talitos_private *priv = dev_get_drvdata(dev);
2726 priv->irq[0] = irq_of_parse_and_map(np, 0);
2727 if (!priv->irq[0]) {
2728 dev_err(dev, "failed to map irq\n");
2729 return -EINVAL;
2730 }
2732 priv->irq[1] = irq_of_parse_and_map(np, 1);
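/*
 * The SEC is wired with either a single combined interrupt for all four
 * channels or a pair of interrupts covering channels 0/2 and 1/3; the
 * handlers requested below match whichever layout the device tree
 * describes.
 */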
2734 /* get the primary irq line */
2735 if (!priv->irq[1]) {
2736 err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
2737 dev_driver_string(dev), dev);
2741 err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
2742 dev_driver_string(dev), dev);
2746 /* get the secondary irq line */
2747 err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
2748 dev_driver_string(dev), dev);
2750 dev_err(dev, "failed to request secondary irq\n");
2751 irq_dispose_mapping(priv->irq[1]);
2759 dev_err(dev, "failed to request primary irq\n");
2760 irq_dispose_mapping(priv->irq[0]);
2767 static int talitos_probe(struct platform_device *ofdev)
2769 struct device *dev = &ofdev->dev;
2770 struct device_node *np = ofdev->dev.of_node;
2771 struct talitos_private *priv;
2772 const unsigned int *prop;
2775 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2776 if (!priv)
2777 return -ENOMEM;
2779 dev_set_drvdata(dev, priv);
2781 priv->ofdev = ofdev;
2783 spin_lock_init(&priv->reg_lock);
2785 err = talitos_probe_irq(ofdev);
2786 if (err)
2787 goto err_out;
2789 if (!priv->irq[1]) {
2790 tasklet_init(&priv->done_task[0], talitos_done_4ch,
2791 (unsigned long)dev);
2792 } else {
2793 tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
2794 (unsigned long)dev);
2795 tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
2796 (unsigned long)dev);
2797 }
2799 INIT_LIST_HEAD(&priv->alg_list);
2801 priv->reg = of_iomap(np, 0);
2802 if (!priv->reg) {
2803 dev_err(dev, "failed to of_iomap\n");
2804 err = -ENOMEM;
2805 goto err_out;
2806 }
2808 /* get SEC version capabilities from device tree */
2809 prop = of_get_property(np, "fsl,num-channels", NULL);
2810 if (prop)
2811 priv->num_channels = *prop;
2813 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2814 if (prop)
2815 priv->chfifo_len = *prop;
2817 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2818 if (prop)
2819 priv->exec_units = *prop;
2821 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2822 if (prop)
2823 priv->desc_types = *prop;
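/*
 * A rough sketch of the SEC device tree node these properties come from
 * (unit address and values are illustrative only and vary per SoC):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0x7e>;
 *		fsl,descriptor-types-mask = <0x01010ebf>;
 *	};
 */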
2825 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2826 !priv->exec_units || !priv->desc_types) {
2827 dev_err(dev, "invalid property data in device tree node\n");
2828 err = -EINVAL;
2829 goto err_out;
2830 }
2832 if (of_device_is_compatible(np, "fsl,sec3.0"))
2833 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2835 if (of_device_is_compatible(np, "fsl,sec2.1"))
2836 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2837 TALITOS_FTR_SHA224_HWINIT |
2838 TALITOS_FTR_HMAC_OK;
2840 priv->chan = kzalloc(sizeof(struct talitos_channel) *
2841 priv->num_channels, GFP_KERNEL);
2842 if (!priv->chan) {
2843 dev_err(dev, "failed to allocate channel management space\n");
2844 err = -ENOMEM;
2845 goto err_out;
2846 }
2848 for (i = 0; i < priv->num_channels; i++) {
2849 priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
2850 if (!priv->irq[1] || !(i & 1))
2851 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2854 for (i = 0; i < priv->num_channels; i++) {
2855 spin_lock_init(&priv->chan[i].head_lock);
2856 spin_lock_init(&priv->chan[i].tail_lock);
2859 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2861 for (i = 0; i < priv->num_channels; i++) {
2862 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
2863 priv->fifo_len, GFP_KERNEL);
2864 if (!priv->chan[i].fifo) {
2865 dev_err(dev, "failed to allocate request fifo %d\n", i);
2866 err = -ENOMEM;
2867 goto err_out;
2868 }
2869 }
2871 for (i = 0; i < priv->num_channels; i++)
2872 atomic_set(&priv->chan[i].submit_count,
2873 -(priv->chfifo_len - 1));
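/*
 * Starting each submit counter at -(chfifo_len - 1) lets the submit path
 * bound outstanding requests with a single atomic increment: once the
 * counter would go positive, the channel FIFO is treated as full.
 */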
2875 dma_set_mask(dev, DMA_BIT_MASK(36));
2877 /* reset and initialize the h/w */
2878 err = init_device(dev);
2879 if (err) {
2880 dev_err(dev, "failed to initialize device\n");
2881 goto err_out;
2882 }
2884 /* register the RNG, if available */
2885 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2886 err = talitos_register_rng(dev);
2887 if (err) {
2888 dev_err(dev, "failed to register hwrng: %d\n", err);
2889 goto err_out;
2890 } else
2891 dev_info(dev, "hwrng\n");
2892 }
2894 /* register crypto algorithms the device supports */
2895 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2896 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2897 struct talitos_crypto_alg *t_alg;
2900 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2901 if (IS_ERR(t_alg)) {
2902 err = PTR_ERR(t_alg);
2903 if (err == -ENOTSUPP)
2904 continue;
2905 goto err_out;
2906 }
2908 switch (t_alg->algt.type) {
2909 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2910 case CRYPTO_ALG_TYPE_AEAD:
2911 err = crypto_register_alg(
2912 &t_alg->algt.alg.crypto);
2913 name = t_alg->algt.alg.crypto.cra_driver_name;
2914 break;
2915 case CRYPTO_ALG_TYPE_AHASH:
2916 err = crypto_register_ahash(
2917 &t_alg->algt.alg.hash);
2918 name =
2919 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2920 break;
2921 }
2922 if (err) {
2923 dev_err(dev, "%s alg registration failed\n",
2924 name);
2925 kfree(t_alg);
2926 } else
2927 list_add_tail(&t_alg->entry, &priv->alg_list);
2930 if (!list_empty(&priv->alg_list))
2931 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2932 (char *)of_get_property(np, "compatible", NULL));
2934 return 0;
2936 err_out:
2937 talitos_remove(ofdev);
2939 return err;
2940 }
2942 static const struct of_device_id talitos_match[] = {
2943 {
2944 .compatible = "fsl,sec2.0",
2945 },
2946 {},
2947 };
2948 MODULE_DEVICE_TABLE(of, talitos_match);
2950 static struct platform_driver talitos_driver = {
2951 .driver = {
2952 .name = "talitos",
2953 .owner = THIS_MODULE,
2954 .of_match_table = talitos_match,
2955 },
2956 .probe = talitos_probe,
2957 .remove = talitos_remove,
2958 };
2960 module_platform_driver(talitos_driver);
2962 MODULE_LICENSE("GPL");
2963 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2964 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");