1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59                            bool is_sec1)
60 {
61         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62         if (!is_sec1)
63                 ptr->eptr = upper_32_bits(dma_addr);
64 }
65
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67                              struct talitos_ptr *src_ptr, bool is_sec1)
68 {
69         dst_ptr->ptr = src_ptr->ptr;
70         if (!is_sec1)
71                 dst_ptr->eptr = src_ptr->eptr;
72 }
73
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
75                                bool is_sec1)
76 {
77         if (is_sec1) {
78                 ptr->res = 0;
79                 ptr->len1 = cpu_to_be16(len);
80         } else {
81                 ptr->len = cpu_to_be16(len);
82         }
83 }
84
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86                                            bool is_sec1)
87 {
88         if (is_sec1)
89                 return be16_to_cpu(ptr->len1);
90         else
91                 return be16_to_cpu(ptr->len);
92 }
93
94 static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
95 {
96         if (!is_sec1)
97                 ptr->j_extent = 0;
98 }
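/*
 * Layout sketch (derived from the helpers above; assumes the talitos_ptr
 * definition in talitos.h): on SEC2+ a pointer carries a 16-bit length, a
 * j_extent byte and an eptr byte holding bits 32-35 of the bus address,
 * while SEC1 reuses the first half-word as res/len1 and has no extended
 * pointer:
 *
 *	SEC2+:  | len (16) | j_extent (8) | eptr (8) | ptr (32) |
 *	SEC1:   | res (16) | len1 (16)               | ptr (32) |
 *
 * which is why the helpers only touch eptr and j_extent when !is_sec1.
 */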
99
100 /*
101  * map virtual single (contiguous) pointer to h/w descriptor pointer
102  */
103 static void map_single_talitos_ptr(struct device *dev,
104                                    struct talitos_ptr *ptr,
105                                    unsigned int len, void *data,
106                                    enum dma_data_direction dir)
107 {
108         dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
109         struct talitos_private *priv = dev_get_drvdata(dev);
110         bool is_sec1 = has_ftr_sec1(priv);
111
112         to_talitos_ptr_len(ptr, len, is_sec1);
113         to_talitos_ptr(ptr, dma_addr, is_sec1);
114         to_talitos_ptr_extent_clear(ptr, is_sec1);
115 }
116
117 /*
118  * unmap bus single (contiguous) h/w descriptor pointer
119  */
120 static void unmap_single_talitos_ptr(struct device *dev,
121                                      struct talitos_ptr *ptr,
122                                      enum dma_data_direction dir)
123 {
124         struct talitos_private *priv = dev_get_drvdata(dev);
125         bool is_sec1 = has_ftr_sec1(priv);
126
127         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
128                          from_talitos_ptr_len(ptr, is_sec1), dir);
129 }
130
131 static int reset_channel(struct device *dev, int ch)
132 {
133         struct talitos_private *priv = dev_get_drvdata(dev);
134         unsigned int timeout = TALITOS_TIMEOUT;
135         bool is_sec1 = has_ftr_sec1(priv);
136
137         if (is_sec1) {
138                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
139                           TALITOS1_CCCR_LO_RESET);
140
141                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
142                         TALITOS1_CCCR_LO_RESET) && --timeout)
143                         cpu_relax();
144         } else {
145                 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
146                           TALITOS2_CCCR_RESET);
147
148                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
149                         TALITOS2_CCCR_RESET) && --timeout)
150                         cpu_relax();
151         }
152
153         if (timeout == 0) {
154                 dev_err(dev, "failed to reset channel %d\n", ch);
155                 return -EIO;
156         }
157
158         /* set 36-bit addressing, done writeback enable and done IRQ enable */
159         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
160                   TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
161
162         /* and ICCR writeback, if available */
163         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
164                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
165                           TALITOS_CCCR_LO_IWSE);
166
167         return 0;
168 }
169
170 static int reset_device(struct device *dev)
171 {
172         struct talitos_private *priv = dev_get_drvdata(dev);
173         unsigned int timeout = TALITOS_TIMEOUT;
174         bool is_sec1 = has_ftr_sec1(priv);
175         u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
176
177         setbits32(priv->reg + TALITOS_MCR, mcr);
178
179         while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
180                && --timeout)
181                 cpu_relax();
182
183         if (priv->irq[1]) {
184                 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
185                 setbits32(priv->reg + TALITOS_MCR, mcr);
186         }
187
188         if (timeout == 0) {
189                 dev_err(dev, "failed to reset device\n");
190                 return -EIO;
191         }
192
193         return 0;
194 }
195
196 /*
197  * Reset and initialize the device
198  */
199 static int init_device(struct device *dev)
200 {
201         struct talitos_private *priv = dev_get_drvdata(dev);
202         int ch, err;
203         bool is_sec1 = has_ftr_sec1(priv);
204
205         /*
206          * Master reset
207          * errata documentation: warning: certain SEC interrupts
208          * are not fully cleared by writing the MCR:SWR bit,
209          * set bit twice to completely reset
210          */
211         err = reset_device(dev);
212         if (err)
213                 return err;
214
215         err = reset_device(dev);
216         if (err)
217                 return err;
218
219         /* reset channels */
220         for (ch = 0; ch < priv->num_channels; ch++) {
221                 err = reset_channel(dev, ch);
222                 if (err)
223                         return err;
224         }
225
226         /* enable channel done and error interrupts */
227         if (is_sec1) {
228                 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
229                 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
230                 /* disable parity error check in DEU (test vectors seem to trip it erroneously) */
231                 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
232         } else {
233                 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
234                 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
235         }
236
237         /* disable integrity check error interrupts (use writeback instead) */
238         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
239                 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
240                           TALITOS_MDEUICR_LO_ICE);
241
242         return 0;
243 }
244
245 /**
246  * talitos_submit - submits a descriptor to the device for processing
247  * @dev:        the SEC device to be used
248  * @ch:         the SEC device channel to be used
249  * @desc:       the descriptor to be processed by the device
250  * @callback:   whom to call when processing is complete
251  * @context:    a handle for use by caller (optional)
252  *
253  * desc must contain valid dma-mapped (bus physical) address pointers.
254  * callback must check err and feedback in descriptor header
255  * for device processing status.
256  */
257 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
258                    void (*callback)(struct device *dev,
259                                     struct talitos_desc *desc,
260                                     void *context, int error),
261                    void *context)
262 {
263         struct talitos_private *priv = dev_get_drvdata(dev);
264         struct talitos_request *request;
265         unsigned long flags;
266         int head;
267         bool is_sec1 = has_ftr_sec1(priv);
268
269         spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
270
271         if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
272                 /* h/w fifo is full */
273                 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
274                 return -EAGAIN;
275         }
276
277         head = priv->chan[ch].head;
278         request = &priv->chan[ch].fifo[head];
279
280         /* map descriptor and save caller data */
281         if (is_sec1) {
282                 desc->hdr1 = desc->hdr;
283                 desc->next_desc = 0;
284                 request->dma_desc = dma_map_single(dev, &desc->hdr1,
285                                                    TALITOS_DESC_SIZE,
286                                                    DMA_BIDIRECTIONAL);
287         } else {
288                 request->dma_desc = dma_map_single(dev, desc,
289                                                    TALITOS_DESC_SIZE,
290                                                    DMA_BIDIRECTIONAL);
291         }
292         request->callback = callback;
293         request->context = context;
294
295         /* increment fifo head */
296         priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
297
298         smp_wmb();
299         request->desc = desc;
300
301         /* GO! */
302         wmb();
303         out_be32(priv->chan[ch].reg + TALITOS_FF,
304                  upper_32_bits(request->dma_desc));
305         out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
306                  lower_32_bits(request->dma_desc));
307
308         spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
309
310         return -EINPROGRESS;
311 }
312 EXPORT_SYMBOL(talitos_submit);
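/*
 * Usage sketch (illustrative, not part of the driver): a caller builds a
 * descriptor whose pointers are already dma-mapped, submits it, and treats
 * -EINPROGRESS as success and -EAGAIN as a full channel fifo.  my_done,
 * my_context and handle_submit_failure are hypothetical names.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// inspect error and the DESC_HDR_DONE feedback in desc->hdr
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_context);
 *	if (err != -EINPROGRESS)
 *		handle_submit_failure(err);	// e.g. -EAGAIN: channel fifo full
 */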
313
314 /*
315  * process what was done, notify callback of error if not
316  */
317 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
318 {
319         struct talitos_private *priv = dev_get_drvdata(dev);
320         struct talitos_request *request, saved_req;
321         unsigned long flags;
322         int tail, status;
323         bool is_sec1 = has_ftr_sec1(priv);
324
325         spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
326
327         tail = priv->chan[ch].tail;
328         while (priv->chan[ch].fifo[tail].desc) {
329                 __be32 hdr;
330
331                 request = &priv->chan[ch].fifo[tail];
332
333                 /* descriptors with their done bits set don't get the error */
334                 rmb();
335                 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
336
337                 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
338                         status = 0;
339                 else
340                         if (!error)
341                                 break;
342                         else
343                                 status = error;
344
345                 dma_unmap_single(dev, request->dma_desc,
346                                  TALITOS_DESC_SIZE,
347                                  DMA_BIDIRECTIONAL);
348
349                 /* copy entries so we can call callback outside lock */
350                 saved_req.desc = request->desc;
351                 saved_req.callback = request->callback;
352                 saved_req.context = request->context;
353
354                 /* release request entry in fifo */
355                 smp_wmb();
356                 request->desc = NULL;
357
358                 /* increment fifo tail */
359                 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
360
361                 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
362
363                 atomic_dec(&priv->chan[ch].submit_count);
364
365                 saved_req.callback(dev, saved_req.desc, saved_req.context,
366                                    status);
367                 /* channel may resume processing in single desc error case */
368                 if (error && !reset_ch && status == error)
369                         return;
370                 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
371                 tail = priv->chan[ch].tail;
372         }
373
374         spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
375 }
376
377 /*
378  * process completed requests for channels that have done status
379  */
380 #define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
381 static void talitos1_done_##name(unsigned long data)                    \
382 {                                                                       \
383         struct device *dev = (struct device *)data;                     \
384         struct talitos_private *priv = dev_get_drvdata(dev);            \
385         unsigned long flags;                                            \
386                                                                         \
387         if (ch_done_mask & 0x10000000)                                  \
388                 flush_channel(dev, 0, 0, 0);                    \
389         if (priv->num_channels == 1)                                    \
390                 goto out;                                               \
391         if (ch_done_mask & 0x40000000)                                  \
392                 flush_channel(dev, 1, 0, 0);                    \
393         if (ch_done_mask & 0x00010000)                                  \
394                 flush_channel(dev, 2, 0, 0);                    \
395         if (ch_done_mask & 0x00040000)                                  \
396                 flush_channel(dev, 3, 0, 0);                    \
397                                                                         \
398 out:                                                                    \
399         /* At this point, all completed channels have been processed */ \
400         /* Unmask done interrupts for channels completed later on. */   \
401         spin_lock_irqsave(&priv->reg_lock, flags);                      \
402         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
403         clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
404         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
405 }
406
407 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
408
409 #define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
410 static void talitos2_done_##name(unsigned long data)                    \
411 {                                                                       \
412         struct device *dev = (struct device *)data;                     \
413         struct talitos_private *priv = dev_get_drvdata(dev);            \
414         unsigned long flags;                                            \
415                                                                         \
416         if (ch_done_mask & 1)                                           \
417                 flush_channel(dev, 0, 0, 0);                            \
418         if (priv->num_channels == 1)                                    \
419                 goto out;                                               \
420         if (ch_done_mask & (1 << 2))                                    \
421                 flush_channel(dev, 1, 0, 0);                            \
422         if (ch_done_mask & (1 << 4))                                    \
423                 flush_channel(dev, 2, 0, 0);                            \
424         if (ch_done_mask & (1 << 6))                                    \
425                 flush_channel(dev, 3, 0, 0);                            \
426                                                                         \
427 out:                                                                    \
428         /* At this point, all completed channels have been processed */ \
429         /* Unmask done interrupts for channels completed later on. */   \
430         spin_lock_irqsave(&priv->reg_lock, flags);                      \
431         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
432         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
433         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
434 }
435
436 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
437 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
438 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
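/*
 * These generated handlers run as tasklets.  The probe code (later in this
 * file, not shown in this excerpt) wires them into priv->done_task[] roughly
 * as follows (a sketch, not a verbatim quote):
 *
 *	tasklet_init(&priv->done_task[0], talitos2_done_4ch, (unsigned long)dev);
 *
 * with the ch0_2/ch1_3 variants used instead when a second IRQ line is
 * present, so that each interrupt only flushes the channels it owns.
 */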
439
440 /*
441  * locate current (offending) descriptor
442  */
443 static u32 current_desc_hdr(struct device *dev, int ch)
444 {
445         struct talitos_private *priv = dev_get_drvdata(dev);
446         int tail, iter;
447         dma_addr_t cur_desc;
448
449         cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
450         cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
451
452         if (!cur_desc) {
453                 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
454                 return 0;
455         }
456
457         tail = priv->chan[ch].tail;
458
459         iter = tail;
460         while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
461                 iter = (iter + 1) & (priv->fifo_len - 1);
462                 if (iter == tail) {
463                         dev_err(dev, "couldn't locate current descriptor\n");
464                         return 0;
465                 }
466         }
467
468         return priv->chan[ch].fifo[iter].desc->hdr;
469 }
470
471 /*
472  * user diagnostics; report root cause of error based on execution unit status
473  */
474 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
475 {
476         struct talitos_private *priv = dev_get_drvdata(dev);
477         int i;
478
479         if (!desc_hdr)
480                 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
481
482         switch (desc_hdr & DESC_HDR_SEL0_MASK) {
483         case DESC_HDR_SEL0_AFEU:
484                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
485                         in_be32(priv->reg_afeu + TALITOS_EUISR),
486                         in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
487                 break;
488         case DESC_HDR_SEL0_DEU:
489                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
490                         in_be32(priv->reg_deu + TALITOS_EUISR),
491                         in_be32(priv->reg_deu + TALITOS_EUISR_LO));
492                 break;
493         case DESC_HDR_SEL0_MDEUA:
494         case DESC_HDR_SEL0_MDEUB:
495                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
496                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
497                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
498                 break;
499         case DESC_HDR_SEL0_RNG:
500                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
501                         in_be32(priv->reg_rngu + TALITOS_ISR),
502                         in_be32(priv->reg_rngu + TALITOS_ISR_LO));
503                 break;
504         case DESC_HDR_SEL0_PKEU:
505                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
506                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
507                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
508                 break;
509         case DESC_HDR_SEL0_AESU:
510                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
511                         in_be32(priv->reg_aesu + TALITOS_EUISR),
512                         in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
513                 break;
514         case DESC_HDR_SEL0_CRCU:
515                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
516                         in_be32(priv->reg_crcu + TALITOS_EUISR),
517                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
518                 break;
519         case DESC_HDR_SEL0_KEU:
520                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
521                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
522                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
523                 break;
524         }
525
526         switch (desc_hdr & DESC_HDR_SEL1_MASK) {
527         case DESC_HDR_SEL1_MDEUA:
528         case DESC_HDR_SEL1_MDEUB:
529                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
530                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
531                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
532                 break;
533         case DESC_HDR_SEL1_CRCU:
534                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
535                         in_be32(priv->reg_crcu + TALITOS_EUISR),
536                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
537                 break;
538         }
539
540         for (i = 0; i < 8; i++)
541                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
542                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
543                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
544 }
545
546 /*
547  * recover from error interrupts
548  */
549 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
550 {
551         struct talitos_private *priv = dev_get_drvdata(dev);
552         unsigned int timeout = TALITOS_TIMEOUT;
553         int ch, error, reset_dev = 0;
554         u32 v_lo;
555         bool is_sec1 = has_ftr_sec1(priv);
556         int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
557
558         for (ch = 0; ch < priv->num_channels; ch++) {
559                 /* skip channels without errors */
560                 if (is_sec1) {
561                         /* bits 29, 31, 17, 19 */
562                         if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
563                                 continue;
564                 } else {
565                         if (!(isr & (1 << (ch * 2 + 1))))
566                                 continue;
567                 }
568
569                 error = -EINVAL;
570
571                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
572
573                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
574                         dev_err(dev, "double fetch fifo overflow error\n");
575                         error = -EAGAIN;
576                         reset_ch = 1;
577                 }
578                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
579                         /* h/w dropped descriptor */
580                         dev_err(dev, "single fetch fifo overflow error\n");
581                         error = -EAGAIN;
582                 }
583                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
584                         dev_err(dev, "master data transfer error\n");
585                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
586                         dev_err(dev, is_sec1 ? "pointer not complete error\n"
587                                              : "s/g data length zero error\n");
588                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
589                         dev_err(dev, is_sec1 ? "parity error\n"
590                                              : "fetch pointer zero error\n");
591                 if (v_lo & TALITOS_CCPSR_LO_IDH)
592                         dev_err(dev, "illegal descriptor header error\n");
593                 if (v_lo & TALITOS_CCPSR_LO_IEU)
594                         dev_err(dev, is_sec1 ? "static assignment error\n"
595                                              : "invalid exec unit error\n");
596                 if (v_lo & TALITOS_CCPSR_LO_EU)
597                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
598                 if (!is_sec1) {
599                         if (v_lo & TALITOS_CCPSR_LO_GB)
600                                 dev_err(dev, "gather boundary error\n");
601                         if (v_lo & TALITOS_CCPSR_LO_GRL)
602                                 dev_err(dev, "gather return/length error\n");
603                         if (v_lo & TALITOS_CCPSR_LO_SB)
604                                 dev_err(dev, "scatter boundary error\n");
605                         if (v_lo & TALITOS_CCPSR_LO_SRL)
606                                 dev_err(dev, "scatter return/length error\n");
607                 }
608
609                 flush_channel(dev, ch, error, reset_ch);
610
611                 if (reset_ch) {
612                         reset_channel(dev, ch);
613                 } else {
614                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
615                                   TALITOS2_CCCR_CONT);
616                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
617                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
618                                TALITOS2_CCCR_CONT) && --timeout)
619                                 cpu_relax();
620                         if (timeout == 0) {
621                                 dev_err(dev, "failed to restart channel %d\n",
622                                         ch);
623                                 reset_dev = 1;
624                         }
625                 }
626         }
627         if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
628             (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
629                 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
630                         dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
631                                 isr, isr_lo);
632                 else
633                         dev_err(dev, "done overflow, internal time out, or "
634                                 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
635
636                 /* purge request queues */
637                 for (ch = 0; ch < priv->num_channels; ch++)
638                         flush_channel(dev, ch, -EIO, 1);
639
640                 /* reset and reinitialize the device */
641                 init_device(dev);
642         }
643 }
644
645 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
646 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
647 {                                                                              \
648         struct device *dev = data;                                             \
649         struct talitos_private *priv = dev_get_drvdata(dev);                   \
650         u32 isr, isr_lo;                                                       \
651         unsigned long flags;                                                   \
652                                                                                \
653         spin_lock_irqsave(&priv->reg_lock, flags);                             \
654         isr = in_be32(priv->reg + TALITOS_ISR);                                \
655         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
656         /* Acknowledge interrupt */                                            \
657         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
658         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
659                                                                                \
660         if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
661                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
662                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
663         }                                                                      \
664         else {                                                                 \
665                 if (likely(isr & ch_done_mask)) {                              \
666                         /* mask further done interrupts. */                    \
667                         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
668                         /* done_task will unmask done interrupts at exit */    \
669                         tasklet_schedule(&priv->done_task[tlet]);              \
670                 }                                                              \
671                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
672         }                                                                      \
673                                                                                \
674         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
675                                                                 IRQ_NONE;      \
676 }
677
678 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
679
680 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
681 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
682 {                                                                              \
683         struct device *dev = data;                                             \
684         struct talitos_private *priv = dev_get_drvdata(dev);                   \
685         u32 isr, isr_lo;                                                       \
686         unsigned long flags;                                                   \
687                                                                                \
688         spin_lock_irqsave(&priv->reg_lock, flags);                             \
689         isr = in_be32(priv->reg + TALITOS_ISR);                                \
690         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
691         /* Acknowledge interrupt */                                            \
692         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
693         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
694                                                                                \
695         if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
696                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
697                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
698         }                                                                      \
699         else {                                                                 \
700                 if (likely(isr & ch_done_mask)) {                              \
701                         /* mask further done interrupts. */                    \
702                         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
703                         /* done_task will unmask done interrupts at exit */    \
704                         tasklet_schedule(&priv->done_task[tlet]);              \
705                 }                                                              \
706                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
707         }                                                                      \
708                                                                                \
709         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
710                                                                 IRQ_NONE;      \
711 }
712
713 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
714 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
715                        0)
716 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
717                        1)
718
719 /*
720  * hwrng
721  */
722 static int talitos_rng_data_present(struct hwrng *rng, int wait)
723 {
724         struct device *dev = (struct device *)rng->priv;
725         struct talitos_private *priv = dev_get_drvdata(dev);
726         u32 ofl;
727         int i;
728
729         for (i = 0; i < 20; i++) {
730                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
731                       TALITOS_RNGUSR_LO_OFL;
732                 if (ofl || !wait)
733                         break;
734                 udelay(10);
735         }
736
737         return !!ofl;
738 }
739
740 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
741 {
742         struct device *dev = (struct device *)rng->priv;
743         struct talitos_private *priv = dev_get_drvdata(dev);
744
745         /* rng fifo requires 64-bit accesses */
746         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
747         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
748
749         return sizeof(u32);
750 }
751
752 static int talitos_rng_init(struct hwrng *rng)
753 {
754         struct device *dev = (struct device *)rng->priv;
755         struct talitos_private *priv = dev_get_drvdata(dev);
756         unsigned int timeout = TALITOS_TIMEOUT;
757
758         setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
759         while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
760                  & TALITOS_RNGUSR_LO_RD)
761                && --timeout)
762                 cpu_relax();
763         if (timeout == 0) {
764                 dev_err(dev, "failed to reset rng hw\n");
765                 return -ENODEV;
766         }
767
768         /* start generating */
769         setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
770
771         return 0;
772 }
773
774 static int talitos_register_rng(struct device *dev)
775 {
776         struct talitos_private *priv = dev_get_drvdata(dev);
777         int err;
778
779         priv->rng.name          = dev_driver_string(dev);
780         priv->rng.init          = talitos_rng_init;
781         priv->rng.data_present  = talitos_rng_data_present;
782         priv->rng.data_read     = talitos_rng_data_read;
783         priv->rng.priv          = (unsigned long)dev;
784
785         err = hwrng_register(&priv->rng);
786         if (!err)
787                 priv->rng_registered = true;
788
789         return err;
790 }
791
792 static void talitos_unregister_rng(struct device *dev)
793 {
794         struct talitos_private *priv = dev_get_drvdata(dev);
795
796         if (!priv->rng_registered)
797                 return;
798
799         hwrng_unregister(&priv->rng);
800         priv->rng_registered = false;
801 }
802
803 /*
804  * crypto alg
805  */
806 #define TALITOS_CRA_PRIORITY            3000
807 #define TALITOS_MAX_KEY_SIZE            96
808 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
809
810 struct talitos_ctx {
811         struct device *dev;
812         int ch;
813         __be32 desc_hdr_template;
814         u8 key[TALITOS_MAX_KEY_SIZE];
815         u8 iv[TALITOS_MAX_IV_LENGTH];
816         unsigned int keylen;
817         unsigned int enckeylen;
818         unsigned int authkeylen;
819 };
820
821 #define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
822 #define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
823
824 struct talitos_ahash_req_ctx {
825         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
826         unsigned int hw_context_size;
827         u8 buf[HASH_MAX_BLOCK_SIZE];
828         u8 bufnext[HASH_MAX_BLOCK_SIZE];
829         unsigned int swinit;
830         unsigned int first;
831         unsigned int last;
832         unsigned int to_hash_later;
833         unsigned int nbuf;
834         struct scatterlist bufsl[2];
835         struct scatterlist *psrc;
836 };
837
838 static int aead_setkey(struct crypto_aead *authenc,
839                        const u8 *key, unsigned int keylen)
840 {
841         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
842         struct crypto_authenc_keys keys;
843
844         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
845                 goto badkey;
846
847         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
848                 goto badkey;
849
850         memcpy(ctx->key, keys.authkey, keys.authkeylen);
851         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
852
853         ctx->keylen = keys.authkeylen + keys.enckeylen;
854         ctx->enckeylen = keys.enckeylen;
855         ctx->authkeylen = keys.authkeylen;
856
857         return 0;
858
859 badkey:
860         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
861         return -EINVAL;
862 }
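/*
 * Resulting key layout (summary of the copies above): ctx->key holds the
 * authentication key immediately followed by the cipher key,
 *
 *	ctx->key: [ authkey (authkeylen bytes) | enckey (enckeylen bytes) ]
 *
 * which is what lets ipsec_esp() below map desc->ptr[0] from &ctx->key with
 * length ctx->authkeylen and desc->ptr[3] from ctx->key + ctx->authkeylen
 * with length ctx->enckeylen.
 */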
863
864 /*
865  * talitos_edesc - s/w-extended descriptor
866  * @src_nents: number of segments in input scatterlist
867  * @dst_nents: number of segments in output scatterlist
868  * @icv_ool: whether ICV is out-of-line
869  * @iv_dma: dma address of iv for checking continuity and link table
870  * @dma_len: length of dma mapped link_tbl space
871  * @dma_link_tbl: bus physical address of link_tbl/buf
872  * @desc: h/w descriptor
873  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
874  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
875  *
876  * if decrypting (with authcheck), or either one of src_nents or dst_nents
877  * is greater than 1, an integrity check value is concatenated to the end
878  * of link_tbl data
879  */
880 struct talitos_edesc {
881         int src_nents;
882         int dst_nents;
883         bool icv_ool;
884         dma_addr_t iv_dma;
885         int dma_len;
886         dma_addr_t dma_link_tbl;
887         struct talitos_desc desc;
888         union {
889                 struct talitos_ptr link_tbl[0];
890                 u8 buf[0];
891         };
892 };
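/*
 * Trailing-array layout sketch, as sized by talitos_edesc_alloc() below:
 * on SEC2+ the source and destination link tables (plus two extra entries
 * for associated data and the generated ICV) are followed by room for two
 * ICVs; SEC1 instead uses a flat bounce buffer:
 *
 *	SEC2+:  [ edesc | link_tbl: (src_nents + dst_nents + 2) entries | 2 * authsize ]
 *	SEC1:   [ edesc | buf: up to cryptlen bytes each for src and dst ]
 */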
893
894 static void talitos_sg_unmap(struct device *dev,
895                              struct talitos_edesc *edesc,
896                              struct scatterlist *src,
897                              struct scatterlist *dst)
898 {
899         unsigned int src_nents = edesc->src_nents ? : 1;
900         unsigned int dst_nents = edesc->dst_nents ? : 1;
901
902         if (src != dst) {
903                 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
904
905                 if (dst) {
906                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
907                 }
908         } else
909                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
910 }
911
912 static void ipsec_esp_unmap(struct device *dev,
913                             struct talitos_edesc *edesc,
914                             struct aead_request *areq)
915 {
916         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
917         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
918         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
919         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
920
921         talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
922
923         if (edesc->dma_len)
924                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
925                                  DMA_BIDIRECTIONAL);
926 }
927
928 /*
929  * ipsec_esp descriptor callbacks
930  */
931 static void ipsec_esp_encrypt_done(struct device *dev,
932                                    struct talitos_desc *desc, void *context,
933                                    int err)
934 {
935         struct aead_request *areq = context;
936         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
937         unsigned int authsize = crypto_aead_authsize(authenc);
938         struct talitos_edesc *edesc;
939         struct scatterlist *sg;
940         void *icvdata;
941
942         edesc = container_of(desc, struct talitos_edesc, desc);
943
944         ipsec_esp_unmap(dev, edesc, areq);
945
946         /* copy the generated ICV to dst */
947         if (edesc->icv_ool) {
948                 icvdata = &edesc->link_tbl[edesc->src_nents +
949                                            edesc->dst_nents + 2];
950                 sg = sg_last(areq->dst, edesc->dst_nents);
951                 memcpy((char *)sg_virt(sg) + sg->length - authsize,
952                        icvdata, authsize);
953         }
954
955         kfree(edesc);
956
957         aead_request_complete(areq, err);
958 }
959
960 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
961                                           struct talitos_desc *desc,
962                                           void *context, int err)
963 {
964         struct aead_request *req = context;
965         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
966         unsigned int authsize = crypto_aead_authsize(authenc);
967         struct talitos_edesc *edesc;
968         struct scatterlist *sg;
969         char *oicv, *icv;
970
971         edesc = container_of(desc, struct talitos_edesc, desc);
972
973         ipsec_esp_unmap(dev, edesc, req);
974
975         if (!err) {
976                 /* auth check */
977                 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
978                 icv = (char *)sg_virt(sg) + sg->length - authsize;
979
980                 if (edesc->dma_len) {
981                         oicv = (char *)&edesc->link_tbl[edesc->src_nents +
982                                                         edesc->dst_nents + 2];
983                         if (edesc->icv_ool)
984                                 icv = oicv + authsize;
985                 } else
986                         oicv = (char *)&edesc->link_tbl[0];
987
988                 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
989         }
990
991         kfree(edesc);
992
993         aead_request_complete(req, err);
994 }
995
996 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
997                                           struct talitos_desc *desc,
998                                           void *context, int err)
999 {
1000         struct aead_request *req = context;
1001         struct talitos_edesc *edesc;
1002
1003         edesc = container_of(desc, struct talitos_edesc, desc);
1004
1005         ipsec_esp_unmap(dev, edesc, req);
1006
1007         /* check ICV auth status */
1008         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1009                      DESC_HDR_LO_ICCR1_PASS))
1010                 err = -EBADMSG;
1011
1012         kfree(edesc);
1013
1014         aead_request_complete(req, err);
1015 }
1016
1017 /*
1018  * convert scatterlist to SEC h/w link table format
1019  * stop at cryptlen bytes
1020  */
1021 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1022                                  unsigned int offset, int cryptlen,
1023                                  struct talitos_ptr *link_tbl_ptr)
1024 {
1025         int n_sg = sg_count;
1026         int count = 0;
1027
1028         while (cryptlen && sg && n_sg--) {
1029                 unsigned int len = sg_dma_len(sg);
1030
1031                 if (offset >= len) {
1032                         offset -= len;
1033                         goto next;
1034                 }
1035
1036                 len -= offset;
1037
1038                 if (len > cryptlen)
1039                         len = cryptlen;
1040
1041                 to_talitos_ptr(link_tbl_ptr + count,
1042                                sg_dma_address(sg) + offset, 0);
1043                 link_tbl_ptr[count].len = cpu_to_be16(len);
1044                 link_tbl_ptr[count].j_extent = 0;
1045                 count++;
1046                 cryptlen -= len;
1047                 offset = 0;
1048
1049 next:
1050                 sg = sg_next(sg);
1051         }
1052
1053         /* tag end of link table */
1054         if (count > 0)
1055                 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
1056
1057         return count;
1058 }
1059
1060 static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1061                                  int cryptlen,
1062                                  struct talitos_ptr *link_tbl_ptr)
1063 {
1064         return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
1065                                      link_tbl_ptr);
1066 }
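/*
 * Resulting table sketch (SEC2+ format, as built above): each entry holds
 * the dma address and length of one scatterlist segment, and the final
 * entry is tagged so the engine returns to the main descriptor:
 *
 *	link_tbl[0]:     { len, j_extent = 0,                      ptr }
 *	...
 *	link_tbl[n - 1]: { len, j_extent = DESC_PTR_LNKTBL_RETURN, ptr }
 *
 * A descriptor pointer that references such a table carries
 * DESC_PTR_LNKTBL_JUMP in its own j_extent (see ipsec_esp() below).
 */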
1067
1068 /*
1069  * fill in and submit ipsec_esp descriptor
1070  */
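/*
 * Descriptor pointer usage in this function, in the order the fields are
 * filled in below:
 *
 *	ptr[0]: HMAC key		ptr[1]: HMAC (associated) data
 *	ptr[2]: cipher IV		ptr[3]: cipher key
 *	ptr[4]: cipher in		ptr[5]: cipher out (+ ICV via j_extent)
 *	ptr[6]: IV out
 */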
1071 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1072                      void (*callback)(struct device *dev,
1073                                       struct talitos_desc *desc,
1074                                       void *context, int error))
1075 {
1076         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1077         unsigned int authsize = crypto_aead_authsize(aead);
1078         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1079         struct device *dev = ctx->dev;
1080         struct talitos_desc *desc = &edesc->desc;
1081         unsigned int cryptlen = areq->cryptlen;
1082         unsigned int ivsize = crypto_aead_ivsize(aead);
1083         int tbl_off = 0;
1084         int sg_count, ret;
1085         int sg_link_tbl_len;
1086
1087         /* hmac key */
1088         map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1089                                DMA_TO_DEVICE);
1090
1091         sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1092                               (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1093                                                            : DMA_TO_DEVICE);
1094         /* hmac data */
1095         desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1096         if (sg_count > 1 &&
1097             (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1098                                          areq->assoclen,
1099                                          &edesc->link_tbl[tbl_off])) > 1) {
1100                 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1101                                sizeof(struct talitos_ptr), 0);
1102                 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1103
1104                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1105                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1106
1107                 tbl_off += ret;
1108         } else {
1109                 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1110                 desc->ptr[1].j_extent = 0;
1111         }
1112
1113         /* cipher iv */
1114         to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
1115         desc->ptr[2].len = cpu_to_be16(ivsize);
1116         desc->ptr[2].j_extent = 0;
1117
1118         /* cipher key */
1119         map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1120                                (char *)&ctx->key + ctx->authkeylen,
1121                                DMA_TO_DEVICE);
1122
1123         /*
1124          * cipher in
1125          * map and adjust cipher len to aead request cryptlen.
1126          * extent is bytes of HMAC postpended to ciphertext,
1127          * extent is bytes of HMAC appended to ciphertext,
1128          */
1129         desc->ptr[4].len = cpu_to_be16(cryptlen);
1130         desc->ptr[4].j_extent = authsize;
1131
1132         sg_link_tbl_len = cryptlen;
1133         if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1134                 sg_link_tbl_len += authsize;
1135
1136         if (sg_count == 1) {
1137                 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
1138                                areq->assoclen, 0);
1139         } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
1140                                                 areq->assoclen, sg_link_tbl_len,
1141                                                 &edesc->link_tbl[tbl_off])) >
1142                    1) {
1143                 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1144                 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1145                                               tbl_off *
1146                                               sizeof(struct talitos_ptr), 0);
1147                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1148                                            edesc->dma_len,
1149                                            DMA_BIDIRECTIONAL);
1150                 tbl_off += ret;
1151         } else {
1152                 copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
1153         }
1154
1155         /* cipher out */
1156         desc->ptr[5].len = cpu_to_be16(cryptlen);
1157         desc->ptr[5].j_extent = authsize;
1158
1159         if (areq->src != areq->dst)
1160                 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
1161                                       DMA_FROM_DEVICE);
1162
1163         edesc->icv_ool = false;
1164
1165         if (sg_count == 1) {
1166                 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
1167                                areq->assoclen, 0);
1168         } else if ((sg_count =
1169                         sg_to_link_tbl_offset(areq->dst, sg_count,
1170                                               areq->assoclen, cryptlen,
1171                                               &edesc->link_tbl[tbl_off])) > 1) {
1172                 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1173
1174                 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1175                                tbl_off * sizeof(struct talitos_ptr), 0);
1176
1177                 /* Add an entry to the link table for ICV data */
1178                 tbl_ptr += sg_count - 1;
1179                 tbl_ptr->j_extent = 0;
1180                 tbl_ptr++;
1181                 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1182                 tbl_ptr->len = cpu_to_be16(authsize);
1183
1184                 /* icv data follows link tables */
1185                 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1186                                         (edesc->src_nents + edesc->dst_nents +
1187                                          2) * sizeof(struct talitos_ptr) +
1188                                         authsize, 0);
1189                 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1190                 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1191                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1192
1193                 edesc->icv_ool = true;
1194         } else {
1195                 copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
1196         }
1197
1198         /* iv out */
1199         map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1200                                DMA_FROM_DEVICE);
1201
1202         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1203         if (ret != -EINPROGRESS) {
1204                 ipsec_esp_unmap(dev, edesc, areq);
1205                 kfree(edesc);
1206         }
1207         return ret;
1208 }
1209
1210 /*
1211  * allocate and map the extended descriptor
1212  */
1213 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1214                                                  struct scatterlist *src,
1215                                                  struct scatterlist *dst,
1216                                                  u8 *iv,
1217                                                  unsigned int assoclen,
1218                                                  unsigned int cryptlen,
1219                                                  unsigned int authsize,
1220                                                  unsigned int ivsize,
1221                                                  int icv_stashing,
1222                                                  u32 cryptoflags,
1223                                                  bool encrypt)
1224 {
1225         struct talitos_edesc *edesc;
1226         int src_nents, dst_nents, alloc_len, dma_len;
1227         dma_addr_t iv_dma = 0;
1228         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1229                       GFP_ATOMIC;
1230         struct talitos_private *priv = dev_get_drvdata(dev);
1231         bool is_sec1 = has_ftr_sec1(priv);
1232         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1233         void *err;
1234
1235         if (cryptlen + authsize > max_len) {
1236                 dev_err(dev, "length exceeds h/w max limit\n");
1237                 return ERR_PTR(-EINVAL);
1238         }
1239
1240         if (ivsize)
1241                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1242
1243         if (!dst || dst == src) {
1244                 src_nents = sg_nents_for_len(src,
1245                                              assoclen + cryptlen + authsize);
1246                 if (src_nents < 0) {
1247                         dev_err(dev, "Invalid number of src SG.\n");
1248                         err = ERR_PTR(-EINVAL);
1249                         goto error_sg;
1250                 }
1251                 src_nents = (src_nents == 1) ? 0 : src_nents;
1252                 dst_nents = dst ? src_nents : 0;
1253         } else { /* dst && dst != src*/
1254                 src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1255                                                  (encrypt ? 0 : authsize));
1256                 if (src_nents < 0) {
1257                         dev_err(dev, "Invalid number of src SG.\n");
1258                         err = ERR_PTR(-EINVAL);
1259                         goto error_sg;
1260                 }
1261                 src_nents = (src_nents == 1) ? 0 : src_nents;
1262                 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1263                                                  (encrypt ? authsize : 0));
1264                 if (dst_nents < 0) {
1265                         dev_err(dev, "Invalid number of dst SG.\n");
1266                         err = ERR_PTR(-EINVAL);
1267                         goto error_sg;
1268                 }
1269                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1270         }
1271
1272         /*
1273          * allocate space for base edesc plus the link tables,
1274          * allowing for two separate entries for AD and generated ICV (+ 2),
1275          * and space for two sets of ICVs (stashed and generated)
1276          */
1277         alloc_len = sizeof(struct talitos_edesc);
1278         if (src_nents || dst_nents) {
1279                 if (is_sec1)
1280                         dma_len = (src_nents ? cryptlen : 0) +
1281                                   (dst_nents ? cryptlen : 0);
1282                 else
1283                         dma_len = (src_nents + dst_nents + 2) *
1284                                   sizeof(struct talitos_ptr) + authsize * 2;
1285                 alloc_len += dma_len;
1286         } else {
1287                 dma_len = 0;
1288                 alloc_len += icv_stashing ? authsize : 0;
1289         }
1290
1291         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1292         if (!edesc) {
1293                 dev_err(dev, "could not allocate edescriptor\n");
1294                 err = ERR_PTR(-ENOMEM);
1295                 goto error_sg;
1296         }
1297
1298         edesc->src_nents = src_nents;
1299         edesc->dst_nents = dst_nents;
1300         edesc->iv_dma = iv_dma;
1301         edesc->dma_len = dma_len;
1302         if (dma_len)
1303                 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1304                                                      edesc->dma_len,
1305                                                      DMA_BIDIRECTIONAL);
1306
1307         return edesc;
1308 error_sg:
1309         if (iv_dma)
1310                 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1311         return err;
1312 }
1313
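/*
 * Allocate an extended descriptor for an AEAD request, deriving authsize
 * and ivsize from the transform and handing the request's scatterlists
 * and lengths to talitos_edesc_alloc().
 */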
1314 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1315                                               int icv_stashing, bool encrypt)
1316 {
1317         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1318         unsigned int authsize = crypto_aead_authsize(authenc);
1319         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1320         unsigned int ivsize = crypto_aead_ivsize(authenc);
1321
1322         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1323                                    iv, areq->assoclen, areq->cryptlen,
1324                                    authsize, ivsize, icv_stashing,
1325                                    areq->base.flags, encrypt);
1326 }
1327
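/*
 * AEAD encrypt entry point: build an extended descriptor, mark the
 * descriptor header for encryption and submit it via ipsec_esp().
 */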
1328 static int aead_encrypt(struct aead_request *req)
1329 {
1330         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1331         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1332         struct talitos_edesc *edesc;
1333
1334         /* allocate extended descriptor */
1335         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1336         if (IS_ERR(edesc))
1337                 return PTR_ERR(edesc);
1338
1339         /* set encrypt */
1340         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1341
1342         return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1343 }
1344
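/*
 * AEAD decrypt entry point.  When the hardware can check the ICV itself
 * (TALITOS_FTR_HW_AUTH_CHECK plus a compatible S/G layout), request CICV
 * checking; otherwise stash the incoming ICV so the completion callback
 * can compare it against the one generated by the hardware.
 */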
1345 static int aead_decrypt(struct aead_request *req)
1346 {
1347         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1348         unsigned int authsize = crypto_aead_authsize(authenc);
1349         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1350         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1351         struct talitos_edesc *edesc;
1352         struct scatterlist *sg;
1353         void *icvdata;
1354
1355         req->cryptlen -= authsize;
1356
1357         /* allocate extended descriptor */
1358         edesc = aead_edesc_alloc(req, req->iv, 1, false);
1359         if (IS_ERR(edesc))
1360                 return PTR_ERR(edesc);
1361
1362         if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1363             ((!edesc->src_nents && !edesc->dst_nents) ||
1364              priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1365
1366                 /* decrypt and check the ICV */
1367                 edesc->desc.hdr = ctx->desc_hdr_template |
1368                                   DESC_HDR_DIR_INBOUND |
1369                                   DESC_HDR_MODE1_MDEU_CICV;
1370
1371                 /* reset integrity check result bits */
1372                 edesc->desc.hdr_lo = 0;
1373
1374                 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1375         }
1376
1377         /* Have to check the ICV with software */
1378         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1379
1380         /* stash incoming ICV for later comparison with the h/w-generated ICV */
1381         if (edesc->dma_len)
1382                 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1383                                                    edesc->dst_nents + 2];
1384         else
1385                 icvdata = &edesc->link_tbl[0];
1386
1387         sg = sg_last(req->src, edesc->src_nents ? : 1);
1388
1389         memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1390
1391         return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1392 }
1393
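/* Copy the raw cipher key and its length into the transform context. */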
1394 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1395                              const u8 *key, unsigned int keylen)
1396 {
1397         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1398
1399         memcpy(&ctx->key, key, keylen);
1400         ctx->keylen = keylen;
1401
1402         return 0;
1403 }
1404
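/*
 * Undo the source/destination mappings made by map_sg_in/out_talitos_ptr().
 * On SEC1 a multi-segment destination is copied back out of the driver's
 * bounce buffer; on SEC2+ this simply defers to talitos_sg_unmap().
 */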
1405 static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1406                                  struct scatterlist *dst, unsigned int len,
1407                                  struct talitos_edesc *edesc)
1408 {
1409         struct talitos_private *priv = dev_get_drvdata(dev);
1410         bool is_sec1 = has_ftr_sec1(priv);
1411
1412         if (is_sec1) {
1413                 if (!edesc->src_nents) {
1414                         dma_unmap_sg(dev, src, 1,
1415                                      dst != src ? DMA_TO_DEVICE
1416                                                 : DMA_BIDIRECTIONAL);
1417                 }
1418                 if (dst && edesc->dst_nents) {
1419                         dma_sync_single_for_device(dev,
1420                                                    edesc->dma_link_tbl + len,
1421                                                    len, DMA_FROM_DEVICE);
1422                         sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1423                                             edesc->buf + len, len);
1424                 } else if (dst && dst != src) {
1425                         dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1426                 }
1427         } else {
1428                 talitos_sg_unmap(dev, edesc, src, dst);
1429         }
1430 }
1431
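/* Undo all DMA mappings made while building an ablkcipher descriptor. */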
1432 static void common_nonsnoop_unmap(struct device *dev,
1433                                   struct talitos_edesc *edesc,
1434                                   struct ablkcipher_request *areq)
1435 {
1436         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1437
1438         unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
1439         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1440         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1441
1442         if (edesc->dma_len)
1443                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1444                                  DMA_BIDIRECTIONAL);
1445 }
1446
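/* Completion callback for ablkcipher requests: unmap, free, complete. */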
1447 static void ablkcipher_done(struct device *dev,
1448                             struct talitos_desc *desc, void *context,
1449                             int err)
1450 {
1451         struct ablkcipher_request *areq = context;
1452         struct talitos_edesc *edesc;
1453
1454         edesc = container_of(desc, struct talitos_edesc, desc);
1455
1456         common_nonsnoop_unmap(dev, edesc, areq);
1457
1458         kfree(edesc);
1459
1460         areq->base.complete(&areq->base, err);
1461 }
1462
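/*
 * Map the source scatterlist into a descriptor pointer.  SEC1 descriptors
 * use no link table here, so multi-segment input is linearized into the
 * edesc bounce buffer; SEC2+ builds a link table and sets the JUMP extent
 * bit when more than one segment remains after DMA mapping.  Returns the
 * number of mapped segments.
 */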
1463 int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1464                           unsigned int len, struct talitos_edesc *edesc,
1465                           enum dma_data_direction dir, struct talitos_ptr *ptr)
1466 {
1467         int sg_count;
1468         struct talitos_private *priv = dev_get_drvdata(dev);
1469         bool is_sec1 = has_ftr_sec1(priv);
1470
1471         to_talitos_ptr_len(ptr, len, is_sec1);
1472
1473         if (is_sec1) {
1474                 sg_count = edesc->src_nents ? : 1;
1475
1476                 if (sg_count == 1) {
1477                         dma_map_sg(dev, src, 1, dir);
1478                         to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1479                 } else {
1480                         sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1481                         to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1482                         dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1483                                                    len, DMA_TO_DEVICE);
1484                 }
1485         } else {
1486                 to_talitos_ptr_extent_clear(ptr, is_sec1);
1487
1488                 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
1489
1490                 if (sg_count == 1) {
1491                         to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1492                 } else {
1493                         sg_count = sg_to_link_tbl(src, sg_count, len,
1494                                                   &edesc->link_tbl[0]);
1495                         if (sg_count > 1) {
1496                                 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1497                                 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1498                                 dma_sync_single_for_device(dev,
1499                                                            edesc->dma_link_tbl,
1500                                                            edesc->dma_len,
1501                                                            DMA_BIDIRECTIONAL);
1502                         } else {
1503                                 /* Only one segment now, so no link tbl needed */
1504                                 to_talitos_ptr(ptr, sg_dma_address(src),
1505                                                is_sec1);
1506                         }
1507                 }
1508         }
1509         return sg_count;
1510 }
1511
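/*
 * Map the destination scatterlist into a descriptor pointer.  On SEC1 a
 * multi-segment destination is gathered at offset len in the bounce buffer
 * and copied back at unmap time; on SEC2+ a link table is built after the
 * source entries when more than one segment is needed.
 */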
1512 void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1513                             unsigned int len, struct talitos_edesc *edesc,
1514                             enum dma_data_direction dir,
1515                             struct talitos_ptr *ptr, int sg_count)
1516 {
1517         struct talitos_private *priv = dev_get_drvdata(dev);
1518         bool is_sec1 = has_ftr_sec1(priv);
1519
1520         if (dir != DMA_NONE)
1521                 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
1522
1523         to_talitos_ptr_len(ptr, len, is_sec1);
1524
1525         if (is_sec1) {
1526                 if (sg_count == 1) {
1527                         if (dir != DMA_NONE)
1528                                 dma_map_sg(dev, dst, 1, dir);
1529                         to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1530                 } else {
1531                         to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1532                         dma_sync_single_for_device(dev,
1533                                                    edesc->dma_link_tbl + len,
1534                                                    len, DMA_FROM_DEVICE);
1535                 }
1536         } else {
1537                 to_talitos_ptr_extent_clear(ptr, is_sec1);
1538
1539                 if (sg_count == 1) {
1540                         to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1541                 } else {
1542                         struct talitos_ptr *link_tbl_ptr =
1543                                 &edesc->link_tbl[edesc->src_nents + 1];
1544
1545                         to_talitos_ptr(ptr, edesc->dma_link_tbl +
1546                                             (edesc->src_nents + 1) *
1547                                              sizeof(struct talitos_ptr), 0);
1548                         ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1549                         sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1550                         dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1551                                                    edesc->dma_len,
1552                                                    DMA_BIDIRECTIONAL);
1553                 }
1554         }
1555 }
1556
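/*
 * Build and submit the descriptor for an ablkcipher request:
 * ptr[1] = IV in, ptr[2] = key, ptr[3] = data in, ptr[4] = data out,
 * ptr[5] = IV out; ptr[0] and ptr[6] stay empty.  On submission failure
 * everything is unmapped and the edesc freed here.
 */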
1557 static int common_nonsnoop(struct talitos_edesc *edesc,
1558                            struct ablkcipher_request *areq,
1559                            void (*callback) (struct device *dev,
1560                                              struct talitos_desc *desc,
1561                                              void *context, int error))
1562 {
1563         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1564         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1565         struct device *dev = ctx->dev;
1566         struct talitos_desc *desc = &edesc->desc;
1567         unsigned int cryptlen = areq->nbytes;
1568         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1569         int sg_count, ret;
1570         struct talitos_private *priv = dev_get_drvdata(dev);
1571         bool is_sec1 = has_ftr_sec1(priv);
1572
1573         /* first DWORD empty */
1574         desc->ptr[0] = zero_entry;
1575
1576         /* cipher iv */
1577         to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1578         to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1579         to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
1580
1581         /* cipher key */
1582         map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1583                                (char *)&ctx->key, DMA_TO_DEVICE);
1584
1585         /*
1586          * cipher in
1587          */
1588         sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1589                                          (areq->src == areq->dst) ?
1590                                           DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1591                                           &desc->ptr[3]);
1592
1593         /* cipher out */
1594         map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1595                                (areq->src == areq->dst) ? DMA_NONE
1596                                                         : DMA_FROM_DEVICE,
1597                                &desc->ptr[4], sg_count);
1598
1599         /* iv out */
1600         map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1601                                DMA_FROM_DEVICE);
1602
1603         /* last DWORD empty */
1604         desc->ptr[6] = zero_entry;
1605
1606         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1607         if (ret != -EINPROGRESS) {
1608                 common_nonsnoop_unmap(dev, edesc, areq);
1609                 kfree(edesc);
1610         }
1611         return ret;
1612 }
1613
1614 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1615                                                     areq, bool encrypt)
1616 {
1617         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1618         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1619         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1620
1621         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1622                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1623                                    areq->base.flags, encrypt);
1624 }
1625
1626 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1627 {
1628         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1629         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1630         struct talitos_edesc *edesc;
1631
1632         /* allocate extended descriptor */
1633         edesc = ablkcipher_edesc_alloc(areq, true);
1634         if (IS_ERR(edesc))
1635                 return PTR_ERR(edesc);
1636
1637         /* set encrypt */
1638         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1639
1640         return common_nonsnoop(edesc, areq, ablkcipher_done);
1641 }
1642
1643 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1644 {
1645         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1646         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1647         struct talitos_edesc *edesc;
1648
1649         /* allocate extended descriptor */
1650         edesc = ablkcipher_edesc_alloc(areq, false);
1651         if (IS_ERR(edesc))
1652                 return PTR_ERR(edesc);
1653
1654         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1655
1656         return common_nonsnoop(edesc, areq, ablkcipher_done);
1657 }
1658
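/*
 * Undo the DMA mappings of a hash descriptor: the digest/context-out
 * pointer, the data-in scatterlist, the optional context-in and HMAC key
 * pointers, and the link table if one was allocated.
 */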
1659 static void common_nonsnoop_hash_unmap(struct device *dev,
1660                                        struct talitos_edesc *edesc,
1661                                        struct ahash_request *areq)
1662 {
1663         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1664         struct talitos_private *priv = dev_get_drvdata(dev);
1665         bool is_sec1 = has_ftr_sec1(priv);
1666
1667         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1668
1669         unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1670
1671         /* When using hashctx-in, must unmap it. */
1672         if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1673                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1674                                          DMA_TO_DEVICE);
1675
1676         if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
1677                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1678                                          DMA_TO_DEVICE);
1679
1680         if (edesc->dma_len)
1681                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1682                                  DMA_BIDIRECTIONAL);
1684 }
1685
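/*
 * Hash completion callback.  If a partial block was held back for the
 * next update/final/finup, move it into the request context buffer
 * before unmapping and completing the request.
 */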
1686 static void ahash_done(struct device *dev,
1687                        struct talitos_desc *desc, void *context,
1688                        int err)
1689 {
1690         struct ahash_request *areq = context;
1691         struct talitos_edesc *edesc =
1692                  container_of(desc, struct talitos_edesc, desc);
1693         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1694
1695         if (!req_ctx->last && req_ctx->to_hash_later) {
1696                 /* Position any partial block for next update/final/finup */
1697                 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1698                 req_ctx->nbuf = req_ctx->to_hash_later;
1699         }
1700         common_nonsnoop_hash_unmap(dev, edesc, areq);
1701
1702         kfree(edesc);
1703
1704         areq->base.complete(&areq->base, err);
1705 }
1706
1707 /*
1708  * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1709  * ourselves and submit a padded block
1710  */
1711 void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1712                                struct talitos_edesc *edesc,
1713                                struct talitos_ptr *ptr)
1714 {
1715         static u8 padded_hash[64] = {
1716                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1717                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1718                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1719                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1720         };
1721
1722         pr_err_once("Bug in SEC1, padding ourselves\n");
1723         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1724         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1725                                (char *)padded_hash, DMA_TO_DEVICE);
1726 }
1727
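/*
 * Build and submit the hash descriptor: ptr[1] = context in (unless the
 * hardware is asked to init it), ptr[2] = HMAC key if any, ptr[3] = data
 * in, ptr[5] = digest out on the last operation or context out otherwise.
 * On SEC1 a zero-length data pointer is replaced by a pre-padded block.
 */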
1728 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1729                                 struct ahash_request *areq, unsigned int length,
1730                                 void (*callback) (struct device *dev,
1731                                                   struct talitos_desc *desc,
1732                                                   void *context, int error))
1733 {
1734         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1735         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1736         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1737         struct device *dev = ctx->dev;
1738         struct talitos_desc *desc = &edesc->desc;
1739         int ret;
1740         struct talitos_private *priv = dev_get_drvdata(dev);
1741         bool is_sec1 = has_ftr_sec1(priv);
1742
1743         /* first DWORD empty */
1744         desc->ptr[0] = zero_entry;
1745
1746         /* hash context in */
1747         if (!req_ctx->first || req_ctx->swinit) {
1748                 map_single_talitos_ptr(dev, &desc->ptr[1],
1749                                        req_ctx->hw_context_size,
1750                                        (char *)req_ctx->hw_context,
1751                                        DMA_TO_DEVICE);
1752                 req_ctx->swinit = 0;
1753         } else {
1754                 desc->ptr[1] = zero_entry;
1755                 /* Indicate next op is not the first. */
1756                 req_ctx->first = 0;
1757         }
1758
1759         /* HMAC key */
1760         if (ctx->keylen)
1761                 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1762                                        (char *)&ctx->key, DMA_TO_DEVICE);
1763         else
1764                 desc->ptr[2] = zero_entry;
1765
1766         /*
1767          * data in
1768          */
1769         map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1770                               DMA_TO_DEVICE, &desc->ptr[3]);
1771
1772         /* fifth DWORD empty */
1773         desc->ptr[4] = zero_entry;
1774
1775         /* hash/HMAC out -or- hash context out */
1776         if (req_ctx->last)
1777                 map_single_talitos_ptr(dev, &desc->ptr[5],
1778                                        crypto_ahash_digestsize(tfm),
1779                                        areq->result, DMA_FROM_DEVICE);
1780         else
1781                 map_single_talitos_ptr(dev, &desc->ptr[5],
1782                                        req_ctx->hw_context_size,
1783                                        req_ctx->hw_context, DMA_FROM_DEVICE);
1784
1785         /* last DWORD empty */
1786         desc->ptr[6] = zero_entry;
1787
1788         if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1789                 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1790
1791         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1792         if (ret != -EINPROGRESS) {
1793                 common_nonsnoop_hash_unmap(dev, edesc, areq);
1794                 kfree(edesc);
1795         }
1796         return ret;
1797 }
1798
1799 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1800                                                unsigned int nbytes)
1801 {
1802         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1803         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1804         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1805
1806         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1807                                    nbytes, 0, 0, 0, areq->base.flags, false);
1808 }
1809
1810 static int ahash_init(struct ahash_request *areq)
1811 {
1812         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1813         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1814
1815         /* Initialize the context */
1816         req_ctx->nbuf = 0;
1817         req_ctx->first = 1; /* first indicates h/w must init its context */
1818         req_ctx->swinit = 0; /* assume h/w init of context */
1819         req_ctx->hw_context_size =
1820                 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1821                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1822                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1823
1824         return 0;
1825 }
1826
1827 /*
1828  * on h/w without explicit sha224 support, we initialize h/w context
1829  * manually with sha224 constants, and tell it to run sha256.
1830  */
1831 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1832 {
1833         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1834
1835         ahash_init(areq);
1836         req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1837
1838         req_ctx->hw_context[0] = SHA224_H0;
1839         req_ctx->hw_context[1] = SHA224_H1;
1840         req_ctx->hw_context[2] = SHA224_H2;
1841         req_ctx->hw_context[3] = SHA224_H3;
1842         req_ctx->hw_context[4] = SHA224_H4;
1843         req_ctx->hw_context[5] = SHA224_H5;
1844         req_ctx->hw_context[6] = SHA224_H6;
1845         req_ctx->hw_context[7] = SHA224_H7;
1846
1847         /* init 64-bit count */
1848         req_ctx->hw_context[8] = 0;
1849         req_ctx->hw_context[9] = 0;
1850
1851         return 0;
1852 }
1853
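/*
 * Common update/final/finup handler.  When this is not the final operation
 * and the data buffered so far plus the new bytes fit in one block, the
 * data is only buffered.  Otherwise full blocks are hashed now; the
 * trailing partial block (or one whole block when the length is an exact
 * multiple and this is not the final operation) is saved for later, and
 * any previously buffered bytes are chained in front of the request's
 * scatterlist before the descriptor is built.
 */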
1854 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1855 {
1856         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1857         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1858         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1859         struct talitos_edesc *edesc;
1860         unsigned int blocksize =
1861                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1862         unsigned int nbytes_to_hash;
1863         unsigned int to_hash_later;
1864         unsigned int nsg;
1865         int nents;
1866
1867         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1868                 /* Buffer up to one whole block */
1869                 nents = sg_nents_for_len(areq->src, nbytes);
1870                 if (nents < 0) {
1871                         dev_err(ctx->dev, "Invalid number of src SG.\n");
1872                         return nents;
1873                 }
1874                 sg_copy_to_buffer(areq->src, nents,
1875                                   req_ctx->buf + req_ctx->nbuf, nbytes);
1876                 req_ctx->nbuf += nbytes;
1877                 return 0;
1878         }
1879
1880         /* At least (blocksize + 1) bytes are available to hash */
1881         nbytes_to_hash = nbytes + req_ctx->nbuf;
1882         to_hash_later = nbytes_to_hash & (blocksize - 1);
1883
1884         if (req_ctx->last)
1885                 to_hash_later = 0;
1886         else if (to_hash_later)
1887                 /* There is a partial block. Hash the full block(s) now */
1888                 nbytes_to_hash -= to_hash_later;
1889         else {
1890                 /* Keep one block buffered */
1891                 nbytes_to_hash -= blocksize;
1892                 to_hash_later = blocksize;
1893         }
1894
1895         /* Chain in any previously buffered data */
1896         if (req_ctx->nbuf) {
1897                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1898                 sg_init_table(req_ctx->bufsl, nsg);
1899                 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1900                 if (nsg > 1)
1901                         sg_chain(req_ctx->bufsl, 2, areq->src);
1902                 req_ctx->psrc = req_ctx->bufsl;
1903         } else
1904                 req_ctx->psrc = areq->src;
1905
1906         if (to_hash_later) {
1907                 nents = sg_nents_for_len(areq->src, nbytes);
1908                 if (nents < 0) {
1909                         dev_err(ctx->dev, "Invalid number of src SG.\n");
1910                         return nents;
1911                 }
1912                 sg_pcopy_to_buffer(areq->src, nents, req_ctx->bufnext,
1913                                    to_hash_later, nbytes - to_hash_later);
1916         }
1917         req_ctx->to_hash_later = to_hash_later;
1918
1919         /* Allocate extended descriptor */
1920         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1921         if (IS_ERR(edesc))
1922                 return PTR_ERR(edesc);
1923
1924         edesc->desc.hdr = ctx->desc_hdr_template;
1925
1926         /* On last one, request SEC to pad; otherwise continue */
1927         if (req_ctx->last)
1928                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1929         else
1930                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1931
1932         /* request SEC to INIT hash. */
1933         if (req_ctx->first && !req_ctx->swinit)
1934                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1935
1936         /* When the tfm context has a keylen, it's an HMAC.
1937          * A first or last (i.e. not middle) descriptor must request HMAC.
1938          */
1939         if (ctx->keylen && (req_ctx->first || req_ctx->last))
1940                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1941
1942         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1943                                     ahash_done);
1944 }
1945
1946 static int ahash_update(struct ahash_request *areq)
1947 {
1948         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1949
1950         req_ctx->last = 0;
1951
1952         return ahash_process_req(areq, areq->nbytes);
1953 }
1954
1955 static int ahash_final(struct ahash_request *areq)
1956 {
1957         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1958
1959         req_ctx->last = 1;
1960
1961         return ahash_process_req(areq, 0);
1962 }
1963
1964 static int ahash_finup(struct ahash_request *areq)
1965 {
1966         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1967
1968         req_ctx->last = 1;
1969
1970         return ahash_process_req(areq, areq->nbytes);
1971 }
1972
1973 static int ahash_digest(struct ahash_request *areq)
1974 {
1975         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1976         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1977
1978         ahash->init(areq);
1979         req_ctx->last = 1;
1980
1981         return ahash_process_req(areq, areq->nbytes);
1982 }
1983
1984 struct keyhash_result {
1985         struct completion completion;
1986         int err;
1987 };
1988
1989 static void keyhash_complete(struct crypto_async_request *req, int err)
1990 {
1991         struct keyhash_result *res = req->data;
1992
1993         if (err == -EINPROGRESS)
1994                 return;
1995
1996         res->err = err;
1997         complete(&res->completion);
1998 }
1999
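/*
 * Digest an over-length HMAC key with this tfm, waiting on a completion
 * so the caller gets the result synchronously.
 */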
2000 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2001                    u8 *hash)
2002 {
2003         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2004
2005         struct scatterlist sg[1];
2006         struct ahash_request *req;
2007         struct keyhash_result hresult;
2008         int ret;
2009
2010         init_completion(&hresult.completion);
2011
2012         req = ahash_request_alloc(tfm, GFP_KERNEL);
2013         if (!req)
2014                 return -ENOMEM;
2015
2016         /* Keep tfm keylen == 0 during hash of the long key */
2017         ctx->keylen = 0;
2018         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2019                                    keyhash_complete, &hresult);
2020
2021         sg_init_one(&sg[0], key, keylen);
2022
2023         ahash_request_set_crypt(req, sg, hash, keylen);
2024         ret = crypto_ahash_digest(req);
2025         switch (ret) {
2026         case 0:
2027                 break;
2028         case -EINPROGRESS:
2029         case -EBUSY:
2030                 ret = wait_for_completion_interruptible(
2031                         &hresult.completion);
2032                 if (!ret)
2033                         ret = hresult.err;
2034                 break;
2035         default:
2036                 break;
2037         }
2038         ahash_request_free(req);
2039
2040         return ret;
2041 }
2042
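/*
 * HMAC setkey: keys up to one block long are used verbatim; longer keys
 * are first hashed down to the digest size, as the HMAC construction
 * requires.
 */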
2043 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2044                         unsigned int keylen)
2045 {
2046         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2047         unsigned int blocksize =
2048                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2049         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2050         unsigned int keysize = keylen;
2051         u8 hash[SHA512_DIGEST_SIZE];
2052         int ret;
2053
2054         if (keylen <= blocksize) {
2055                 memcpy(ctx->key, key, keysize);
2056         } else {
2057                 /* Must get the hash of the long key */
2058                 ret = keyhash(tfm, key, keylen, hash);
2059
2060                 if (ret) {
2061                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2062                         return -EINVAL;
2063                 }
2064
2065                 keysize = digestsize;
2066                 memcpy(ctx->key, hash, digestsize);
2067         }
2068
2069         ctx->keylen = keysize;
2070
2071         return 0;
2072 }
2074
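/*
 * Template used to register each supported algorithm: the crypto API
 * type, the matching crypto_alg / ahash_alg / aead_alg definition, and
 * the descriptor header bits selecting execution units and modes.
 */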
2075 struct talitos_alg_template {
2076         u32 type;
2077         union {
2078                 struct crypto_alg crypto;
2079                 struct ahash_alg hash;
2080                 struct aead_alg aead;
2081         } alg;
2082         __be32 desc_hdr_template;
2083 };
2084
2085 static struct talitos_alg_template driver_algs[] = {
2086         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2087         {       .type = CRYPTO_ALG_TYPE_AEAD,
2088                 .alg.aead = {
2089                         .base = {
2090                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2091                                 .cra_driver_name = "authenc-hmac-sha1-"
2092                                                    "cbc-aes-talitos",
2093                                 .cra_blocksize = AES_BLOCK_SIZE,
2094                                 .cra_flags = CRYPTO_ALG_ASYNC,
2095                         },
2096                         .ivsize = AES_BLOCK_SIZE,
2097                         .maxauthsize = SHA1_DIGEST_SIZE,
2098                 },
2099                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2100                                      DESC_HDR_SEL0_AESU |
2101                                      DESC_HDR_MODE0_AESU_CBC |
2102                                      DESC_HDR_SEL1_MDEUA |
2103                                      DESC_HDR_MODE1_MDEU_INIT |
2104                                      DESC_HDR_MODE1_MDEU_PAD |
2105                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2106         },
2107         {       .type = CRYPTO_ALG_TYPE_AEAD,
2108                 .alg.aead = {
2109                         .base = {
2110                                 .cra_name = "authenc(hmac(sha1),"
2111                                             "cbc(des3_ede))",
2112                                 .cra_driver_name = "authenc-hmac-sha1-"
2113                                                    "cbc-3des-talitos",
2114                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2115                                 .cra_flags = CRYPTO_ALG_ASYNC,
2116                         },
2117                         .ivsize = DES3_EDE_BLOCK_SIZE,
2118                         .maxauthsize = SHA1_DIGEST_SIZE,
2119                 },
2120                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2121                                      DESC_HDR_SEL0_DEU |
2122                                      DESC_HDR_MODE0_DEU_CBC |
2123                                      DESC_HDR_MODE0_DEU_3DES |
2124                                      DESC_HDR_SEL1_MDEUA |
2125                                      DESC_HDR_MODE1_MDEU_INIT |
2126                                      DESC_HDR_MODE1_MDEU_PAD |
2127                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2128         },
2129         {       .type = CRYPTO_ALG_TYPE_AEAD,
2130                 .alg.aead = {
2131                         .base = {
2132                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2133                                 .cra_driver_name = "authenc-hmac-sha224-"
2134                                                    "cbc-aes-talitos",
2135                                 .cra_blocksize = AES_BLOCK_SIZE,
2136                                 .cra_flags = CRYPTO_ALG_ASYNC,
2137                         },
2138                         .ivsize = AES_BLOCK_SIZE,
2139                         .maxauthsize = SHA224_DIGEST_SIZE,
2140                 },
2141                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2142                                      DESC_HDR_SEL0_AESU |
2143                                      DESC_HDR_MODE0_AESU_CBC |
2144                                      DESC_HDR_SEL1_MDEUA |
2145                                      DESC_HDR_MODE1_MDEU_INIT |
2146                                      DESC_HDR_MODE1_MDEU_PAD |
2147                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2148         },
2149         {       .type = CRYPTO_ALG_TYPE_AEAD,
2150                 .alg.aead = {
2151                         .base = {
2152                                 .cra_name = "authenc(hmac(sha224),"
2153                                             "cbc(des3_ede))",
2154                                 .cra_driver_name = "authenc-hmac-sha224-"
2155                                                    "cbc-3des-talitos",
2156                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2157                                 .cra_flags = CRYPTO_ALG_ASYNC,
2158                         },
2159                         .ivsize = DES3_EDE_BLOCK_SIZE,
2160                         .maxauthsize = SHA224_DIGEST_SIZE,
2161                 },
2162                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2163                                      DESC_HDR_SEL0_DEU |
2164                                      DESC_HDR_MODE0_DEU_CBC |
2165                                      DESC_HDR_MODE0_DEU_3DES |
2166                                      DESC_HDR_SEL1_MDEUA |
2167                                      DESC_HDR_MODE1_MDEU_INIT |
2168                                      DESC_HDR_MODE1_MDEU_PAD |
2169                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2170         },
2171         {       .type = CRYPTO_ALG_TYPE_AEAD,
2172                 .alg.aead = {
2173                         .base = {
2174                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2175                                 .cra_driver_name = "authenc-hmac-sha256-"
2176                                                    "cbc-aes-talitos",
2177                                 .cra_blocksize = AES_BLOCK_SIZE,
2178                                 .cra_flags = CRYPTO_ALG_ASYNC,
2179                         },
2180                         .ivsize = AES_BLOCK_SIZE,
2181                         .maxauthsize = SHA256_DIGEST_SIZE,
2182                 },
2183                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2184                                      DESC_HDR_SEL0_AESU |
2185                                      DESC_HDR_MODE0_AESU_CBC |
2186                                      DESC_HDR_SEL1_MDEUA |
2187                                      DESC_HDR_MODE1_MDEU_INIT |
2188                                      DESC_HDR_MODE1_MDEU_PAD |
2189                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2190         },
2191         {       .type = CRYPTO_ALG_TYPE_AEAD,
2192                 .alg.aead = {
2193                         .base = {
2194                                 .cra_name = "authenc(hmac(sha256),"
2195                                             "cbc(des3_ede))",
2196                                 .cra_driver_name = "authenc-hmac-sha256-"
2197                                                    "cbc-3des-talitos",
2198                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2199                                 .cra_flags = CRYPTO_ALG_ASYNC,
2200                         },
2201                         .ivsize = DES3_EDE_BLOCK_SIZE,
2202                         .maxauthsize = SHA256_DIGEST_SIZE,
2203                 },
2204                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2205                                      DESC_HDR_SEL0_DEU |
2206                                      DESC_HDR_MODE0_DEU_CBC |
2207                                      DESC_HDR_MODE0_DEU_3DES |
2208                                      DESC_HDR_SEL1_MDEUA |
2209                                      DESC_HDR_MODE1_MDEU_INIT |
2210                                      DESC_HDR_MODE1_MDEU_PAD |
2211                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2212         },
2213         {       .type = CRYPTO_ALG_TYPE_AEAD,
2214                 .alg.aead = {
2215                         .base = {
2216                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2217                                 .cra_driver_name = "authenc-hmac-sha384-"
2218                                                    "cbc-aes-talitos",
2219                                 .cra_blocksize = AES_BLOCK_SIZE,
2220                                 .cra_flags = CRYPTO_ALG_ASYNC,
2221                         },
2222                         .ivsize = AES_BLOCK_SIZE,
2223                         .maxauthsize = SHA384_DIGEST_SIZE,
2224                 },
2225                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2226                                      DESC_HDR_SEL0_AESU |
2227                                      DESC_HDR_MODE0_AESU_CBC |
2228                                      DESC_HDR_SEL1_MDEUB |
2229                                      DESC_HDR_MODE1_MDEU_INIT |
2230                                      DESC_HDR_MODE1_MDEU_PAD |
2231                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2232         },
2233         {       .type = CRYPTO_ALG_TYPE_AEAD,
2234                 .alg.aead = {
2235                         .base = {
2236                                 .cra_name = "authenc(hmac(sha384),"
2237                                             "cbc(des3_ede))",
2238                                 .cra_driver_name = "authenc-hmac-sha384-"
2239                                                    "cbc-3des-talitos",
2240                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2241                                 .cra_flags = CRYPTO_ALG_ASYNC,
2242                         },
2243                         .ivsize = DES3_EDE_BLOCK_SIZE,
2244                         .maxauthsize = SHA384_DIGEST_SIZE,
2245                 },
2246                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2247                                      DESC_HDR_SEL0_DEU |
2248                                      DESC_HDR_MODE0_DEU_CBC |
2249                                      DESC_HDR_MODE0_DEU_3DES |
2250                                      DESC_HDR_SEL1_MDEUB |
2251                                      DESC_HDR_MODE1_MDEU_INIT |
2252                                      DESC_HDR_MODE1_MDEU_PAD |
2253                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2254         },
2255         {       .type = CRYPTO_ALG_TYPE_AEAD,
2256                 .alg.aead = {
2257                         .base = {
2258                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2259                                 .cra_driver_name = "authenc-hmac-sha512-"
2260                                                    "cbc-aes-talitos",
2261                                 .cra_blocksize = AES_BLOCK_SIZE,
2262                                 .cra_flags = CRYPTO_ALG_ASYNC,
2263                         },
2264                         .ivsize = AES_BLOCK_SIZE,
2265                         .maxauthsize = SHA512_DIGEST_SIZE,
2266                 },
2267                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2268                                      DESC_HDR_SEL0_AESU |
2269                                      DESC_HDR_MODE0_AESU_CBC |
2270                                      DESC_HDR_SEL1_MDEUB |
2271                                      DESC_HDR_MODE1_MDEU_INIT |
2272                                      DESC_HDR_MODE1_MDEU_PAD |
2273                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2274         },
2275         {       .type = CRYPTO_ALG_TYPE_AEAD,
2276                 .alg.aead = {
2277                         .base = {
2278                                 .cra_name = "authenc(hmac(sha512),"
2279                                             "cbc(des3_ede))",
2280                                 .cra_driver_name = "authenc-hmac-sha512-"
2281                                                    "cbc-3des-talitos",
2282                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2283                                 .cra_flags = CRYPTO_ALG_ASYNC,
2284                         },
2285                         .ivsize = DES3_EDE_BLOCK_SIZE,
2286                         .maxauthsize = SHA512_DIGEST_SIZE,
2287                 },
2288                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2289                                      DESC_HDR_SEL0_DEU |
2290                                      DESC_HDR_MODE0_DEU_CBC |
2291                                      DESC_HDR_MODE0_DEU_3DES |
2292                                      DESC_HDR_SEL1_MDEUB |
2293                                      DESC_HDR_MODE1_MDEU_INIT |
2294                                      DESC_HDR_MODE1_MDEU_PAD |
2295                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2296         },
2297         {       .type = CRYPTO_ALG_TYPE_AEAD,
2298                 .alg.aead = {
2299                         .base = {
2300                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2301                                 .cra_driver_name = "authenc-hmac-md5-"
2302                                                    "cbc-aes-talitos",
2303                                 .cra_blocksize = AES_BLOCK_SIZE,
2304                                 .cra_flags = CRYPTO_ALG_ASYNC,
2305                         },
2306                         .ivsize = AES_BLOCK_SIZE,
2307                         .maxauthsize = MD5_DIGEST_SIZE,
2308                 },
2309                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2310                                      DESC_HDR_SEL0_AESU |
2311                                      DESC_HDR_MODE0_AESU_CBC |
2312                                      DESC_HDR_SEL1_MDEUA |
2313                                      DESC_HDR_MODE1_MDEU_INIT |
2314                                      DESC_HDR_MODE1_MDEU_PAD |
2315                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2316         },
2317         {       .type = CRYPTO_ALG_TYPE_AEAD,
2318                 .alg.aead = {
2319                         .base = {
2320                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2321                                 .cra_driver_name = "authenc-hmac-md5-"
2322                                                    "cbc-3des-talitos",
2323                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2324                                 .cra_flags = CRYPTO_ALG_ASYNC,
2325                         },
2326                         .ivsize = DES3_EDE_BLOCK_SIZE,
2327                         .maxauthsize = MD5_DIGEST_SIZE,
2328                 },
2329                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2330                                      DESC_HDR_SEL0_DEU |
2331                                      DESC_HDR_MODE0_DEU_CBC |
2332                                      DESC_HDR_MODE0_DEU_3DES |
2333                                      DESC_HDR_SEL1_MDEUA |
2334                                      DESC_HDR_MODE1_MDEU_INIT |
2335                                      DESC_HDR_MODE1_MDEU_PAD |
2336                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2337         },
2338         /* ABLKCIPHER algorithms. */
2339         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2340                 .alg.crypto = {
2341                         .cra_name = "ecb(aes)",
2342                         .cra_driver_name = "ecb-aes-talitos",
2343                         .cra_blocksize = AES_BLOCK_SIZE,
2344                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2345                                      CRYPTO_ALG_ASYNC,
2346                         .cra_ablkcipher = {
2347                                 .min_keysize = AES_MIN_KEY_SIZE,
2348                                 .max_keysize = AES_MAX_KEY_SIZE,
2349                                 .ivsize = AES_BLOCK_SIZE,
2350                         }
2351                 },
2352                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2353                                      DESC_HDR_SEL0_AESU,
2354         },
2355         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2356                 .alg.crypto = {
2357                         .cra_name = "cbc(aes)",
2358                         .cra_driver_name = "cbc-aes-talitos",
2359                         .cra_blocksize = AES_BLOCK_SIZE,
2360                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2361                                      CRYPTO_ALG_ASYNC,
2362                         .cra_ablkcipher = {
2363                                 .min_keysize = AES_MIN_KEY_SIZE,
2364                                 .max_keysize = AES_MAX_KEY_SIZE,
2365                                 .ivsize = AES_BLOCK_SIZE,
2366                         }
2367                 },
2368                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2369                                      DESC_HDR_SEL0_AESU |
2370                                      DESC_HDR_MODE0_AESU_CBC,
2371         },
2372         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2373                 .alg.crypto = {
2374                         .cra_name = "ctr(aes)",
2375                         .cra_driver_name = "ctr-aes-talitos",
2376                         .cra_blocksize = AES_BLOCK_SIZE,
2377                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2378                                      CRYPTO_ALG_ASYNC,
2379                         .cra_ablkcipher = {
2380                                 .min_keysize = AES_MIN_KEY_SIZE,
2381                                 .max_keysize = AES_MAX_KEY_SIZE,
2382                                 .ivsize = AES_BLOCK_SIZE,
2383                         }
2384                 },
2385                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2386                                      DESC_HDR_SEL0_AESU |
2387                                      DESC_HDR_MODE0_AESU_CTR,
2388         },
2389         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2390                 .alg.crypto = {
2391                         .cra_name = "ecb(des)",
2392                         .cra_driver_name = "ecb-des-talitos",
2393                         .cra_blocksize = DES_BLOCK_SIZE,
2394                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2395                                      CRYPTO_ALG_ASYNC,
2396                         .cra_ablkcipher = {
2397                                 .min_keysize = DES_KEY_SIZE,
2398                                 .max_keysize = DES_KEY_SIZE,
2399                                 .ivsize = DES_BLOCK_SIZE,
2400                         }
2401                 },
2402                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2403                                      DESC_HDR_SEL0_DEU,
2404         },
2405         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2406                 .alg.crypto = {
2407                         .cra_name = "cbc(des)",
2408                         .cra_driver_name = "cbc-des-talitos",
2409                         .cra_blocksize = DES_BLOCK_SIZE,
2410                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2411                                      CRYPTO_ALG_ASYNC,
2412                         .cra_ablkcipher = {
2413                                 .min_keysize = DES_KEY_SIZE,
2414                                 .max_keysize = DES_KEY_SIZE,
2415                                 .ivsize = DES_BLOCK_SIZE,
2416                         }
2417                 },
2418                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2419                                      DESC_HDR_SEL0_DEU |
2420                                      DESC_HDR_MODE0_DEU_CBC,
2421         },
2422         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2423                 .alg.crypto = {
2424                         .cra_name = "ecb(des3_ede)",
2425                         .cra_driver_name = "ecb-3des-talitos",
2426                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2427                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2428                                      CRYPTO_ALG_ASYNC,
2429                         .cra_ablkcipher = {
2430                                 .min_keysize = DES3_EDE_KEY_SIZE,
2431                                 .max_keysize = DES3_EDE_KEY_SIZE,
2432                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2433                         }
2434                 },
2435                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2436                                      DESC_HDR_SEL0_DEU |
2437                                      DESC_HDR_MODE0_DEU_3DES,
2438         },
2439         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2440                 .alg.crypto = {
2441                         .cra_name = "cbc(des3_ede)",
2442                         .cra_driver_name = "cbc-3des-talitos",
2443                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2444                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2445                                      CRYPTO_ALG_ASYNC,
2446                         .cra_ablkcipher = {
2447                                 .min_keysize = DES3_EDE_KEY_SIZE,
2448                                 .max_keysize = DES3_EDE_KEY_SIZE,
2449                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2450                         }
2451                 },
2452                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2453                                      DESC_HDR_SEL0_DEU |
2454                                      DESC_HDR_MODE0_DEU_CBC |
2455                                      DESC_HDR_MODE0_DEU_3DES,
2456         },
2457         /* AHASH algorithms. */
2458         {       .type = CRYPTO_ALG_TYPE_AHASH,
2459                 .alg.hash = {
2460                         .halg.digestsize = MD5_DIGEST_SIZE,
2461                         .halg.base = {
2462                                 .cra_name = "md5",
2463                                 .cra_driver_name = "md5-talitos",
2464                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2465                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2466                                              CRYPTO_ALG_ASYNC,
2467                         }
2468                 },
2469                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2470                                      DESC_HDR_SEL0_MDEUA |
2471                                      DESC_HDR_MODE0_MDEU_MD5,
2472         },
2473         {       .type = CRYPTO_ALG_TYPE_AHASH,
2474                 .alg.hash = {
2475                         .halg.digestsize = SHA1_DIGEST_SIZE,
2476                         .halg.base = {
2477                                 .cra_name = "sha1",
2478                                 .cra_driver_name = "sha1-talitos",
2479                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2480                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2481                                              CRYPTO_ALG_ASYNC,
2482                         }
2483                 },
2484                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2485                                      DESC_HDR_SEL0_MDEUA |
2486                                      DESC_HDR_MODE0_MDEU_SHA1,
2487         },
2488         {       .type = CRYPTO_ALG_TYPE_AHASH,
2489                 .alg.hash = {
2490                         .halg.digestsize = SHA224_DIGEST_SIZE,
2491                         .halg.base = {
2492                                 .cra_name = "sha224",
2493                                 .cra_driver_name = "sha224-talitos",
2494                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2495                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2496                                              CRYPTO_ALG_ASYNC,
2497                         }
2498                 },
2499                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2500                                      DESC_HDR_SEL0_MDEUA |
2501                                      DESC_HDR_MODE0_MDEU_SHA224,
2502         },
2503         {       .type = CRYPTO_ALG_TYPE_AHASH,
2504                 .alg.hash = {
2505                         .halg.digestsize = SHA256_DIGEST_SIZE,
2506                         .halg.base = {
2507                                 .cra_name = "sha256",
2508                                 .cra_driver_name = "sha256-talitos",
2509                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2510                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2511                                              CRYPTO_ALG_ASYNC,
2512                         }
2513                 },
2514                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2515                                      DESC_HDR_SEL0_MDEUA |
2516                                      DESC_HDR_MODE0_MDEU_SHA256,
2517         },
2518         {       .type = CRYPTO_ALG_TYPE_AHASH,
2519                 .alg.hash = {
2520                         .halg.digestsize = SHA384_DIGEST_SIZE,
2521                         .halg.base = {
2522                                 .cra_name = "sha384",
2523                                 .cra_driver_name = "sha384-talitos",
2524                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2525                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2526                                              CRYPTO_ALG_ASYNC,
2527                         }
2528                 },
2529                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2530                                      DESC_HDR_SEL0_MDEUB |
2531                                      DESC_HDR_MODE0_MDEUB_SHA384,
2532         },
2533         {       .type = CRYPTO_ALG_TYPE_AHASH,
2534                 .alg.hash = {
2535                         .halg.digestsize = SHA512_DIGEST_SIZE,
2536                         .halg.base = {
2537                                 .cra_name = "sha512",
2538                                 .cra_driver_name = "sha512-talitos",
2539                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2540                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2541                                              CRYPTO_ALG_ASYNC,
2542                         }
2543                 },
2544                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2545                                      DESC_HDR_SEL0_MDEUB |
2546                                      DESC_HDR_MODE0_MDEUB_SHA512,
2547         },
2548         {       .type = CRYPTO_ALG_TYPE_AHASH,
2549                 .alg.hash = {
2550                         .halg.digestsize = MD5_DIGEST_SIZE,
2551                         .halg.base = {
2552                                 .cra_name = "hmac(md5)",
2553                                 .cra_driver_name = "hmac-md5-talitos",
2554                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2555                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2556                                              CRYPTO_ALG_ASYNC,
2557                         }
2558                 },
2559                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2560                                      DESC_HDR_SEL0_MDEUA |
2561                                      DESC_HDR_MODE0_MDEU_MD5,
2562         },
2563         {       .type = CRYPTO_ALG_TYPE_AHASH,
2564                 .alg.hash = {
2565                         .halg.digestsize = SHA1_DIGEST_SIZE,
2566                         .halg.base = {
2567                                 .cra_name = "hmac(sha1)",
2568                                 .cra_driver_name = "hmac-sha1-talitos",
2569                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2570                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2571                                              CRYPTO_ALG_ASYNC,
2572                         }
2573                 },
2574                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2575                                      DESC_HDR_SEL0_MDEUA |
2576                                      DESC_HDR_MODE0_MDEU_SHA1,
2577         },
2578         {       .type = CRYPTO_ALG_TYPE_AHASH,
2579                 .alg.hash = {
2580                         .halg.digestsize = SHA224_DIGEST_SIZE,
2581                         .halg.base = {
2582                                 .cra_name = "hmac(sha224)",
2583                                 .cra_driver_name = "hmac-sha224-talitos",
2584                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2585                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2586                                              CRYPTO_ALG_ASYNC,
2587                         }
2588                 },
2589                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2590                                      DESC_HDR_SEL0_MDEUA |
2591                                      DESC_HDR_MODE0_MDEU_SHA224,
2592         },
2593         {       .type = CRYPTO_ALG_TYPE_AHASH,
2594                 .alg.hash = {
2595                         .halg.digestsize = SHA256_DIGEST_SIZE,
2596                         .halg.base = {
2597                                 .cra_name = "hmac(sha256)",
2598                                 .cra_driver_name = "hmac-sha256-talitos",
2599                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2600                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2601                                              CRYPTO_ALG_ASYNC,
2602                         }
2603                 },
2604                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2605                                      DESC_HDR_SEL0_MDEUA |
2606                                      DESC_HDR_MODE0_MDEU_SHA256,
2607         },
2608         {       .type = CRYPTO_ALG_TYPE_AHASH,
2609                 .alg.hash = {
2610                         .halg.digestsize = SHA384_DIGEST_SIZE,
2611                         .halg.base = {
2612                                 .cra_name = "hmac(sha384)",
2613                                 .cra_driver_name = "hmac-sha384-talitos",
2614                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2615                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2616                                              CRYPTO_ALG_ASYNC,
2617                         }
2618                 },
2619                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2620                                      DESC_HDR_SEL0_MDEUB |
2621                                      DESC_HDR_MODE0_MDEUB_SHA384,
2622         },
2623         {       .type = CRYPTO_ALG_TYPE_AHASH,
2624                 .alg.hash = {
2625                         .halg.digestsize = SHA512_DIGEST_SIZE,
2626                         .halg.base = {
2627                                 .cra_name = "hmac(sha512)",
2628                                 .cra_driver_name = "hmac-sha512-talitos",
2629                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2630                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2631                                              CRYPTO_ALG_ASYNC,
2632                         }
2633                 },
2634                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2635                                      DESC_HDR_SEL0_MDEUB |
2636                                      DESC_HDR_MODE0_MDEUB_SHA512,
2637         }
2638 };
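
/*
 * Illustrative sketch, not part of the driver: consumers reach the
 * algorithms in the table above through the generic crypto API by cra_name,
 * and only land on this hardware if it wins on priority.  Names such as
 * example_done_cb, sg and nbytes below are hypothetical placeholders.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   example_done_cb, &done);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		wait_for_completion(&done);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * Because these algs are registered CRYPTO_ALG_ASYNC, the digest call may
 * return -EINPROGRESS and complete later via the callback instead.
 */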
2639
2640 struct talitos_crypto_alg {
2641         struct list_head entry;
2642         struct device *dev;
2643         struct talitos_alg_template algt;
2644 };
2645
2646 static int talitos_init_common(struct talitos_ctx *ctx,
2647                                struct talitos_crypto_alg *talitos_alg)
2648 {
2649         struct talitos_private *priv;
2650
2651         /* update context with ptr to dev */
2652         ctx->dev = talitos_alg->dev;
2653
2654         /* assign SEC channel to tfm in round-robin fashion */
2655         priv = dev_get_drvdata(ctx->dev);
2656         ctx->ch = atomic_inc_return(&priv->last_chan) &
2657                   (priv->num_channels - 1);
2658
2659         /* copy descriptor header template value */
2660         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2661
2662         /* select done notification */
2663         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2664
2665         return 0;
2666 }
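
/*
 * Worked example of the round-robin selection above: num_channels is
 * validated as a power of two at probe time, so with, say, four channels
 * the mask is 0x3 and successive tfm initializations are spread over
 * channels 1, 2, 3, 0, 1, ... as last_chan is atomically incremented.
 */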
2667
2668 static int talitos_cra_init(struct crypto_tfm *tfm)
2669 {
2670         struct crypto_alg *alg = tfm->__crt_alg;
2671         struct talitos_crypto_alg *talitos_alg;
2672         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2675                 talitos_alg = container_of(__crypto_ahash_alg(alg),
2676                                            struct talitos_crypto_alg,
2677                                            algt.alg.hash);
2678         else
2679                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2680                                            algt.alg.crypto);
2681
2682         return talitos_init_common(ctx, talitos_alg);
2683 }
2684
2685 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2686 {
2687         struct aead_alg *alg = crypto_aead_alg(tfm);
2688         struct talitos_crypto_alg *talitos_alg;
2689         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2690
2691         talitos_alg = container_of(alg, struct talitos_crypto_alg,
2692                                    algt.alg.aead);
2693
2694         return talitos_init_common(ctx, talitos_alg);
2695 }
2696
2697 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2698 {
2699         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2700
2701         talitos_cra_init(tfm);
2702
2703         ctx->keylen = 0;
2704         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2705                                  sizeof(struct talitos_ahash_req_ctx));
2706
2707         return 0;
2708 }
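
/*
 * Note on the reqsize set above: crypto_ahash_set_reqsize() tells the crypto
 * core to allocate room for a struct talitos_ahash_req_ctx behind every
 * ahash_request, so per-request hash state travels with the request rather
 * than living in the shared tfm context.
 */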
2709
2710 /*
2711  * given the alg's descriptor header template, determine whether the
2712  * descriptor type and the required primary/secondary execution units match
2713  * the h/w capabilities described in the device tree node.
2714  */
2715 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2716 {
2717         struct talitos_private *priv = dev_get_drvdata(dev);
2718         int ret;
2719
2720         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2721               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2722
2723         if (SECONDARY_EU(desc_hdr_template))
2724                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2725                               & priv->exec_units);
2726
2727         return ret;
2728 }
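
/*
 * As a worked example: the sha256 entry in driver_algs is built from
 * DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | DESC_HDR_SEL0_MDEUA |
 * DESC_HDR_MODE0_MDEU_SHA256 and names no secondary EU, so hw_supports()
 * only requires the matching descriptor-type bit in priv->desc_types and
 * the MDEU-A bit in priv->exec_units, both taken from the device tree
 * masks parsed in talitos_probe().
 */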
2729
2730 static int talitos_remove(struct platform_device *ofdev)
2731 {
2732         struct device *dev = &ofdev->dev;
2733         struct talitos_private *priv = dev_get_drvdata(dev);
2734         struct talitos_crypto_alg *t_alg, *n;
2735         int i;
2736
2737         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2738                 switch (t_alg->algt.type) {
2739                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
                             crypto_unregister_alg(&t_alg->algt.alg.crypto);
2740                         break;
2741                 case CRYPTO_ALG_TYPE_AEAD:
2742                         crypto_unregister_aead(&t_alg->algt.alg.aead);
                             break;
2743                 case CRYPTO_ALG_TYPE_AHASH:
2744                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
2745                         break;
2746                 }
2747                 list_del(&t_alg->entry);
2748                 kfree(t_alg);
2749         }
2750
2751         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2752                 talitos_unregister_rng(dev);
2753
2754         for (i = 0; priv->chan && i < priv->num_channels; i++)
2755                 kfree(priv->chan[i].fifo);
2756
2757         kfree(priv->chan);
2758
2759         for (i = 0; i < 2; i++)
2760                 if (priv->irq[i]) {
2761                         free_irq(priv->irq[i], dev);
2762                         irq_dispose_mapping(priv->irq[i]);
2763                 }
2764
2765         tasklet_kill(&priv->done_task[0]);
2766         if (priv->irq[1])
2767                 tasklet_kill(&priv->done_task[1]);
2768
2769         iounmap(priv->reg);
2770
2771         kfree(priv);
2772
2773         return 0;
2774 }
2775
2776 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2777                                                     struct talitos_alg_template
2778                                                            *template)
2779 {
2780         struct talitos_private *priv = dev_get_drvdata(dev);
2781         struct talitos_crypto_alg *t_alg;
2782         struct crypto_alg *alg;
2783
2784         t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2785         if (!t_alg)
2786                 return ERR_PTR(-ENOMEM);
2787
2788         t_alg->algt = *template;
2789
2790         switch (t_alg->algt.type) {
2791         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2792                 alg = &t_alg->algt.alg.crypto;
2793                 alg->cra_init = talitos_cra_init;
2794                 alg->cra_type = &crypto_ablkcipher_type;
2795                 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2796                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2797                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2798                 alg->cra_ablkcipher.geniv = "eseqiv";
2799                 break;
2800         case CRYPTO_ALG_TYPE_AEAD:
2801                 alg = &t_alg->algt.alg.aead.base;
2802                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2803                 t_alg->algt.alg.aead.setkey = aead_setkey;
2804                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2805                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
2806                 break;
2807         case CRYPTO_ALG_TYPE_AHASH:
2808                 alg = &t_alg->algt.alg.hash.halg.base;
2809                 alg->cra_init = talitos_cra_init_ahash;
2810                 alg->cra_type = &crypto_ahash_type;
2811                 t_alg->algt.alg.hash.init = ahash_init;
2812                 t_alg->algt.alg.hash.update = ahash_update;
2813                 t_alg->algt.alg.hash.final = ahash_final;
2814                 t_alg->algt.alg.hash.finup = ahash_finup;
2815                 t_alg->algt.alg.hash.digest = ahash_digest;
2816                 t_alg->algt.alg.hash.setkey = ahash_setkey;
2817
2818                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2819                     !strncmp(alg->cra_name, "hmac", 4)) {
2820                         kfree(t_alg);
2821                         return ERR_PTR(-ENOTSUPP);
2822                 }
2823                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2824                     (!strcmp(alg->cra_name, "sha224") ||
2825                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
2826                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2827                         t_alg->algt.desc_hdr_template =
2828                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2829                                         DESC_HDR_SEL0_MDEUA |
2830                                         DESC_HDR_MODE0_MDEU_SHA256;
2831                 }
2832                 break;
2833         default:
2834                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2835                 kfree(t_alg);
2836                 return ERR_PTR(-EINVAL);
2837         }
2838
2839         alg->cra_module = THIS_MODULE;
2840         alg->cra_priority = TALITOS_CRA_PRIORITY;
2841         alg->cra_alignmask = 0;
2842         alg->cra_ctxsize = sizeof(struct talitos_ctx);
2843         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2844
2845         t_alg->dev = dev;
2846
2847         return t_alg;
2848 }
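
/*
 * Note on the SHA-224 special case above (a sketch of the intent, see
 * ahash_init_sha224_swinit() earlier in this file): parts without
 * TALITOS_FTR_SHA224_HWINIT run the MDEU in SHA-256 mode and the initial
 * hash state is seeded from software with the standard SHA-224 constants
 * of FIPS 180-4 rather than loaded by the hardware.
 */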
2849
2850 static int talitos_probe_irq(struct platform_device *ofdev)
2851 {
2852         struct device *dev = &ofdev->dev;
2853         struct device_node *np = ofdev->dev.of_node;
2854         struct talitos_private *priv = dev_get_drvdata(dev);
2855         int err;
2856         bool is_sec1 = has_ftr_sec1(priv);
2857
2858         priv->irq[0] = irq_of_parse_and_map(np, 0);
2859         if (!priv->irq[0]) {
2860                 dev_err(dev, "failed to map irq\n");
2861                 return -EINVAL;
2862         }
2863         if (is_sec1) {
2864                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2865                                   dev_driver_string(dev), dev);
2866                 goto primary_out;
2867         }
2868
2869         priv->irq[1] = irq_of_parse_and_map(np, 1);
2870
2871         /* get the primary irq line */
2872         if (!priv->irq[1]) {
2873                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2874                                   dev_driver_string(dev), dev);
2875                 goto primary_out;
2876         }
2877
2878         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2879                           dev_driver_string(dev), dev);
2880         if (err)
2881                 goto primary_out;
2882
2883         /* get the secondary irq line */
2884         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2885                           dev_driver_string(dev), dev);
2886         if (err) {
2887                 dev_err(dev, "failed to request secondary irq\n");
2888                 irq_dispose_mapping(priv->irq[1]);
2889                 priv->irq[1] = 0;
2890         }
2891
2892         return err;
2893
2894 primary_out:
2895         if (err) {
2896                 dev_err(dev, "failed to request primary irq\n");
2897                 irq_dispose_mapping(priv->irq[0]);
2898                 priv->irq[0] = 0;
2899         }
2900
2901         return err;
2902 }
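
/*
 * Summary of the wiring handled above: SEC1 parts expose a single interrupt
 * covering all four channels; SEC2+ parts may expose either one line
 * (serviced by the 4-channel handler) or two, with channels 0/2 on irq[0]
 * and channels 1/3 on irq[1].  talitos_probe() below selects the matching
 * done-tasklets based on whether irq[1] was mapped.
 */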
2903
2904 static int talitos_probe(struct platform_device *ofdev)
2905 {
2906         struct device *dev = &ofdev->dev;
2907         struct device_node *np = ofdev->dev.of_node;
2908         struct talitos_private *priv;
2909         const unsigned int *prop;
2910         int i, err;
2911         int stride;
2912
2913         priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2914         if (!priv)
2915                 return -ENOMEM;
2916
2917         INIT_LIST_HEAD(&priv->alg_list);
2918
2919         dev_set_drvdata(dev, priv);
2920
2921         priv->ofdev = ofdev;
2922
2923         spin_lock_init(&priv->reg_lock);
2924
2925         priv->reg = of_iomap(np, 0);
2926         if (!priv->reg) {
2927                 dev_err(dev, "failed to of_iomap\n");
2928                 err = -ENOMEM;
2929                 goto err_out;
2930         }
2931
2932         /* get SEC version capabilities from device tree */
2933         prop = of_get_property(np, "fsl,num-channels", NULL);
2934         if (prop)
2935                 priv->num_channels = *prop;
2936
2937         prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2938         if (prop)
2939                 priv->chfifo_len = *prop;
2940
2941         prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2942         if (prop)
2943                 priv->exec_units = *prop;
2944
2945         prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2946         if (prop)
2947                 priv->desc_types = *prop;
2948
2949         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2950             !priv->exec_units || !priv->desc_types) {
2951                 dev_err(dev, "invalid property data in device tree node\n");
2952                 err = -EINVAL;
2953                 goto err_out;
2954         }
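
        /*
         * For reference, a node carrying the properties parsed above might
         * look roughly like the fragment below; the values are illustrative
         * and board specific.  The masks advertise which execution units and
         * descriptor types this SEC instance implements:
         *
         *	crypto@30000 {
         *		compatible = "fsl,sec2.0";
         *		reg = <0x30000 0x10000>;
         *		fsl,num-channels = <4>;
         *		fsl,channel-fifo-len = <24>;
         *		fsl,exec-units-mask = <0xfe>;
         *		fsl,descriptor-types-mask = <0x12b0ebf>;
         *	};
         */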
2955
2956         if (of_device_is_compatible(np, "fsl,sec3.0"))
2957                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2958
2959         if (of_device_is_compatible(np, "fsl,sec2.1"))
2960                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2961                                   TALITOS_FTR_SHA224_HWINIT |
2962                                   TALITOS_FTR_HMAC_OK;
2963
2964         if (of_device_is_compatible(np, "fsl,sec1.0"))
2965                 priv->features |= TALITOS_FTR_SEC1;
2966
2967         if (of_device_is_compatible(np, "fsl,sec1.2")) {
2968                 priv->reg_deu = priv->reg + TALITOS12_DEU;
2969                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2970                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2971                 stride = TALITOS1_CH_STRIDE;
2972         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2973                 priv->reg_deu = priv->reg + TALITOS10_DEU;
2974                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2975                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2976                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2977                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2978                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2979                 stride = TALITOS1_CH_STRIDE;
2980         } else {
2981                 priv->reg_deu = priv->reg + TALITOS2_DEU;
2982                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2983                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2984                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2985                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2986                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2987                 priv->reg_keu = priv->reg + TALITOS2_KEU;
2988                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2989                 stride = TALITOS2_CH_STRIDE;
2990         }
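
        /*
         * The offsets above only relocate the per-execution-unit register
         * sub-blocks; channel registers are still located at
         * reg + stride * (channel + 1), which is how the per-channel loop
         * further down fills in priv->chan[i].reg.
         */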
2991
2992         err = talitos_probe_irq(ofdev);
2993         if (err)
2994                 goto err_out;
2995
2996         if (of_device_is_compatible(np, "fsl,sec1.0")) {
2997                 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2998                              (unsigned long)dev);
2999         } else {
3000                 if (!priv->irq[1]) {
3001                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3002                                      (unsigned long)dev);
3003                 } else {
3004                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3005                                      (unsigned long)dev);
3006                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3007                                      (unsigned long)dev);
3008                 }
3009         }
3010
3011         priv->chan = kcalloc(priv->num_channels,
3012                              sizeof(struct talitos_channel), GFP_KERNEL);
3013         if (!priv->chan) {
3014                 dev_err(dev, "failed to allocate channel management space\n");
3015                 err = -ENOMEM;
3016                 goto err_out;
3017         }
3018
3019         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3020
3021         for (i = 0; i < priv->num_channels; i++) {
3022                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3023                 if (!priv->irq[1] || !(i & 1))
3024                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3025
3026                 spin_lock_init(&priv->chan[i].head_lock);
3027                 spin_lock_init(&priv->chan[i].tail_lock);
3028
3029                 priv->chan[i].fifo = kcalloc(priv->fifo_len,
3030                                              sizeof(struct talitos_request),
                                                  GFP_KERNEL);
3031                 if (!priv->chan[i].fifo) {
3032                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3033                         err = -ENOMEM;
3034                         goto err_out;
3035                 }
3036
3037                 atomic_set(&priv->chan[i].submit_count,
3038                            -(priv->chfifo_len - 1));
3039         }
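
        /*
         * Worked example of the bookkeeping above, assuming a device-tree
         * chfifo_len of 24: fifo_len is rounded up to 32 so ring indices can
         * wrap with a simple power-of-two mask, and submit_count starts at
         * -23 so that (as the submission path earlier in this file uses it)
         * roughly chfifo_len - 1 descriptors can be outstanding per channel
         * before callers start seeing -EAGAIN.
         */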
3040
3041         dma_set_mask(dev, DMA_BIT_MASK(36));
3042
3043         /* reset and initialize the h/w */
3044         err = init_device(dev);
3045         if (err) {
3046                 dev_err(dev, "failed to initialize device\n");
3047                 goto err_out;
3048         }
3049
3050         /* register the RNG, if available */
3051         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3052                 err = talitos_register_rng(dev);
3053                 if (err) {
3054                         dev_err(dev, "failed to register hwrng: %d\n", err);
3055                         goto err_out;
3056                 }
3057                 dev_info(dev, "hwrng\n");
3058         }
3059
3060         /* register crypto algorithms the device supports */
3061         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3062                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3063                         struct talitos_crypto_alg *t_alg;
3064                         struct crypto_alg *alg = NULL;
3065
3066                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3067                         if (IS_ERR(t_alg)) {
3068                                 err = PTR_ERR(t_alg);
3069                                 if (err == -ENOTSUPP)
3070                                         continue;
3071                                 goto err_out;
3072                         }
3073
3074                         switch (t_alg->algt.type) {
3075                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3076                                 err = crypto_register_alg(
3077                                                 &t_alg->algt.alg.crypto);
3078                                 alg = &t_alg->algt.alg.crypto;
3079                                 break;
3080
3081                         case CRYPTO_ALG_TYPE_AEAD:
3082                                 err = crypto_register_aead(
3083                                         &t_alg->algt.alg.aead);
3084                                 alg = &t_alg->algt.alg.aead.base;
3085                                 break;
3086
3087                         case CRYPTO_ALG_TYPE_AHASH:
3088                                 err = crypto_register_ahash(
3089                                                 &t_alg->algt.alg.hash);
3090                                 alg = &t_alg->algt.alg.hash.halg.base;
3091                                 break;
3092                         }
3093                         if (err) {
3094                                 dev_err(dev, "%s alg registration failed\n",
3095                                         alg->cra_driver_name);
3096                                 kfree(t_alg);
3097                         } else
3098                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3099                 }
3100         }
3101         if (!list_empty(&priv->alg_list))
3102                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3103                          (char *)of_get_property(np, "compatible", NULL));
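
        /*
         * Schematically (field values illustrative), each registered
         * algorithm then shows up in /proc/crypto as a block like:
         *
         *	name         : sha256
         *	driver       : sha256-talitos
         *	module       : kernel
         *	priority     : 3000
         *	type         : ahash
         *	async        : yes
         *
         * one block per template that passed hw_supports() and registered
         * successfully above.
         */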
3104
3105         return 0;
3106
3107 err_out:
3108         talitos_remove(ofdev);
3109
3110         return err;
3111 }
3112
3113 static const struct of_device_id talitos_match[] = {
3114 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3115         {
3116                 .compatible = "fsl,sec1.0",
3117         },
3118 #endif
3119 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3120         {
3121                 .compatible = "fsl,sec2.0",
3122         },
3123 #endif
3124         {},
3125 };
3126 MODULE_DEVICE_TABLE(of, talitos_match);
3127
3128 static struct platform_driver talitos_driver = {
3129         .driver = {
3130                 .name = "talitos",
3131                 .of_match_table = talitos_match,
3132         },
3133         .probe = talitos_probe,
3134         .remove = talitos_remove,
3135 };
3136
3137 module_platform_driver(talitos_driver);
3138
3139 MODULE_LICENSE("GPL");
3140 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3141 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");