/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
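/* e.g. prn(err) prints "err=<value>" in decimal; prx() does the same in hex */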

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST                    4
#define DMA_MIN                         (DST_MAXBURST * sizeof(u32))

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)    (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
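/* e.g. FLD_MASK(7, 0) == 0xff; FLD_VAL(2, 4, 3) == 0x10 (value 2 in bits 4:3) */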

#define AES_REG_KEY(dd, x)              ((dd)->pdata->key_ofs - \
                                                ((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)               ((dd)->pdata->iv_ofs + ((x) * 0x04))
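/*
 * The x ^ 0x01 above swaps the two 32-bit words of each 64-bit key pair:
 * word 0 is written at key_ofs - 0x04 and word 1 at key_ofs, with
 * higher-numbered pairs at descending offsets. IV words simply ascend
 * from iv_ofs.
 */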

#define AES_REG_CTRL(dd)                ((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK     (3 << 7)
#define AES_REG_CTRL_CTR_WIDTH_32               (0 << 7)
#define AES_REG_CTRL_CTR_WIDTH_64               (1 << 7)
#define AES_REG_CTRL_CTR_WIDTH_96               (2 << 7)
#define AES_REG_CTRL_CTR_WIDTH_128              (3 << 7)
#define AES_REG_CTRL_CTR                (1 << 6)
#define AES_REG_CTRL_CBC                (1 << 5)
#define AES_REG_CTRL_KEY_SIZE           (3 << 3)
#define AES_REG_CTRL_DIRECTION          (1 << 2)
#define AES_REG_CTRL_INPUT_READY        (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY       (1 << 0)

#define AES_REG_DATA_N(dd, x)           ((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)                 ((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)                ((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE              (1 << 6)
#define AES_REG_MASK_START              (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN         (1 << 3)
#define AES_REG_MASK_DMA_IN_EN          (1 << 2)
#define AES_REG_MASK_SOFTRESET          (1 << 1)
#define AES_REG_AUTOIDLE                (1 << 0)

#define AES_REG_LENGTH_N(x)             (0x54 + ((x) * 0x04))

#define DEFAULT_TIMEOUT         (5*HZ)

#define FLAGS_MODE_MASK         0x000f
#define FLAGS_ENCRYPT           BIT(0)
#define FLAGS_CBC               BIT(1)
#define FLAGS_GIV               BIT(2)
#define FLAGS_CTR               BIT(3)

#define FLAGS_INIT              BIT(4)
#define FLAGS_FAST              BIT(5)
#define FLAGS_BUSY              BIT(6)

struct omap_aes_ctx {
        struct omap_aes_dev *dd;

        int             keylen;
        u32             key[AES_KEYSIZE_256 / sizeof(u32)];
        unsigned long   flags;
};

struct omap_aes_reqctx {
        unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH   1
#define OMAP_AES_CACHE_SIZE     0
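/* OMAP_AES_CACHE_SIZE is the __get_free_pages() order of the bounce buffers */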

struct omap_aes_algs_info {
        struct crypto_alg       *algs_list;
        unsigned int            size;
        unsigned int            registered;
};

struct omap_aes_pdata {
        struct omap_aes_algs_info       *algs_info;
        unsigned int    algs_info_size;

        void            (*trigger)(struct omap_aes_dev *dd, int length);

        u32             key_ofs;
        u32             iv_ofs;
        u32             ctrl_ofs;
        u32             data_ofs;
        u32             rev_ofs;
        u32             mask_ofs;

        u32             dma_enable_in;
        u32             dma_enable_out;
        u32             dma_start;

        u32             major_mask;
        u32             major_shift;
        u32             minor_mask;
        u32             minor_shift;
};

struct omap_aes_dev {
        struct list_head        list;
        unsigned long           phys_base;
        void __iomem            *io_base;
        struct omap_aes_ctx     *ctx;
        struct device           *dev;
        unsigned long           flags;
        int                     err;

        spinlock_t              lock;
        struct crypto_queue     queue;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        struct ablkcipher_request       *req;
        size_t                          total;
        struct scatterlist              *in_sg;
        struct scatterlist              in_sgl;
        size_t                          in_offset;
        struct scatterlist              *out_sg;
        struct scatterlist              out_sgl;
        size_t                          out_offset;

        size_t                  buflen;
        void                    *buf_in;
        size_t                  dma_size;
        int                     dma_in;
        struct dma_chan         *dma_lch_in;
        dma_addr_t              dma_addr_in;
        void                    *buf_out;
        int                     dma_out;
        struct dma_chan         *dma_lch_out;
        int                     in_sg_len;
        int                     out_sg_len;
        dma_addr_t              dma_addr_out;

        const struct omap_aes_pdata     *pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

#ifdef DEBUG
#define omap_aes_read(dd, offset)                               \
({                                                              \
        int _read_ret;                                          \
        _read_ret = __raw_readl(dd->io_base + offset);          \
        pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",       \
                 offset, _read_ret);                            \
        _read_ret;                                              \
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)                               \
        do {                                                            \
                pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
                         offset, value);                                \
                __raw_writel(value, dd->io_base + offset);              \
        } while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                                  u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                        u32 value, u32 mask)
{
        u32 val;

        val = omap_aes_read(dd, offset);
        val &= ~mask;
        val |= value;
        omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                                        u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
        if (!(dd->flags & FLAGS_INIT)) {
                dd->flags |= FLAGS_INIT;
                dd->err = 0;
        }

        return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
        unsigned int key32;
        int i, err;
        u32 val, mask = 0;

        err = omap_aes_hw_init(dd);
        if (err)
                return err;

        key32 = dd->ctx->keylen / sizeof(u32);

        /* it seems a key should always be set even if it has not changed */
        for (i = 0; i < key32; i++) {
                omap_aes_write(dd, AES_REG_KEY(dd, i),
                        __le32_to_cpu(dd->ctx->key[i]));
        }

        if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
                omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

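        /* key size field (bits 4:3): 16/24/32-byte keys encode as 1/2/3 */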
        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;
        if (dd->flags & FLAGS_CTR) {
                val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
                mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
        }
        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;

        mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
                        AES_REG_CTRL_KEY_SIZE;

        omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);

        return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
        u32 mask, val;

        val = dd->pdata->dma_start;

        if (dd->dma_lch_out != NULL)
                val |= dd->pdata->dma_enable_out;
        if (dd->dma_lch_in != NULL)
                val |= dd->pdata->dma_enable_in;

        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
               dd->pdata->dma_start;

        omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
        omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
        omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

        omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
        u32 mask;

        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
               dd->pdata->dma_start;

        omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
        struct omap_aes_dev *dd = NULL, *tmp;

        spin_lock_bh(&list_lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &dev_list, list) {
                        /* FIXME: take first available aes core */
                        dd = tmp;
                        break;
                }
                ctx->dd = dd;
        } else {
                /* already found before */
                dd = ctx->dd;
        }
        spin_unlock_bh(&list_lock);

        return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
        struct omap_aes_dev *dd = data;

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}

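/*
 * Allocate the two page-order bounce buffers, map them for DMA, then
 * request the RX/TX channels ("rx"/"tx" by DT name, or the platform DMA
 * resources on non-DT boards, via dma_request_slave_channel_compat()).
 */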
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
        int err = -ENOMEM;
        dma_cap_mask_t mask;

        dd->dma_lch_out = NULL;
        dd->dma_lch_in = NULL;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
                                         DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
                                          DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        dd->dma_lch_in = dma_request_slave_channel_compat(mask,
                                                          omap_dma_filter_fn,
                                                          &dd->dma_in,
                                                          dd->dev, "rx");
        if (!dd->dma_lch_in) {
                dev_err(dd->dev, "Unable to request in DMA channel\n");
                goto err_dma_in;
        }

        dd->dma_lch_out = dma_request_slave_channel_compat(mask,
                                                           omap_dma_filter_fn,
                                                           &dd->dma_out,
                                                           dd->dev, "tx");
        if (!dd->dma_lch_out) {
                dev_err(dd->dev, "Unable to request out DMA channel\n");
                goto err_dma_out;
        }

        return 0;

err_dma_out:
        dma_release_channel(dd->dma_lch_in);
err_dma_in:
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
        if (err)
                pr_err("error: %d\n", err);
        return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
        dma_release_channel(dd->dma_lch_out);
        dma_release_channel(dd->dma_lch_in);
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

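/*
 * Copy between a linear buffer and a scatterlist region: a non-zero
 * "out" writes buf into the scatterlist, zero reads from it.
 */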
static void sg_copy_buf(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;

        if (!nbytes)
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_advance(&walk, start);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
                   size_t buflen, size_t total, int out)
{
        unsigned int count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                /*
                 * buflen and total are AES_BLOCK_SIZE size aligned,
                 * so count should be also aligned
                 */

                sg_copy_buf(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

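/*
 * Both DMA channels target the same AES_REG_DATA_N(dd, 0) FIFO address:
 * the IN channel feeds data MEM_TO_DEV, the OUT channel drains results
 * DEV_TO_MEM, and the hardware is started last via the pdata->trigger()
 * hook.
 */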
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
                struct scatterlist *in_sg, struct scatterlist *out_sg,
                int in_sg_len, int out_sg_len)
{
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct omap_aes_dev *dd = ctx->dd;
        struct dma_async_tx_descriptor *tx_in, *tx_out;
        struct dma_slave_config cfg;
        int ret;

        dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

        memset(&cfg, 0, sizeof(cfg));

        cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
        cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.src_maxburst = DST_MAXBURST;
        cfg.dst_maxburst = DST_MAXBURST;

        /* IN */
        ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_in) {
                dev_err(dd->dev, "IN prep_slave_sg() failed\n");
                return -EINVAL;
        }

        /* No callback necessary */
        tx_in->callback_param = dd;

        /* OUT */
        ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_out) {
                dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
                return -EINVAL;
        }

        tx_out->callback = omap_aes_dma_out_callback;
        tx_out->callback_param = dd;

        dmaengine_submit(tx_in);
        dmaengine_submit(tx_out);

        dma_async_issue_pending(dd->dma_lch_in);
        dma_async_issue_pending(dd->dma_lch_out);

        /* start DMA */
        dd->pdata->trigger(dd, dd->total);

        return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                                        crypto_ablkcipher_reqtfm(dd->req));
        int err;

        pr_debug("total: %zu\n", dd->total);

        err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
        if (!err) {
                dev_err(dd->dev, "dma_map_sg() error\n");
                return -EINVAL;
        }

        err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
        if (!err) {
                dev_err(dd->dev, "dma_map_sg() error\n");
                return -EINVAL;
        }

        err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
                                 dd->out_sg_len);
        if (err) {
                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                             DMA_FROM_DEVICE);
        }

        return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
        struct ablkcipher_request *req = dd->req;

        pr_debug("err: %d\n", err);

        dd->flags &= ~FLAGS_BUSY;

        req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
        int err = 0;

        pr_debug("total: %zu\n", dd->total);

        omap_aes_dma_stop(dd);

        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);

        dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
        dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);

        return err;
}

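/*
 * Queue the new request (if any); when the engine is idle, dequeue the
 * next one, program key/IV/control and kick off DMA. The return value is
 * the enqueue status (-EINPROGRESS, or -EBUSY when backlogged).
 */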
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                               struct ablkcipher_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct omap_aes_ctx *ctx;
        struct omap_aes_reqctx *rctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
        if (dd->flags & FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
        dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
        BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->ctx = ctx;
        ctx->dd = dd;

        err = omap_aes_write_ctrl(dd);
        if (!err)
                err = omap_aes_crypt_dma_start(dd);
        if (err) {
                /* aes_task will not finish it, so do it here */
                omap_aes_finish_req(dd, err);
                tasklet_schedule(&dd->queue_task);
        }

        return ret; /* return ret, which is enqueue return value */
}

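/*
 * DMA-out completion path: sync the result back for the CPU, tear down
 * the mappings, complete the request and restart the queue.
 */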
static void omap_aes_done_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

        pr_debug("enter done_task\n");

        dma_sync_sg_for_cpu(dd->dev, dd->out_sg, dd->out_sg_len,
                            DMA_FROM_DEVICE);

        omap_aes_crypt_dma_stop(dd);
        omap_aes_finish_req(dd, 0);
        omap_aes_handle_queue(dd, NULL);

        pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

        omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd;

        pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                  !!(mode & FLAGS_ENCRYPT),
                  !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                pr_err("request size is not an exact number of AES blocks\n");
                return -EINVAL;
        }

        dd = omap_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
                   keylen != AES_KEYSIZE_256)
                return -EINVAL;

        pr_debug("enter, keylen: %d\n", keylen);

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
        struct omap_aes_dev *dd = NULL;

        /* Find AES device, currently picks the first device */
        spin_lock_bh(&list_lock);
        list_for_each_entry(dd, &dev_list, list) {
                break;
        }
        spin_unlock_bh(&list_lock);

        pm_runtime_get_sync(dd->dev);
        tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

        return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct omap_aes_dev *dd = NULL;

        /* Find AES device, currently picks the first device */
        spin_lock_bh(&list_lock);
        list_for_each_entry(dd, &dev_list, list) {
                break;
        }
        spin_unlock_bh(&list_lock);

        pm_runtime_put_sync(dd->dev);
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_ecb_encrypt,
                .decrypt        = omap_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_cbc_encrypt,
                .decrypt        = omap_aes_cbc_decrypt,
        }
}
};

static struct crypto_alg algs_ctr[] = {
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .geniv          = "eseqiv",
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_ctr_encrypt,
                .decrypt        = omap_aes_ctr_decrypt,
        }
},
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
        {
                .algs_list      = algs_ecb_cbc,
                .size           = ARRAY_SIZE(algs_ecb_cbc),
        },
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
        .trigger        = omap_aes_dma_trigger_omap2,
        .key_ofs        = 0x1c,
        .iv_ofs         = 0x20,
        .ctrl_ofs       = 0x30,
        .data_ofs       = 0x34,
        .rev_ofs        = 0x44,
        .mask_ofs       = 0x48,
        .dma_enable_in  = BIT(2),
        .dma_enable_out = BIT(3),
        .dma_start      = BIT(5),
        .major_mask     = 0xf0,
        .major_shift    = 4,
        .minor_mask     = 0x0f,
        .minor_shift    = 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
        {
                .algs_list      = algs_ecb_cbc,
                .size           = ARRAY_SIZE(algs_ecb_cbc),
        },
        {
                .algs_list      = algs_ctr,
                .size           = ARRAY_SIZE(algs_ctr),
        },
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
        .trigger        = omap_aes_dma_trigger_omap2,
        .key_ofs        = 0x1c,
        .iv_ofs         = 0x20,
        .ctrl_ofs       = 0x30,
        .data_ofs       = 0x34,
        .rev_ofs        = 0x44,
        .mask_ofs       = 0x48,
        .dma_enable_in  = BIT(2),
        .dma_enable_out = BIT(3),
        .dma_start      = BIT(5),
        .major_mask     = 0xf0,
        .major_shift    = 4,
        .minor_mask     = 0x0f,
        .minor_shift    = 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
        .trigger        = omap_aes_dma_trigger_omap4,
        .key_ofs        = 0x3c,
        .iv_ofs         = 0x40,
        .ctrl_ofs       = 0x50,
        .data_ofs       = 0x60,
        .rev_ofs        = 0x80,
        .mask_ofs       = 0x84,
        .dma_enable_in  = BIT(5),
        .dma_enable_out = BIT(6),
        .major_mask     = 0x0700,
        .major_shift    = 8,
        .minor_mask     = 0x003f,
        .minor_shift    = 0,
};
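/*
 * Note: .dma_start is left zero above, so the OMAP4 trigger hook only
 * programs the LENGTH registers and enables the DMA request lines;
 * presumably the engine starts once lengths and data are in place.
 */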

static const struct of_device_id omap_aes_of_match[] = {
        {
                .compatible     = "ti,omap2-aes",
                .data           = &omap_aes_pdata_omap2,
        },
        {
                .compatible     = "ti,omap3-aes",
                .data           = &omap_aes_pdata_omap3,
        },
        {
                .compatible     = "ti,omap4-aes",
                .data           = &omap_aes_pdata_omap4,
        },
        {},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
                struct device *dev, struct resource *res)
{
        struct device_node *node = dev->of_node;
        const struct of_device_id *match;
        int err = 0;

        match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
        if (!match) {
                dev_err(dev, "no compatible OF match\n");
                err = -EINVAL;
                goto err;
        }

        err = of_address_to_resource(node, 0, res);
        if (err < 0) {
                dev_err(dev, "can't translate OF node address\n");
                err = -EINVAL;
                goto err;
        }

        dd->dma_out = -1; /* Dummy value that's unused */
        dd->dma_in = -1; /* Dummy value that's unused */

        dd->pdata = match->data;

err:
        return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
        {},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
                struct device *dev, struct resource *res)
{
        return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
                struct platform_device *pdev, struct resource *res)
{
        struct device *dev = &pdev->dev;
        struct resource *r;
        int err = 0;

        /* Get the base address */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto err;
        }
        memcpy(res, r, sizeof(*res));

        /* Get the DMA out channel */
        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!r) {
                dev_err(dev, "no DMA out resource info\n");
                err = -ENODEV;
                goto err;
        }
        dd->dma_out = r->start;

        /* Get the DMA in channel */
        r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!r) {
                dev_err(dev, "no DMA in resource info\n");
                err = -ENODEV;
                goto err;
        }
        dd->dma_in = r->start;

        /* Only OMAP2/3 can be non-DT */
        dd->pdata = &omap_aes_pdata_omap2;

err:
        return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
        struct crypto_alg *algp;
        struct resource res;
        int err = -ENOMEM, i, j;
        u32 reg;

        dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        spin_lock_init(&dd->lock);
        crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

        err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
                               omap_aes_get_res_pdev(dd, pdev, &res);
        if (err)
                goto err_res;

        dd->io_base = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dd->io_base)) {
                err = PTR_ERR(dd->io_base);
                goto err_res;
        }
        dd->phys_base = res.start;

        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);

        omap_aes_dma_stop(dd);

        reg = omap_aes_read(dd, AES_REG_REV(dd));

        pm_runtime_put_sync(dev);

        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
                 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
                 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
        tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

        err = omap_aes_dma_init(dd);
        if (err)
                goto err_dma;

        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
        list_add_tail(&dd->list, &dev_list);
        spin_unlock(&list_lock);

        for (i = 0; i < dd->pdata->algs_info_size; i++) {
                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                        algp = &dd->pdata->algs_info[i].algs_list[j];

                        pr_debug("reg alg: %s\n", algp->cra_name);
                        INIT_LIST_HEAD(&algp->cra_list);

                        err = crypto_register_alg(algp);
                        if (err)
                                goto err_algs;

                        dd->pdata->algs_info[i].registered++;
                }
        }

        return 0;
err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);
        omap_aes_dma_cleanup(dd);
err_dma:
        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        pm_runtime_disable(dev);
err_res:
        kfree(dd);
        dd = NULL;
err_data:
        dev_err(dev, "initialization failed.\n");
        return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
        int i, j;

        if (!dd)
                return -ENODEV;

        spin_lock(&list_lock);
        list_del(&dd->list);
        spin_unlock(&list_lock);

        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);

        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        omap_aes_dma_cleanup(dd);
        pm_runtime_disable(dd->dev);
        kfree(dd);
        dd = NULL;

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
        pm_runtime_put_sync(dev);
        return 0;
}

static int omap_aes_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
        .probe  = omap_aes_probe,
        .remove = omap_aes_remove,
        .driver = {
                .name   = "omap-aes",
                .owner  = THIS_MODULE,
                .pm     = &omap_aes_pm_ops,
                .of_match_table = omap_aes_of_match,
        },
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");