/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors:  Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 *      Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

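/*
 * Hardware register map. All registers are 32 bits wide and are accessed
 * through the region mapped at hdev->io_base; message data is fed to the
 * block through a separate write port (hdev->cpu_addr / hdev->bus_addr).
 */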
#define CR_RESET                        0
#define CR_RESET_SET                    1
#define CR_RESET_UNSET                  0

#define CR_MESSAGE_LENGTH_H             0x4
#define CR_MESSAGE_LENGTH_L             0x8

#define CR_CONTROL                      0xc
#define CR_CONTROL_BYTE_ORDER_3210      0
#define CR_CONTROL_BYTE_ORDER_0123      1
#define CR_CONTROL_BYTE_ORDER_2310      2
#define CR_CONTROL_BYTE_ORDER_1032      3
#define CR_CONTROL_BYTE_ORDER_SHIFT     8
#define CR_CONTROL_ALGO_MD5             0
#define CR_CONTROL_ALGO_SHA1            1
#define CR_CONTROL_ALGO_SHA224          2
#define CR_CONTROL_ALGO_SHA256          3

#define CR_INTSTAT                      0x10
#define CR_INTENAB                      0x14
#define CR_INTCLEAR                     0x18
#define CR_INT_RESULTS_AVAILABLE        BIT(0)
#define CR_INT_NEW_RESULTS_SET          BIT(1)
#define CR_INT_RESULT_READ_ERR          BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR      BIT(3)
#define CR_INT_STATUS                   BIT(8)

#define CR_RESULT_QUEUE                 0x1c
#define CR_RSD0                         0x40
#define CR_CORE_REV                     0x50
#define CR_CORE_DES1                    0x60
#define CR_CORE_DES2                    0x70

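/*
 * Driver state flags. The low bits track device and request progress in
 * hdev->flags; the algorithm bits (MD5/SHA1/SHA224/SHA256) select the
 * hash in the per-request ctx->flags.
 */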
#define DRIVER_FLAGS_BUSY               BIT(0)
#define DRIVER_FLAGS_FINAL              BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE         BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY       BIT(3)
#define DRIVER_FLAGS_INIT               BIT(4)
#define DRIVER_FLAGS_CPU                BIT(5)
#define DRIVER_FLAGS_DMA_READY          BIT(6)
#define DRIVER_FLAGS_ERROR              BIT(7)
#define DRIVER_FLAGS_SG                 BIT(8)
#define DRIVER_FLAGS_SHA1               BIT(18)
#define DRIVER_FLAGS_SHA224             BIT(19)
#define DRIVER_FLAGS_SHA256             BIT(20)
#define DRIVER_FLAGS_MD5                BIT(21)

#define IMG_HASH_QUEUE_LENGTH           20
#define IMG_HASH_DMA_BURST              4
#define IMG_HASH_DMA_THRESHOLD          64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

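/*
 * Per-request state. For CPU transfers the data is staged in buffer[],
 * which is allocated as part of the request context (IMG_HASH_DMA_THRESHOLD
 * bytes, see img_hash_cra_init). For DMA transfers, sgfirst/sg/nents/
 * offset/sent track the walk through the request's scatterlist.
 */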
struct img_hash_request_ctx {
        struct img_hash_dev     *hdev;
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        unsigned long           flags;
        size_t                  digsize;

        dma_addr_t              dma_addr;
        size_t                  dma_ct;

        /* sg root */
        struct scatterlist      *sgfirst;
        /* walk state */
        struct scatterlist      *sg;
        size_t                  nents;
        size_t                  offset;
        unsigned int            total;
        size_t                  sent;

        unsigned long           op;

        size_t                  bufcnt;
        struct ahash_request    fallback_req;

        /* Zero length buffer must remain last member of struct */
        u8 buffer[0] __aligned(sizeof(u32));
};

struct img_hash_ctx {
        struct img_hash_dev     *hdev;
        unsigned long           flags;
        struct crypto_ahash     *fallback;
};

struct img_hash_dev {
        struct list_head        list;
        struct device           *dev;
        struct clk              *hash_clk;
        struct clk              *sys_clk;
        void __iomem            *io_base;

        phys_addr_t             bus_addr;
        void __iomem            *cpu_addr;

        spinlock_t              lock;
        int                     err;
        struct tasklet_struct   done_task;
        struct tasklet_struct   dma_task;

        unsigned long           flags;
        struct crypto_queue     queue;
        struct ahash_request    *req;

        struct dma_chan         *dma_lch;
};

struct img_hash_drv {
        struct list_head dev_list;
        spinlock_t lock;
};

static struct img_hash_drv img_hash = {
        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
        return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
                                  u32 offset, u32 value)
{
        writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
        return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

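/*
 * Program the CONTROL register with the byte order and the algorithm
 * selected for the current request; this arms the block for the data
 * that follows via CPU or DMA writes.
 */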
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

        if (ctx->flags & DRIVER_FLAGS_MD5)
                cr |= CR_CONTROL_ALGO_MD5;
        else if (ctx->flags & DRIVER_FLAGS_SHA1)
                cr |= CR_CONTROL_ALGO_SHA1;
        else if (ctx->flags & DRIVER_FLAGS_SHA224)
                cr |= CR_CONTROL_ALGO_SHA224;
        else if (ctx->flags & DRIVER_FLAGS_SHA256)
                cr |= CR_CONTROL_ALGO_SHA256;
        dev_dbg(hdev->dev, "Starting hash process\n");
        img_hash_write(hdev, CR_CONTROL, cr);

        /*
         * The hardware block requires two cycles between writing the control
         * register and writing the first word of data in non DMA mode, to
         * ensure the first data write is not grouped in burst with the control
         * register write a read is issued to 'flush' the bus.
         */
        if (!dma)
                img_hash_read(hdev, CR_CONTROL);
}

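/*
 * Push @length bytes at @buf into the accelerator's write port one 32-bit
 * word at a time. A final partial word, if any, is written whole; the
 * hardware discards the excess because the total message length has
 * already been programmed in img_hash_hw_init().
 */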
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
                             size_t length, int final)
{
        u32 count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);

        if (final)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                writel_relaxed(buffer[count], hdev->cpu_addr);

        return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->bufcnt) {
                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
                ctx->bufcnt = 0;
        }
        if (ctx->sg)
                tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
        if (ctx->dma_ct == 0) {
                dev_err(hdev->dev, "Invalid DMA sg\n");
                hdev->err = -EINVAL;
                return -EINVAL;
        }

        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
                                       sg,
                                       ctx->dma_ct,
                                       DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(hdev->dev, "Null DMA descriptor\n");
                hdev->err = -EINVAL;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
                return -EINVAL;
        }
        desc->callback = img_hash_dma_callback;
        desc->callback_param = hdev;
        dmaengine_submit(desc);
        dma_async_issue_pending(hdev->dma_lch);

        return 0;
}

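/*
 * CPU path for small requests (below IMG_HASH_DMA_THRESHOLD): copy the
 * whole source scatterlist into the staging buffer and write it out in
 * one go.
 */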
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
                                        ctx->buffer, hdev->req->nbytes);

        ctx->total = hdev->req->nbytes;
        ctx->bufcnt = 0;

        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

        img_hash_start(hdev, false);

        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return -EINVAL;

        memcpy(req->result, ctx->digest, ctx->digsize);

        return 0;
}

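/*
 * Drain the hardware result queue into ctx->digest. Each read of
 * CR_RESULT_QUEUE pops one big-endian word; the descending loop stores
 * them so the in-memory digest ends up in the expected order.
 */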
static void img_hash_copy_hash(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        int i;

        for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
                hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = ctx->hdev;

        if (!err) {
                img_hash_copy_hash(req);
                if (DRIVER_FLAGS_FINAL & hdev->flags)
                        err = img_hash_finish(req);
        } else {
                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
                ctx->flags |= DRIVER_FLAGS_ERROR;
        }

        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

        if (req->base.complete)
                req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        img_hash_start(hdev, true);

        dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

        /*
         * FINAL is set unconditionally: the whole request is hashed in one
         * operation, so the result is final once the transfer completes.
         */
        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

        tasklet_schedule(&hdev->dma_task);

        return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
        struct dma_slave_config dma_conf;
        int err;

        hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
        if (!hdev->dma_lch) {
                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return -EBUSY;
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->bus_addr;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
        dma_conf.device_fc = false;

        err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
        if (err) {
                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
                dma_release_channel(hdev->dma_lch);
                return err;
        }

        return 0;
}

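/*
 * Tasklet that walks the request scatterlist. Each entry is rounded down
 * to a whole number of 32-bit words for DMA; the remaining 1-3 bytes are
 * gathered into ctx->buffer (possibly spanning entries) and written by
 * the CPU from the DMA completion callback. See the block comment in the
 * function body for why the split is needed.
 */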
static void img_hash_dma_task(unsigned long d)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
        struct img_hash_request_ctx *ctx;
        u8 *addr;
        size_t nbytes, bleft, wsend, len, tbc;
        struct scatterlist tsg;

        if (!hdev->req)
                return;

        ctx = ahash_request_ctx(hdev->req);
        if (!ctx->sg)
                return;

        addr = sg_virt(ctx->sg);
        nbytes = ctx->sg->length - ctx->offset;

        /*
         * The hash accelerator does not support a data valid mask. This means
         * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
         * padding bytes in the last word written by that dma would erroneously
         * be included in the hash. To avoid this we round down the transfer,
         * and add the excess to the start of the next dma. It does not matter
         * that the final dma may not be a multiple of 4 bytes as the hashing
         * block is programmed to accept the correct number of bytes.
         */

        bleft = nbytes % 4;
        wsend = (nbytes / 4);

        if (wsend) {
                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
                if (img_hash_xmit_dma(hdev, &tsg)) {
                        dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
                        ctx->flags |= DRIVER_FLAGS_CPU;
                        hdev->err = 0;
                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
                                          wsend * 4, 0);
                        ctx->sent += wsend * 4;
                        wsend = 0;
                } else {
                        ctx->sent += wsend * 4;
                }
        }

        if (bleft) {
                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer, bleft, ctx->sent);
                tbc = 0;
                ctx->sg = sg_next(ctx->sg);
                while (ctx->sg && (ctx->bufcnt < 4)) {
                        len = ctx->sg->length;
                        if (likely(len > (4 - ctx->bufcnt)))
                                len = 4 - ctx->bufcnt;
                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer + ctx->bufcnt, len,
                                                 ctx->sent + ctx->bufcnt);
                        ctx->bufcnt += tbc;
                        if (tbc >= ctx->sg->length) {
                                ctx->sg = sg_next(ctx->sg);
                                tbc = 0;
                        }
                }

                ctx->sent += ctx->bufcnt;
                ctx->offset = tbc;

                if (!wsend)
                        img_hash_dma_callback(hdev);
        } else {
                ctx->offset = 0;
                ctx->sg = sg_next(ctx->sg);
        }
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->flags & DRIVER_FLAGS_SG)
                dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

        return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
        struct ahash_request *req = hdev->req;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        int err = 0;

        ctx->bufcnt = 0;

        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
                dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
                        req->nbytes);
                err = img_hash_write_via_dma(hdev);
        } else {
                dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
                        req->nbytes);
                err = img_hash_write_via_cpu(hdev);
        }
        return err;
}

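/*
 * Reset the block, unmask the completion interrupt and program the total
 * message length in bits. The length must be written up front, before any
 * data: it is how the hardware knows where the message ends.
 */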
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
        unsigned long long nbits;
        u32 u, l;

        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

        nbits = (u64)hdev->req->nbytes << 3;
        u = nbits >> 32;
        l = nbits;
        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
                hdev->flags |= DRIVER_FLAGS_INIT;
                hdev->err = 0;
        }
        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
        return 0;
}

static int img_hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

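/*
 * Queue handling. Called with a new request from img_hash_digest(), or
 * with req == NULL from the done tasklet to kick off whatever is pending.
 * Only one request runs at a time, guarded by DRIVER_FLAGS_BUSY.
 */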
static int img_hash_handle_queue(struct img_hash_dev *hdev,
                                 struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct img_hash_request_ctx *ctx;
        unsigned long flags;
        int err = 0, res = 0;

        spin_lock_irqsave(&hdev->lock, flags);

        if (req)
                res = ahash_enqueue_request(&hdev->queue, req);

        if (DRIVER_FLAGS_BUSY & hdev->flags) {
                spin_unlock_irqrestore(&hdev->lock, flags);
                return res;
        }

        backlog = crypto_get_backlog(&hdev->queue);
        async_req = crypto_dequeue_request(&hdev->queue);
        if (async_req)
                hdev->flags |= DRIVER_FLAGS_BUSY;

        spin_unlock_irqrestore(&hdev->lock, flags);

        if (!async_req)
                return res;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        hdev->req = req;

        ctx = ahash_request_ctx(req);

        dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
                 ctx->op, req->nbytes);

        err = img_hash_hw_init(hdev);

        if (!err)
                err = img_hash_process_data(hdev);

        if (err != -EINPROGRESS) {
                /* done_task will not finish so do it here */
                img_hash_finish_req(req, err);
        }
        return res;
}

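/*
 * update/final/finup (and init above) are passed straight through to the
 * software fallback tfm; only digest() uses the accelerator.
 */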
static int img_hash_update(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

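/*
 * Hardware path: pick a device from the global list (the first one, if
 * the tfm has not been bound to one yet), derive the algorithm from the
 * digest size and hand the request to the queue.
 */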
static int img_hash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = NULL;
        struct img_hash_dev *tmp;

        spin_lock(&img_hash.lock);
        if (!tctx->hdev) {
                list_for_each_entry(tmp, &img_hash.dev_list, list) {
                        hdev = tmp;
                        break;
                }
                tctx->hdev = hdev;
        } else {
                hdev = tctx->hdev;
        }
        spin_unlock(&img_hash.lock);

        ctx->hdev = hdev;
        ctx->flags = 0;
        ctx->digsize = crypto_ahash_digestsize(tfm);

        switch (ctx->digsize) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA256;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA224;
                break;
        case MD5_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_MD5;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->offset = 0;
        ctx->sent = 0;
        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->sgfirst = req->src;
        ctx->nents = sg_nents(ctx->sg);

        return img_hash_handle_queue(tctx->hdev, req);
}

static int img_hash_cra_init(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        const char *alg_name = crypto_tfm_alg_name(tfm);

        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("img_hash: Could not load fallback driver.\n");
                return PTR_ERR(ctx->fallback);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct img_hash_request_ctx) +
                                 IMG_HASH_DMA_THRESHOLD);

        return 0;
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tctx->fallback);
}

static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
        struct img_hash_dev *hdev = dev_id;
        u32 reg;

        reg = img_hash_read(hdev, CR_INTSTAT);
        img_hash_write(hdev, CR_INTCLEAR, reg);

        if (reg & CR_INT_NEW_RESULTS_SET) {
                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
                if (DRIVER_FLAGS_BUSY & hdev->flags) {
                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
                        tasklet_schedule(&hdev->done_task);
                } else {
                        dev_warn(hdev->dev,
                                 "HASH interrupt when no active requests.\n");
                }
        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
                dev_warn(hdev->dev,
                         "IRQ triggered before the hash had completed\n");
        } else if (reg & CR_INT_RESULT_READ_ERR) {
                dev_warn(hdev->dev,
                         "Attempt to read from an empty result queue\n");
        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
                dev_warn(hdev->dev,
                         "Data written before the hardware was configured\n");
        }
        return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "img-md5",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "img-sha1",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "img-sha224",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "img-sha256",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

static int img_register_algs(struct img_hash_dev *hdev)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
                err = crypto_register_ahash(&img_algs[i]);
                if (err)
                        goto err_reg;
        }
        return 0;

err_reg:
        for (; i--; )
                crypto_unregister_ahash(&img_algs[i]);

        return err;
}

static void img_unregister_algs(struct img_hash_dev *hdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
                crypto_unregister_ahash(&img_algs[i]);
}

static void img_hash_done_task(unsigned long data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        int err = 0;

        if (hdev->err == -EINVAL) {
                err = hdev->err;
                goto finish;
        }

        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
                img_hash_handle_queue(hdev, NULL);
                return;
        }

        if (DRIVER_FLAGS_CPU & hdev->flags) {
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
                        img_hash_write_via_dma_stop(hdev);
                        if (hdev->err) {
                                err = hdev->err;
                                goto finish;
                        }
                }
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
                                        DRIVER_FLAGS_OUTPUT_READY);
                        goto finish;
                }
        }
        return;

finish:
        img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
        { .compatible = "img,hash-accelerator" },
        {}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;
        struct device *dev = &pdev->dev;
        struct resource *hash_res;
        int irq;
        int err;

        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return -ENOMEM;

        spin_lock_init(&hdev->lock);

        hdev->dev = dev;

        platform_set_drvdata(pdev, hdev);

        INIT_LIST_HEAD(&hdev->list);

        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

        /* Register bank */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        hdev->io_base = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
                dev_err(dev, "can't ioremap, returned %d\n", err);
                goto res_err;
        }

        /* Write port (DMA or CPU) */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                dev_err(dev, "can't ioremap write port\n");
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
        hdev->bus_addr = hash_res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = irq;
                goto res_err;
        }

        err = devm_request_irq(dev, irq, img_irq_handler, 0,
                               dev_name(dev), hdev);
        if (err) {
                dev_err(dev, "unable to request irq\n");
                goto res_err;
        }
        dev_dbg(dev, "using IRQ channel %d\n", irq);

        hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
        if (IS_ERR(hdev->hash_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->hash_clk);
                goto res_err;
        }

        hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(hdev->sys_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->sys_clk);
                goto res_err;
        }

        err = clk_prepare_enable(hdev->hash_clk);
        if (err)
                goto res_err;

        err = clk_prepare_enable(hdev->sys_clk);
        if (err)
                goto clk_err;

        err = img_hash_dma_init(hdev);
        if (err)
                goto dma_err;

        dev_dbg(dev, "using %s for DMA transfers\n",
                dma_chan_name(hdev->dma_lch));

        spin_lock(&img_hash.lock);
        list_add_tail(&hdev->list, &img_hash.dev_list);
        spin_unlock(&img_hash.lock);

        err = img_register_algs(hdev);
        if (err)
                goto err_algs;
        dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

        return 0;

err_algs:
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);
        dma_release_channel(hdev->dma_lch);
dma_err:
        clk_disable_unprepare(hdev->sys_clk);
clk_err:
        clk_disable_unprepare(hdev->hash_clk);
res_err:
        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
        struct img_hash_dev *hdev = platform_get_drvdata(pdev);

        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);

        img_unregister_algs(hdev);

        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        dma_release_channel(hdev->dma_lch);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

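/*
 * System sleep support: only the clocks need gating. The block itself is
 * reset and fully reprogrammed by img_hash_hw_init() at the start of each
 * request, so no register state has to be saved across suspend.
 */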
#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

static int img_hash_resume(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(hdev->hash_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(hdev->sys_clk);
        if (ret) {
                clk_disable_unprepare(hdev->hash_clk);
                return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
        .probe          = img_hash_probe,
        .remove         = img_hash_remove,
        .driver         = {
                .name   = "img-hash-accelerator",
                .pm     = &img_hash_pm_ops,
                .of_match_table = of_match_ptr(img_hash_match),
        }
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");