drivers/crypto/bfin_crc.c
/*
 * Cryptographic API.
 *
 * Support Blackfin CRC HW acceleration.
 *
 * Copyright 2012 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <asm/unaligned.h>

#include <asm/dma.h>
#include <asm/portmux.h>

#include "bfin_crc.h"

#define CRC_CCRYPTO_QUEUE_LENGTH        5

#define DRIVER_NAME "bfin-hmac-crc"
#define CHKSUM_DIGEST_SIZE      4
#define CHKSUM_BLOCK_SIZE       1

#define CRC_MAX_DMA_DESC        100

#define CRC_CRYPTO_STATE_UPDATE         1
#define CRC_CRYPTO_STATE_FINALUPDATE    2
#define CRC_CRYPTO_STATE_FINISH         3
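
/*
 * Request life cycle (descriptive note): .update runs in STATE_UPDATE,
 * .finup (and .digest, which is init + finup) in STATE_FINALUPDATE, and
 * .final in STATE_FINISH.  The two final states flush the 0-3 byte
 * remainder carried between updates through one extra 32-bit DMA
 * descriptor.
 */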

struct bfin_crypto_crc {
        struct list_head        list;
        struct device           *dev;
        spinlock_t              lock;

        int                     irq;
        int                     dma_ch;
        u32                     poly;
        struct crc_register     *regs;

        struct ahash_request    *req; /* current request in operation */
        struct dma_desc_array   *sg_cpu; /* virt addr of sg dma descriptors */
        dma_addr_t              sg_dma; /* phy addr of sg dma descriptors */
        u8                      *sg_mid_buf;
        dma_addr_t              sg_mid_dma; /* phy addr of sg mid buffer */

        struct tasklet_struct   done_task;
        struct crypto_queue     queue; /* waiting requests */

        u8                      busy:1; /* crc device in operation flag */
};

static struct bfin_crypto_crc_list {
        struct list_head        dev_list;
        spinlock_t              lock;
} crc_list;

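/*
 * Per-request state.  bufnext holds the 0-3 bytes left over after an
 * update is chopped to a 32-bit multiple; buflast re-injects the
 * previous leftover at the head of the next update's scatterlist.
 */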
struct bfin_crypto_crc_reqctx {
        struct bfin_crypto_crc  *crc;

        unsigned int            total;  /* total request bytes */
        size_t                  sg_buflen; /* bytes for this update */
        unsigned int            sg_nents;
        struct scatterlist      *sg; /* sg list head for this update */
        struct scatterlist      bufsl[2]; /* chained sg list */

        size_t                  bufnext_len;
        size_t                  buflast_len;
        u8                      bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
        u8                      buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */

        u8                      flag;
};

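/*
 * Per-tfm state: the bound crc device and the 32-bit seed programmed
 * through .setkey.
 */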
struct bfin_crypto_crc_ctx {
        struct bfin_crypto_crc  *crc;
        u32                     key;
};

/*
 * get element in scatter list by given index
 */
static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
                                unsigned int index)
{
        struct scatterlist *sg = NULL;
        int i;

        for_each_sg(sg_list, sg, nents, i)
                if (i == index)
                        break;

        return sg;
}

static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
        writel(0, &crc->regs->datacntrld);
        writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
        writel(key, &crc->regs->curresult);

        /* setup CRC interrupts */
        writel(CMPERRI | DCNTEXPI, &crc->regs->status);
        writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);

        return 0;
}

static int bfin_crypto_crc_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
        struct bfin_crypto_crc *crc;

        spin_lock_bh(&crc_list.lock);
        list_for_each_entry(crc, &crc_list.dev_list, list) {
                crc_ctx->crc = crc;
                break;
        }
        spin_unlock_bh(&crc_list.lock);

        ctx->crc = crc;
        dev_dbg(ctx->crc->dev, "crc_init\n");

        if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
                dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
                        CRC_MAX_DMA_DESC);
                return -EINVAL;
        }

        ctx->bufnext_len = 0;
        ctx->buflast_len = 0;
        ctx->sg_buflen = 0;
        ctx->total = 0;
        ctx->flag = 0;

        /* init crc results */
        put_unaligned_le32(crc_ctx->key, req->result);

        dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
}

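/*
 * Build a descriptor-array DMA over the request's scatterlist.  The CRC
 * peripheral consumes 32-bit words, so each sg entry is chopped down to
 * a 4-byte multiple; the 1-3 byte remainder is glued to the head of the
 * following entry in sg_mid_buf and fed through its own one-word
 * descriptor.
 */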
static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
{
        struct scatterlist *sg;
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
        int i = 0, j = 0;
        unsigned long dma_config;
        unsigned int dma_count;
        unsigned int dma_addr;
        unsigned int mid_dma_count = 0;
        int dma_mod;

        dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);

        for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
                dma_addr = sg_dma_address(sg);
                /* deduct the extra bytes in the last sg */
                if (sg_is_last(sg))
                        dma_count = sg_dma_len(sg) - ctx->bufnext_len;
                else
                        dma_count = sg_dma_len(sg);

                if (mid_dma_count) {
                        /*
                         * Pad the last middle dma buffer up to 4 bytes with
                         * the first bytes of the current sg buffer, then
                         * advance the current sg address and deduct that
                         * length from its count.
                         */
                        memcpy(crc->sg_mid_buf + (i << 2) + mid_dma_count,
                                sg_virt(sg),
                                CHKSUM_DIGEST_SIZE - mid_dma_count);
                        dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
                        dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;

                        dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
                                DMAEN | PSIZE_32 | WDSIZE_32;

                        /* setup new dma descriptor for next middle dma */
                        crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
                        crc->sg_cpu[i].cfg = dma_config;
                        crc->sg_cpu[i].x_count = 1;
                        crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
                        dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                                "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                                i, crc->sg_cpu[i].start_addr,
                                crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                                crc->sg_cpu[i].x_modify);
                        i++;
                }

                dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
                /* chop current sg dma len to multiple of 32 bits */
                mid_dma_count = dma_count % 4;
                dma_count &= ~0x3;

                if (dma_addr % 4 == 0) {
                        dma_config |= WDSIZE_32;
                        dma_count >>= 2;
                        dma_mod = 4;
                } else if (dma_addr % 2 == 0) {
                        dma_config |= WDSIZE_16;
                        dma_count >>= 1;
                        dma_mod = 2;
                } else {
                        dma_config |= WDSIZE_8;
                        dma_mod = 1;
                }

                crc->sg_cpu[i].start_addr = dma_addr;
                crc->sg_cpu[i].cfg = dma_config;
                crc->sg_cpu[i].x_count = dma_count;
                crc->sg_cpu[i].x_modify = dma_mod;
                dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                        "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                        i, crc->sg_cpu[i].start_addr,
                        crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                        crc->sg_cpu[i].x_modify);
                i++;

                if (mid_dma_count) {
                        /* copy extra bytes to next middle dma buffer */
                        memcpy(crc->sg_mid_buf + (i << 2),
                                (u8 *)sg_virt(sg) + (dma_count << 2),
                                mid_dma_count);
                }
        }

        dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
        /* For a final update request, append the buffer for the next update as well */
        if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
                ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
                crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
                                                CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
                crc->sg_cpu[i].cfg = dma_config;
                crc->sg_cpu[i].x_count = 1;
                crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
                dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                        "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                        i, crc->sg_cpu[i].start_addr,
                        crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                        crc->sg_cpu[i].x_modify);
                i++;
        }

        if (i == 0)
                return;

        /* Set the last descriptor to stop mode */
        crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
        crc->sg_cpu[i - 1].cfg |= DI_EN;
        set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
        set_dma_x_count(crc->dma_ch, 0);
        set_dma_x_modify(crc->dma_ch, 0);
        set_dma_config(crc->dma_ch, dma_config);
}

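/*
 * Take the next request off the software queue and kick off its DMA.
 * Called both from the ahash entry points (with a new request) and from
 * the completion tasklet (with NULL) to drain the backlog.
 */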
static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
                                  struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct bfin_crypto_crc_reqctx *ctx;
        struct scatterlist *sg;
        int ret = 0;
        int nsg, i, j;
        unsigned int nextlen;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&crc->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&crc->queue, req);
        if (crc->busy) {
                spin_unlock_irqrestore(&crc->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&crc->queue);
        async_req = crypto_dequeue_request(&crc->queue);
        if (async_req)
                crc->busy = 1;
        spin_unlock_irqrestore(&crc->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        crc->req = req;
        ctx = ahash_request_ctx(req);
        ctx->sg = NULL;
        ctx->sg_buflen = 0;
        ctx->sg_nents = 0;

        dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
                                                ctx->flag, req->nbytes);

        if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
                if (ctx->bufnext_len == 0) {
                        crc->busy = 0;
                        return 0;
                }

                /* Zero-pad the last crc update buffer to 32 bits */
                memset(ctx->bufnext + ctx->bufnext_len, 0,
                                CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
        } else {
                /* Stash data shorter than 32 bits in the buffer for the next update. */
                if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
                        memcpy(ctx->bufnext + ctx->bufnext_len,
                                sg_virt(req->src), req->nbytes);
                        ctx->bufnext_len += req->nbytes;
                        if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
                                ctx->bufnext_len) {
                                goto finish_update;
                        } else {
                                crc->busy = 0;
                                return 0;
                        }
                }

                if (ctx->bufnext_len) {
                        /* Chain in extra bytes of last update */
                        ctx->buflast_len = ctx->bufnext_len;
                        memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);

                        nsg = req->nbytes ? 2 : 1;
                        sg_init_table(ctx->bufsl, nsg);
                        sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
                        if (nsg > 1)
                                sg_chain(ctx->bufsl, nsg, req->src);
                        ctx->sg = ctx->bufsl;
                } else
                        ctx->sg = req->src;

                /* Chop the crc buffer size to a multiple of 32 bits */
                nsg = sg_nents(ctx->sg);
                ctx->sg_nents = nsg;
                ctx->sg_buflen = ctx->buflast_len + req->nbytes;
                ctx->bufnext_len = ctx->sg_buflen % 4;
                ctx->sg_buflen &= ~0x3;

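                /*
                 * Walk the scatterlist backwards, peeling the unaligned
                 * tail into bufnext; entries consumed whole are dropped
                 * from this update's DMA.
                 */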
                if (ctx->bufnext_len) {
                        /* copy extra bytes to buffer for next update */
                        memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
                        nextlen = ctx->bufnext_len;
                        for (i = nsg - 1; i >= 0; i--) {
                                sg = sg_get(ctx->sg, nsg, i);
                                j = min(nextlen, sg_dma_len(sg));
                                memcpy(ctx->bufnext + nextlen - j,
                                        sg_virt(sg) + sg_dma_len(sg) - j, j);
                                if (j == sg_dma_len(sg))
                                        ctx->sg_nents--;
                                nextlen -= j;
                                if (nextlen == 0)
                                        break;
                        }
                }
        }

finish_update:
        if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
                ctx->flag == CRC_CRYPTO_STATE_FINISH))
                ctx->sg_buflen += CHKSUM_DIGEST_SIZE;

        /* set the CRC data count before starting DMA */
        writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);

        /* setup and enable CRC DMA */
        bfin_crypto_crc_config_dma(crc);

        /* finally kick off CRC operation */
        reg = readl(&crc->regs->control);
        writel(reg | BLKEN, &crc->regs->control);

        return -EINPROGRESS;
}

static int bfin_crypto_crc_update(struct ahash_request *req)
{
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return 0;

        dev_dbg(ctx->crc->dev, "crc_update\n");
        ctx->total += req->nbytes;
        ctx->flag = CRC_CRYPTO_STATE_UPDATE;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        dev_dbg(ctx->crc->dev, "crc_final\n");
        ctx->flag = CRC_CRYPTO_STATE_FINISH;
        crc_ctx->key = 0;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
        ctx->total += req->nbytes;
        ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
        crc_ctx->key = 0;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_digest(struct ahash_request *req)
{
        int ret;

        ret = bfin_crypto_crc_init(req);
        if (ret)
                return ret;

        return bfin_crypto_crc_finup(req);
}

static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);

        dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
        if (keylen != CHKSUM_DIGEST_SIZE) {
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        crc_ctx->key = get_unaligned_le32(key);

        return 0;
}

static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
{
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);

        crc_ctx->key = 0;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct bfin_crypto_crc_reqctx));

        return 0;
}

static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
{
}

static struct ahash_alg algs = {
        .init           = bfin_crypto_crc_init,
        .update         = bfin_crypto_crc_update,
        .final          = bfin_crypto_crc_final,
        .finup          = bfin_crypto_crc_finup,
        .digest         = bfin_crypto_crc_digest,
        .setkey         = bfin_crypto_crc_setkey,
        .halg.digestsize        = CHKSUM_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "hmac(crc32)",
                .cra_driver_name        = DRIVER_NAME,
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_ASYNC,
                .cra_blocksize          = CHKSUM_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct bfin_crypto_crc_ctx),
                .cra_alignmask          = 3,
                .cra_module             = THIS_MODULE,
                .cra_init               = bfin_crypto_crc_cra_init,
                .cra_exit               = bfin_crypto_crc_cra_exit,
        }
};

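/*
 * Usage sketch (illustrative, not part of this driver): a kernel client
 * reaches the accelerator through the generic ahash API.  A minimal,
 * hedged example, assuming a DMA-able buffer `data` of `len` bytes and
 * a 4-byte little-endian seed `seed`:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[CHKSUM_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	crypto_ahash_setkey(tfm, seed, CHKSUM_DIGEST_SIZE);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);	(returns -EINPROGRESS; completion
 *					 arrives via the request callback)
 *
 * Allocation checks, the completion callback set through
 * ahash_request_set_callback(), and the matching free calls are omitted.
 */
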
static void bfin_crypto_crc_done_task(unsigned long data)
{
        struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;

        bfin_crypto_crc_handle_queue(crc, NULL);
}

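/*
 * DCNTEXP fires once the programmed number of 32-bit words has passed
 * through the engine: copy out the result, clear BLKEN, complete the
 * request and let the tasklet pull in the next queued request.
 */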
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
        struct bfin_crypto_crc *crc = dev_id;
        u32 reg;

        if (readl(&crc->regs->status) & DCNTEXP) {
                writel(DCNTEXP, &crc->regs->status);

                /* prepare results */
                put_unaligned_le32(readl(&crc->regs->result),
                        crc->req->result);

                reg = readl(&crc->regs->control);
                writel(reg & ~BLKEN, &crc->regs->control);
                crc->busy = 0;

                if (crc->req->base.complete)
                        crc->req->base.complete(&crc->req->base, 0);

                tasklet_schedule(&crc->done_task);

                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

#ifdef CONFIG_PM
/**
 *      bfin_crypto_crc_suspend - suspend crc device
 *      @pdev: device being suspended
 *      @state: requested suspend state
 */
static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
        int i = 100000;

        while ((readl(&crc->regs->control) & BLKEN) && --i)
                cpu_relax();

        if (i == 0)
                return -EBUSY;

        return 0;
}
#else
# define bfin_crypto_crc_suspend NULL
#endif

#define bfin_crypto_crc_resume NULL

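/*
 * Board hookup sketch (hypothetical, normally kept in the board file):
 * the CRC polynomial reaches the driver through platform_data, and the
 * MMIO window, DCNTEXP IRQ and DMA channel come in as platform
 * resources.
 *
 *	static struct platform_device bfin_crc_device = {
 *		.name           = "bfin-hmac-crc",
 *		.id             = 0,
 *		.dev            = {
 *			.platform_data = (void *)0x4C11DB7,	(example poly)
 *		},
 *	};
 *	platform_device_register(&bfin_crc_device);
 *
 * The struct resource array carrying IORESOURCE_MEM, the IRQ and
 * IORESOURCE_DMA is omitted here; probe() below expects all three.
 */
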
/**
 *      bfin_crypto_crc_probe - probe and initialize crc device
 *      @pdev: platform device being probed
 */
static int bfin_crypto_crc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct bfin_crypto_crc *crc;
        unsigned int timeout = 100000;
        int ret;

        crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                dev_err(&pdev->dev, "failed to allocate bfin_crypto_crc\n");
                return -ENOMEM;
        }

        crc->dev = dev;

        INIT_LIST_HEAD(&crc->list);
        spin_lock_init(&crc->lock);
        tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
        crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
                return -ENOENT;
        }

        crc->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(crc->regs)) {
                dev_err(&pdev->dev, "Cannot map CRC IO\n");
                return PTR_ERR(crc->regs);
        }

        crc->irq = platform_get_irq(pdev, 0);
        if (crc->irq < 0) {
                dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
                return -ENOENT;
        }

        ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
                        IRQF_SHARED, dev_name(dev), crc);
        if (ret) {
                dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
                return ret;
        }

        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "No CRC DMA channel specified\n");
                return -ENOENT;
        }
        crc->dma_ch = res->start;

        ret = request_dma(crc->dma_ch, dev_name(dev));
        if (ret) {
                dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
                return ret;
        }

        crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
        if (crc->sg_cpu == NULL) {
                ret = -ENOMEM;
                goto out_error_dma;
        }
        /*
         * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
         * 1 last + 1 next dma descriptors
         */
        crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
        crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
                        * ((CRC_MAX_DMA_DESC + 1) << 1);

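        /*
         * Program the CRC polynomial supplied through platform_data; the
         * engine then rebuilds its lookup table, signalled by LUTDONE.
         */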
        writel(0, &crc->regs->control);
        crc->poly = (u32)pdev->dev.platform_data;
        writel(crc->poly, &crc->regs->poly);

        while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
                cpu_relax();

        if (timeout == 0)
                dev_info(&pdev->dev, "init crc poly timeout\n");

        platform_set_drvdata(pdev, crc);

        spin_lock(&crc_list.lock);
        list_add(&crc->list, &crc_list.dev_list);
        spin_unlock(&crc_list.lock);

        if (list_is_singular(&crc_list.dev_list)) {
                ret = crypto_register_ahash(&algs);
                if (ret) {
                        dev_err(&pdev->dev,
                                "Can't register crypto ahash device\n");
                        goto out_error_dma;
                }
        }

        dev_info(&pdev->dev, "initialized\n");

        return 0;

out_error_dma:
        if (crc->sg_cpu)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
        free_dma(crc->dma_ch);

        return ret;
}

/**
 *      bfin_crypto_crc_remove - remove crc device and unregister ahash
 *      @pdev: device being removed
 */
static int bfin_crypto_crc_remove(struct platform_device *pdev)
{
        struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);

        if (!crc)
                return -ENODEV;

        spin_lock(&crc_list.lock);
        list_del(&crc->list);
        spin_unlock(&crc_list.lock);

        crypto_unregister_ahash(&algs);
        tasklet_kill(&crc->done_task);
        free_dma(crc->dma_ch);

        return 0;
}

static struct platform_driver bfin_crypto_crc_driver = {
        .probe     = bfin_crypto_crc_probe,
        .remove    = bfin_crypto_crc_remove,
        .suspend   = bfin_crypto_crc_suspend,
        .resume    = bfin_crypto_crc_resume,
        .driver    = {
                .name  = DRIVER_NAME,
        },
};

/**
 *      bfin_crypto_crc_mod_init - Initialize module
 *
 *      Checks the module params and registers the platform driver.
 *      Real work is in the platform probe function.
 */
static int __init bfin_crypto_crc_mod_init(void)
{
        int ret;

        pr_info("Blackfin hardware CRC crypto driver\n");

        INIT_LIST_HEAD(&crc_list.dev_list);
        spin_lock_init(&crc_list.lock);

        ret = platform_driver_register(&bfin_crypto_crc_driver);
        if (ret) {
                pr_err("unable to register driver\n");
                return ret;
        }

        return 0;
}

/**
 *      bfin_crypto_crc_mod_exit - Deinitialize module
 */
static void __exit bfin_crypto_crc_mod_exit(void)
{
        platform_driver_unregister(&bfin_crypto_crc_driver);
}

module_init(bfin_crypto_crc_mod_init);
module_exit(bfin_crypto_crc_mod_exit);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
MODULE_LICENSE("GPL");