/*
 * Support Blackfin CRC HW acceleration.
 *
 * Copyright 2012 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <asm/unaligned.h>

#include <asm/dma.h>
#include <asm/portmux.h>

#include "bfin_crc.h"
#define CRC_CCRYPTO_QUEUE_LENGTH	5

#define DRIVER_NAME "bfin-hmac-crc"
#define CHKSUM_DIGEST_SIZE	4
#define CHKSUM_BLOCK_SIZE	1

#define CRC_MAX_DMA_DESC	100

#define CRC_CRYPTO_STATE_UPDATE		1
#define CRC_CRYPTO_STATE_FINALUPDATE	2
#define CRC_CRYPTO_STATE_FINISH		3
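/*
 * A request is tagged with one of the three states above: UPDATE feeds
 * more data into the running CRC, FINALUPDATE feeds the last chunk and
 * then reads out the digest, and FINISH only flushes whatever tail
 * bytes are still buffered before reading out the digest.
 */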
struct bfin_crypto_crc {
	struct list_head	list;
	struct device		*dev;
	spinlock_t		lock;

	int			irq;
	unsigned int		dma_ch;
	u32			poly;
	struct crc_register	*regs;

	struct ahash_request	*req; /* current request in operation */
	struct dma_desc_array	*sg_cpu; /* virt addr of sg dma descriptors */
	dma_addr_t		sg_dma; /* phy addr of sg dma descriptors */
	u8			*sg_mid_buf; /* virt addr of sg mid buffer */
	dma_addr_t		sg_mid_dma; /* phy addr of sg mid buffer */

	struct tasklet_struct	done_task;
	struct crypto_queue	queue; /* waiting requests */

	u8			busy:1; /* crc device in operation flag */
};
static struct bfin_crypto_crc_list {
	struct list_head	dev_list;
	spinlock_t		lock;
} crc_list;
struct bfin_crypto_crc_reqctx {
	struct bfin_crypto_crc	*crc;

	unsigned int		total;	/* total request bytes */
	size_t			sg_buflen; /* bytes for this update */
	unsigned int		sg_nents;
	struct scatterlist	*sg; /* sg list head for this update */
	struct scatterlist	bufsl[2]; /* chained sg list */

	size_t			bufnext_len;
	size_t			buflast_len;
	u8			bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
	u8			buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */

	u8			flag;
};
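/*
 * The CRC engine consumes whole 32-bit words, so each update is chopped
 * to a multiple of 4 bytes: the 0-3 leftover bytes are parked in
 * bufnext and prepended (via buflast/bufsl) to the data of the next
 * update, or zero-padded into a final word when the request finishes.
 */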
struct bfin_crypto_crc_ctx {
	struct bfin_crypto_crc	*crc;
	u32			key;
};
/*
 * get element in scatter list by given index
 */
static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
				  unsigned int index)
{
	struct scatterlist *sg = NULL;
	int i;

	for_each_sg(sg_list, sg, nents, i)
		if (i == index)
			break;

	return sg;
}
static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
	writel(0, &crc->regs->datacntrld);
	writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
	writel(key, &crc->regs->curresult);

	/* setup CRC interrupts */
	writel(CMPERRI | DCNTEXPI, &crc->regs->status);
	writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);

	return 0;
}
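/*
 * Note the ordering above: the seed is loaded into curresult before any
 * data is fed in, and the status bits are written back (apparently
 * write-1-to-clear) before the same interrupt sources are enabled in
 * intrenset, so a stale compare-error or counter-expired event should
 * not fire immediately.
 */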
static int bfin_crypto_crc_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
	struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
	struct bfin_crypto_crc *crc;

	spin_lock_bh(&crc_list.lock);
	list_for_each_entry(crc, &crc_list.dev_list, list) {
		crc_ctx->crc = crc;
		break;
	}
	spin_unlock_bh(&crc_list.lock);

	/* bind the request to the device before ctx->crc is dereferenced */
	ctx->crc = crc;

	dev_dbg(ctx->crc->dev, "crc_init\n");

	if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
		dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
			CRC_MAX_DMA_DESC);
		return -EINVAL;
	}

	ctx->bufnext_len = 0;
	ctx->buflast_len = 0;
	ctx->sg_buflen = 0;
	ctx->total = 0;
	ctx->flag = 0;

	/* init crc results */
	put_unaligned_le32(crc_ctx->key, req->result);

	dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
}
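/*
 * DMA setup: one descriptor-array entry is built per scatterlist
 * element. Because the CRC FIFO is fed 32 bits at a time, any sg
 * element whose DMA-able length is not a multiple of 4 leaves 1-3
 * tail bytes behind; those are staged in sg_mid_buf, glued together
 * with the leading bytes of the next element, and transferred through
 * an extra one-word descriptor between the two elements.
 */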
static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
{
	struct scatterlist *sg;
	struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
	int i = 0, j = 0;
	unsigned long dma_config;
	unsigned int dma_count;
	unsigned int dma_addr;
	unsigned int mid_dma_count = 0;
	int dma_mod;

	dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);

	for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
		dma_addr = sg_dma_address(sg);
		/* deduce extra bytes in last sg */
		if (sg_is_last(sg))
			dma_count = sg_dma_len(sg) - ctx->bufnext_len;
		else
			dma_count = sg_dma_len(sg);
		if (mid_dma_count) {
			/* Append last middle dma buffer to 4 bytes with first
			 * bytes in current sg buffer. Move addr of current
			 * sg and deduce the length of current sg.
			 */
			memcpy(crc->sg_mid_buf + (i << 2) + mid_dma_count,
			       sg_virt(sg),
			       CHKSUM_DIGEST_SIZE - mid_dma_count);
			dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
			dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;

			dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
				DMAEN | PSIZE_32 | WDSIZE_32;

			/* setup new dma descriptor for next middle dma */
			crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
			crc->sg_cpu[i].cfg = dma_config;
			crc->sg_cpu[i].x_count = 1;
			crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
			dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
				i, crc->sg_cpu[i].start_addr,
				crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
				crc->sg_cpu[i].x_modify);
			i++;
		}
		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
		/* chop current sg dma len to multiple of 32 bits */
		mid_dma_count = dma_count % 4;
		dma_count &= ~0x3;

		if (dma_addr % 4 == 0) {
			dma_config |= WDSIZE_32;
			dma_count >>= 2;
			dma_mod = 4;
		} else if (dma_addr % 2 == 0) {
			dma_config |= WDSIZE_16;
			dma_count >>= 1;
			dma_mod = 2;
		} else {
			dma_config |= WDSIZE_8;
			dma_mod = 1;
		}

		crc->sg_cpu[i].start_addr = dma_addr;
		crc->sg_cpu[i].cfg = dma_config;
		crc->sg_cpu[i].x_count = dma_count;
		crc->sg_cpu[i].x_modify = dma_mod;
		dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
			"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
			i, crc->sg_cpu[i].start_addr,
			crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
			crc->sg_cpu[i].x_modify);
		i++;

		if (mid_dma_count) {
			/* copy extra tail bytes to next middle dma buffer.
			 * The tail is the last mid_dma_count bytes of the
			 * region just scheduled; the previous fixed
			 * "dma_count << 2" offset was only right for
			 * 32-bit transfers without a glued head.
			 */
			unsigned int counted = sg_dma_len(sg) -
				(sg_is_last(sg) ? ctx->bufnext_len : 0);

			memcpy(crc->sg_mid_buf + (i << 2),
			       (u8 *)sg_virt(sg) + counted - mid_dma_count,
			       mid_dma_count);
		}
	}
	dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
	/* For final update req, append the buffer for next update as well */
	if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
		ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
		crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
						CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
		crc->sg_cpu[i].cfg = dma_config;
		crc->sg_cpu[i].x_count = 1;
		crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
		dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
			"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
			i, crc->sg_cpu[i].start_addr,
			crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
			crc->sg_cpu[i].x_modify);
		i++;
	}
	/* Set the last descriptor to stop mode */
	crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
	crc->sg_cpu[i - 1].cfg |= DI_EN;
	set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
	set_dma_x_count(crc->dma_ch, 0);
	set_dma_x_modify(crc->dma_ch, 0);
	set_dma_config(crc->dma_ch, dma_config);
}
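/*
 * Worked example: for a two-element sg list of 7 and 5 bytes, the loop
 * above emits a descriptor for the first 4 bytes of element 0, stages
 * its 3 tail bytes in sg_mid_buf, glues them with the first byte of
 * element 1 into a one-word "middle" descriptor, and emits a final
 * descriptor for element 1's remaining 4 bytes: 12 bytes total, all
 * moved as whole 32-bit words.
 */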
static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct bfin_crypto_crc_reqctx *ctx;
	struct scatterlist *sg;
	int ret = 0;
	int nsg, i, j;
	unsigned int nextlen;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&crc->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&crc->queue, req);
	if (crc->busy) {
		spin_unlock_irqrestore(&crc->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&crc->queue);
	async_req = crypto_dequeue_request(&crc->queue);
	if (async_req)
		crc->busy = 1;
	spin_unlock_irqrestore(&crc->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	crc->req = req;
	ctx = ahash_request_ctx(req);
	ctx->sg = NULL;
	ctx->sg_buflen = 0;
	ctx->sg_nents = 0;

	dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
		ctx->flag, req->nbytes);
	if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
		if (ctx->bufnext_len == 0) {
			crc->busy = 0;
			return 0;
		}

		/* Pack last crc update buffer to 32bit */
		memset(ctx->bufnext + ctx->bufnext_len, 0,
		       CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
	} else {
		/* Pack small data which is less than 32bit to buffer for next update. */
		if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
			memcpy(ctx->bufnext + ctx->bufnext_len,
			       sg_virt(req->src), req->nbytes);
			ctx->bufnext_len += req->nbytes;
			if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
				ctx->bufnext_len) {
				goto finish_update;
			} else {
				crc->busy = 0;
				return 0;
			}
		}

		if (ctx->bufnext_len) {
			/* Chain in extra bytes of last update */
			ctx->buflast_len = ctx->bufnext_len;
			memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);

			nsg = ctx->sg_buflen ? 2 : 1;
			sg_init_table(ctx->bufsl, nsg);
			sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
			if (nsg > 1)
				sg_chain(ctx->bufsl, nsg, req->src);
			ctx->sg = ctx->bufsl;
		} else
			ctx->sg = req->src;
		/* Chop crc buffer size to multiple of 32 bit */
		nsg = sg_nents(ctx->sg);
		ctx->sg_nents = nsg;
		ctx->sg_buflen = ctx->buflast_len + req->nbytes;
		ctx->bufnext_len = ctx->sg_buflen % 4;
		ctx->sg_buflen &= ~0x3;

		if (ctx->bufnext_len) {
			/* copy extra bytes to buffer for next update */
			memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
			nextlen = ctx->bufnext_len;
			for (i = nsg - 1; i >= 0; i--) {
				sg = sg_get(ctx->sg, nsg, i);
				j = min(nextlen, sg_dma_len(sg));
				memcpy(ctx->bufnext + nextlen - j,
				       sg_virt(sg) + sg_dma_len(sg) - j, j);
				if (j == sg_dma_len(sg))
					ctx->sg_nents--;
				nextlen -= j;
				if (nextlen == 0)
					break;
			}
		}
	}

finish_update:
	if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
		ctx->flag == CRC_CRYPTO_STATE_FINISH))
		ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
	/* set CRC data count before start DMA */
	writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);

	/* setup and enable CRC DMA */
	bfin_crypto_crc_config_dma(crc);

	/* finally kick off CRC operation */
	reg = readl(&crc->regs->control);
	writel(reg | BLKEN, &crc->regs->control);

	return -EINPROGRESS;
}
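/*
 * A started request completes asynchronously: the return value above is
 * -EINPROGRESS, the DMA-done interrupt posts the digest and calls the
 * request's completion callback, and the done tasklet re-enters
 * bfin_crypto_crc_handle_queue() with req == NULL to start whatever is
 * queued next.
 */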
static int bfin_crypto_crc_update(struct ahash_request *req)
{
	struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	dev_dbg(ctx->crc->dev, "crc_update\n");
	ctx->total += req->nbytes;
	ctx->flag = CRC_CRYPTO_STATE_UPDATE;

	return bfin_crypto_crc_handle_queue(ctx->crc, req);
}
static int bfin_crypto_crc_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
	struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

	dev_dbg(ctx->crc->dev, "crc_final\n");
	ctx->flag = CRC_CRYPTO_STATE_FINISH;
	ctx->sg_buflen = 0;

	return bfin_crypto_crc_handle_queue(ctx->crc, req);
}
static int bfin_crypto_crc_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
	struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

	dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
	ctx->total += req->nbytes;
	ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
	if (!req->nbytes)
		ctx->flag = CRC_CRYPTO_STATE_FINISH;

	return bfin_crypto_crc_handle_queue(ctx->crc, req);
}
static int bfin_crypto_crc_digest(struct ahash_request *req)
{
	int ret;

	ret = bfin_crypto_crc_init(req);
	if (ret)
		return ret;

	return bfin_crypto_crc_finup(req);
}
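/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * caller exercises the entry points above through the generic ahash
 * API. The variable names and the seed value below are hypothetical.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	__le32 seed = cpu_to_le32(0xffffffff);
 *	u8 digest[CHKSUM_DIGEST_SIZE];
 *
 *	crypto_ahash_setkey(tfm, (u8 *)&seed, CHKSUM_DIGEST_SIZE);
 *	ahash_request_set_crypt(req, src_sgl, digest, nbytes);
 *	err = crypto_ahash_digest(req); // -EINPROGRESS until the IRQ completes it
 */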
static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);

	dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
	if (keylen != CHKSUM_DIGEST_SIZE) {
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	crc_ctx->key = get_unaligned_le32(key);

	return 0;
}
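/*
 * Despite the "hmac(crc32)" algorithm name, the 4-byte "key" is not an
 * HMAC key in the cryptographic sense: it is simply the little-endian
 * seed value loaded into curresult before a calculation starts.
 */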
static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
{
	struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);

	crc_ctx->key = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct bfin_crypto_crc_reqctx));

	return 0;
}

static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
{
}
static struct ahash_alg algs = {
	.init		= bfin_crypto_crc_init,
	.update		= bfin_crypto_crc_update,
	.final		= bfin_crypto_crc_final,
	.finup		= bfin_crypto_crc_finup,
	.digest		= bfin_crypto_crc_digest,
	.setkey		= bfin_crypto_crc_setkey,
	.halg.digestsize	= CHKSUM_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(crc32)",
		.cra_driver_name	= DRIVER_NAME,
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct bfin_crypto_crc_ctx),
		.cra_alignmask		= 3,
		.cra_module		= THIS_MODULE,
		.cra_init		= bfin_crypto_crc_cra_init,
		.cra_exit		= bfin_crypto_crc_cra_exit,
	}
};
static void bfin_crypto_crc_done_task(unsigned long data)
{
	struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;

	bfin_crypto_crc_handle_queue(crc, NULL);
}
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
	struct bfin_crypto_crc *crc = dev_id;
	u32 reg;

	if (readl(&crc->regs->status) & DCNTEXP) {
		writel(DCNTEXP, &crc->regs->status);

		/* prepare results */
		put_unaligned_le32(readl(&crc->regs->result),
			crc->req->result);

		reg = readl(&crc->regs->control);
		writel(reg & ~BLKEN, &crc->regs->control);
		crc->busy = 0;

		if (crc->req->base.complete)
			crc->req->base.complete(&crc->req->base, 0);

		tasklet_schedule(&crc->done_task);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
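/*
 * Completion path: the counter-expired interrupt (DCNTEXP) means the
 * programmed number of 32-bit words has been pushed through the engine.
 * The handler latches the result into req->result, drops BLKEN, signals
 * the caller, and defers queue processing to the done tasklet so the
 * next request is not dispatched from hard-irq context.
 */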
#ifdef CONFIG_PM
/**
 * bfin_crypto_crc_suspend - suspend crc device
 * @pdev: device being suspended
 * @state: requested suspend state
 */
static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
	int i = 100000;

	while ((readl(&crc->regs->control) & BLKEN) && --i)
		cpu_relax();

	if (i == 0)
		return -EBUSY;

	return 0;
}
#else
# define bfin_crypto_crc_suspend NULL
#endif

#define bfin_crypto_crc_resume NULL
/**
 * bfin_crypto_crc_probe - Initialize module
 *
 */
static int bfin_crypto_crc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct bfin_crypto_crc *crc;
	unsigned int timeout = 100000;
	int ret;

	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
		return -ENOMEM;
	}

	crc->dev = dev;

	INIT_LIST_HEAD(&crc->list);
	spin_lock_init(&crc->lock);
	tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
	crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		return -ENOENT;
	}

	crc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR((void *)crc->regs)) {
		dev_err(&pdev->dev, "Cannot map CRC IO\n");
		return PTR_ERR((void *)crc->regs);
	}

	crc->irq = platform_get_irq(pdev, 0);
	if (crc->irq < 0) {
		dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
		return -ENOENT;
	}

	ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
			       IRQF_SHARED, dev_name(dev), crc);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
		return ret;
	}
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "No CRC DMA channel specified\n");
		return -ENOENT;
	}
	crc->dma_ch = res->start;

	ret = request_dma(crc->dma_ch, dev_name(dev));
	if (ret) {
		dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
		return ret;
	}

	crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
	if (crc->sg_cpu == NULL) {
		ret = -ENOMEM;
		goto out_error_dma;
	}
	/*
	 * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
	 * 1 last + 1 next dma descriptors
	 */
	crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
	crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
			* ((CRC_MAX_DMA_DESC + 1) << 1);
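	/*
	 * Layout of the single coherent page allocated above: the first
	 * 2 * (CRC_MAX_DMA_DESC + 1) slots hold dma_desc_array entries
	 * (a data plus a middle descriptor per sg element, and the
	 * final/next ones), and sg_mid_buf starts right behind them,
	 * giving one 4-byte staging word per descriptor slot (hence the
	 * "(i << 2)" indexing in bfin_crypto_crc_config_dma()).
	 */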
	writel(0, &crc->regs->control);
	crc->poly = (u32)pdev->dev.platform_data;
	writel(crc->poly, &crc->regs->poly);

	while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
		cpu_relax();

	if (timeout == 0)
		dev_info(&pdev->dev, "init crc poly timeout\n");

	platform_set_drvdata(pdev, crc);

	spin_lock(&crc_list.lock);
	list_add(&crc->list, &crc_list.dev_list);
	spin_unlock(&crc_list.lock);

	if (list_is_singular(&crc_list.dev_list)) {
		ret = crypto_register_ahash(&algs);
		if (ret) {
			dev_err(&pdev->dev,
				"Can't register crypto ahash device\n");
			goto out_error_dma;
		}
	}

	dev_info(&pdev->dev, "initialized\n");

	return 0;

out_error_dma:
	if (crc->sg_cpu)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
	free_dma(crc->dma_ch);

	return ret;
}
/**
 * bfin_crypto_crc_remove - Deinitialize module
 *
 */
static int bfin_crypto_crc_remove(struct platform_device *pdev)
{
	struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);

	if (!crc)
		return -ENODEV;

	spin_lock(&crc_list.lock);
	list_del(&crc->list);
	spin_unlock(&crc_list.lock);

	crypto_unregister_ahash(&algs);
	tasklet_kill(&crc->done_task);
	free_dma(crc->dma_ch);

	return 0;
}
static struct platform_driver bfin_crypto_crc_driver = {
	.probe   = bfin_crypto_crc_probe,
	.remove  = bfin_crypto_crc_remove,
	.suspend = bfin_crypto_crc_suspend,
	.resume  = bfin_crypto_crc_resume,
	.driver  = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
};
/**
 * bfin_crypto_crc_mod_init - Initialize module
 *
 * Checks the module params and registers the platform driver.
 * Real work is in the platform probe function.
 */
static int __init bfin_crypto_crc_mod_init(void)
{
	int ret;

	pr_info("Blackfin hardware CRC crypto driver\n");

	INIT_LIST_HEAD(&crc_list.dev_list);
	spin_lock_init(&crc_list.lock);

	ret = platform_driver_register(&bfin_crypto_crc_driver);
	if (ret) {
		pr_err("unable to register driver\n");
		return ret;
	}

	return 0;
}
/**
 * bfin_crypto_crc_mod_exit - Deinitialize module
 */
static void __exit bfin_crypto_crc_mod_exit(void)
{
	platform_driver_unregister(&bfin_crypto_crc_driver);
}

module_init(bfin_crypto_crc_mod_init);
module_exit(bfin_crypto_crc_mod_exit);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
MODULE_LICENSE("GPL");