/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};

/* The driver currently manages a single CCP device instance */
static struct ccp_device *ccp_dev;

static inline struct ccp_device *ccp_get_device(void)
{
        return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
        ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
        ccp_dev = NULL;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
        if (ccp_get_device())
                return 0;

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(ccp_present);
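
/*
 * Example (sketch): a client module, such as the ccp-crypto layer, can use
 * ccp_present() to avoid registering algorithms when no CCP is available.
 * The helper below is purely illustrative and not part of this driver.
 *
 *	static int my_client_init(void)
 *	{
 *		if (ccp_present() != 0)
 *			return -ENODEV;
 *
 *		return my_register_algorithms();
 *	}
 */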

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, and
 * queueing it will result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
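
/*
 * Example (sketch): a hypothetical caller fills in a struct ccp_cmd (defined
 * in <linux/ccp.h>), supplies a callback and submits it asynchronously. The
 * request structure and callback below are illustrative only.
 *
 *	static void my_cmd_done(void *data, int err)
 *	{
 *		struct my_req *req = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;			(left the backlog, still running)
 *
 *		req->err = err;			(result of the operation)
 *		complete(&req->done);
 *	}
 *
 *	req->cmd.engine = CCP_ENGINE_SHA;
 *	req->cmd.flags = CCP_CMD_MAY_BACKLOG;
 *	req->cmd.callback = my_cmd_done;
 *	req->cmd.data = req;
 *
 *	ret = ccp_enqueue_cmd(&req->cmd);
 *	if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *		goto e_submit;			(never queued, no callback coming)
 */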

static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}
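
/*
 * Pull the next command off of the device command list for a queue kthread.
 * Removing a command also gives one backlogged command (if any) the chance
 * to move forward: it is handed to ccp_do_cmd_backlog() via the system
 * workqueue, which re-queues it and notifies its submitter with -EINPROGRESS.
 */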
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}
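
/*
 * Completion callbacks are not invoked directly from the queue kthread: the
 * finished command is handed to a tasklet and the kthread waits for the
 * callback to complete before fetching more work.
 */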
static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);
        complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
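
/*
 * hwrng read callback, registered against ccp->hwrng in ccp_init() below.
 * Each invocation performs a single, non-blocking read of the TRNG output
 * register regardless of the "wait" argument.
 */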
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /* Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
        if (!ccp) {
                dev_err(dev, "unable to allocate device struct\n");
                return NULL;
        }
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->ksb_mutex);
        ccp->ksb_count = KSB_COUNT;
        ccp->ksb_start = 0;

        return ccp;
}

/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, qim, i;
        int ret;

        /* Find available queues */
        qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
                ccp->ksb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
                /* For arm64 set the recommended queue cache settings */
                iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
                          (CMD_Q_CACHE_INC * i));
#endif

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = ccp->get_irq(ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the queues used to wait for KSB space and suspend */
        init_waitqueue_head(&ccp->ksb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "ccp-q%u", cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        /* Register the RNG */
        ccp->hwrng.name = "ccp-rng";
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret) {
                dev_err(dev, "error registering hwrng (%d)\n", ret);
                goto e_kthread;
        }

        /* Make the device struct available before enabling interrupts */
        ccp_add_device(ccp);

        /* Enable interrupts */
        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

        return 0;

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
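
/*
 * Example (sketch): the bus-specific glue (ccp-pci.c or ccp-platform.c) is
 * expected to map the register space and provide the get_irq/free_irq
 * callbacks before calling ccp_init(). The probe routine below is
 * illustrative only; details such as resource mapping are omitted.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct ccp_device *ccp;
 *		int ret;
 *
 *		ccp = ccp_alloc_struct(dev);
 *		if (!ccp)
 *			return -ENOMEM;
 *
 *		ccp->io_regs = ...;		(mapped command/status registers)
 *		ccp->get_irq = ...;		(bus-specific IRQ request helper)
 *		ccp->free_irq = ...;
 *		dev_set_drvdata(dev, ccp);
 *
 *		ret = ccp_init(ccp);
 *		if (ret)
 *			kfree(ccp);
 *
 *		return ret;
 *	}
 *
 * Teardown is the reverse: call ccp_destroy() and then free the structure.
 */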

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int qim, i;

        /* Remove general access to the device struct */
        ccp_del_device(ccp);

        /* Unregister the RNG */
        hwrng_unregister(&ccp->hwrng);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        /* Build queue interrupt mask (two interrupt masks per queue) */
        qim = 0;
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                qim |= cmd_q->int_ok | cmd_q->int_err;
        }

        /* Disable and clear interrupts */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        ccp->free_irq(ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queues */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}
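
/*
 * Interrupt handling note: the handler below latches the queue status into
 * the ccp_cmd_queue and wakes cmd_q->int_queue; the command execution path
 * (ccp_run_cmd() in ccp-ops.c) is expected to sleep on that waitqueue and
 * check int_rcvd to detect completion of the hardware operation.
 */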

/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif
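
/*
 * Suspend flow (assumed): the bus-specific suspend callback sets
 * ccp->suspending under cmd_lock, wakes every queue kthread, and then waits
 * on ccp->suspend_queue until ccp_queues_suspended() reports that all queue
 * kthreads have parked themselves (see ccp_dequeue_cmd() above).
 */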

#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
        { },
};
#endif
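
/* Family 22 above is 0x16; ccp_mod_init() below further restricts support
 * to models 48-63 (0x30-0x3f) of that family.
 */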

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;

        if (!x86_match_cpu(ccp_support))
                return -ENODEV;

        switch (cpuinfo->x86) {
        case 22:
                if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
                        return -ENODEV;

                ret = ccp_pci_init();
                if (ret)
                        return ret;

                /* Don't leave the driver loaded if init failed */
                if (!ccp_get_device()) {
                        ccp_pci_exit();
                        return -ENODEV;
                }

                return 0;
        }
#endif

#ifdef CONFIG_ARM64
        int ret;

        ret = ccp_platform_init();
        if (ret)
                return ret;

        /* Don't leave the driver loaded if init failed */
        if (!ccp_get_device()) {
                ccp_platform_exit();
                return -ENODEV;
        }

        return 0;
#endif

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

        switch (cpuinfo->x86) {
        case 22:
                ccp_pci_exit();
                break;
        }
#endif

#ifdef CONFIG_ARM64
        ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);