/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}

void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}

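/*
 * Illustrative sketch (hypothetical, not part of this driver): channel
 * implementations typically run the quiesce sequence above from their
 * ->device_free_chan_resources() hook, so that no irq, timer, or tasklet
 * can touch the channel while its descriptors are being reclaimed.
 */
static inline void example_free_chan_resources(struct dma_chan *c)
{
	struct ioat_chan_common *chan = to_chan_common(c);

	/* quiesce irqs, timers, and tasklets before freeing descriptors */
	ioat_stop(chan);

	/* descriptor teardown would follow here */
}
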
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

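/*
 * Minimal sketch (illustrative, not part of this driver) of how a dmaengine
 * client could poll a submitted transaction; such a query ends up in the
 * ->device_tx_status() hook serviced by ioat_dma_tx_status() above.
 * dma_async_is_tx_complete() is the generic dmaengine helper; "chan" and
 * "cookie" are assumed to come from the client's own prep/submit path.
 * Real clients usually prefer a completion callback, as the self-test
 * below does.
 */
static inline enum dma_status example_poll_copy_done(struct dma_chan *chan,
						     dma_cookie_t cookie)
{
	dma_cookie_t last, used;
	enum dma_status status;

	/* busy-wait until the transaction leaves the in-progress state */
	do {
		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status;
}
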
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		/* fall through so both buffers are unmapped */
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

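/*
 * Example (illustrative): once ioat_kobject_add() below registers these
 * entries under a "quickdata" kobject on each channel device, they can be
 * read from userspace; the exact channel name depends on enumeration order:
 *
 *   cat /sys/class/dma/dma0chan0/quickdata/cap
 *   cat /sys/class/dma/dma0chan0/quickdata/version
 */
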
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EINVAL;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}