dmaengine: ioatdma: remove ioat1 specific code
drivers/dma/ioat/dma.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
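/*
 * ioat_pending_level batches descriptor submission: the ring code defers
 * the doorbell MMIO write until at least this many descriptors are
 * pending, amortizing register writes across submissions. It can be
 * overridden at module load time, e.g. (illustrative):
 *
 *	modprobe ioatdma ioat_pending_level=8
 */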

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
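/*
 * In the single-vector (INTx/MSI) mode above, ATTNSTATUS reports which
 * channels asserted the interrupt and writing INTRCTRL back acks it at
 * the device. With MSI-X each channel owns its own vector, so the
 * handler below can skip the global status scan and go straight to its
 * channel's cleanup tasklet.
 */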

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}
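/*
 * Channel register layout: each channel owns a 0x80-byte window of MMIO
 * space, and window 0 is the device-global register block, hence the
 * "idx + 1" above: channel idx lives at reg_base + 0x80 * (idx + 1).
 */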

void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}

dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
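/*
 * The hardware writes its channel status (the address of the last
 * completed descriptor plus status bits) back to the coherent memory
 * pointed at by chan->completion, so the read above polls cacheable
 * memory instead of paying for an MMIO read on every cleanup pass.
 */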

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
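/*
 * A false return above means nothing has completed since the last pass,
 * so the caller can skip cleanup; on progress the completion timer is
 * re-armed and the ACK flag cleared so the watchdog path knows the
 * channel is still making forward progress.
 */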

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}
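/*
 * The status check above follows the standard dmaengine polling pattern:
 * take the fast path if the cookie already reads complete, otherwise run
 * the cleanup routine to reap finished descriptors and re-check.
 */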

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000
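/*
 * The self-test below doubles as a minimal example of the dmaengine
 * client flow: map the buffers, prep a memcpy descriptor, ack it and
 * attach a completion callback, submit to obtain a cookie, kick the
 * channel with issue_pending, then wait on the completion and verify
 * the copy byte-for-byte.
 */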

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");
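/*
 * The chosen style is only a starting point: setup below falls back
 * msix -> msi -> intx when a mode cannot be enabled. Example override
 * at module load time (illustrative):
 *
 *	modprobe ioatdma ioat_interrupt_style=msi
 */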

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
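/*
 * Probe order below: descriptor and completion pools first, then channel
 * enumeration, then interrupts, then a self-test; the error labels unwind
 * in exactly the reverse order, so a failure at any step releases only
 * what was already set up.
 */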

int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};
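/*
 * Only .show is provided, so these attributes are read-only: writes to
 * the sysfs files fail because there is no .store method. Each channel
 * gets a "quickdata" kobject directory under its dma_chan device node,
 * populated and torn down by the helpers below.
 */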

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}