/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
41 int ioat_pending_level = 4;
42 module_param(ioat_pending_level, int, 0644);
43 MODULE_PARM_DESC(ioat_pending_level,
44 "high-water mark for pushing ioat descriptors (default: 4)");
46 /* internal functions */
47 static void ioat1_cleanup(struct ioat_dma_chan *ioat);
48 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
55 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
57 struct ioatdma_device *instance = data;
58 struct ioat_chan_common *chan;
59 unsigned long attnstatus;
63 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
65 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
68 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
69 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
73 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
74 for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
75 chan = ioat_chan_by_index(instance, bit);
76 tasklet_schedule(&chan->cleanup_task);
79 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
88 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
90 struct ioat_chan_common *chan = data;
92 tasklet_schedule(&chan->cleanup_task);
97 static void ioat1_cleanup_tasklet(unsigned long data);
99 /* common channel initialization */
100 void ioat_init_channel(struct ioatdma_device *device,
101 struct ioat_chan_common *chan, int idx,
102 void (*timer_fn)(unsigned long),
void (*tasklet)(unsigned long),
unsigned long ioat)
106 struct dma_device *dma = &device->common;
108 chan->device = device;
109 chan->reg_base = device->reg_base + (0x80 * (idx + 1));
110 spin_lock_init(&chan->cleanup_lock);
111 chan->common.device = dma;
112 list_add_tail(&chan->common.device_node, &dma->channels);
113 device->idx[idx] = chan;
114 init_timer(&chan->timer);
115 chan->timer.function = timer_fn;
116 chan->timer.data = ioat;
117 tasklet_init(&chan->cleanup_task, tasklet, ioat);
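/* leave the cleanup tasklet disabled until descriptor resources are
 * allocated for this channel (it is re-enabled in ->alloc_chan_resources) */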
118 tasklet_disable(&chan->cleanup_task);
121 static void ioat1_timer_event(unsigned long data);
/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
127 static int ioat1_enumerate_channels(struct ioatdma_device *device)
132 struct ioat_dma_chan *ioat;
133 struct device *dev = &device->pdev->dev;
134 struct dma_device *dma = &device->common;
136 INIT_LIST_HEAD(&dma->channels);
137 dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
138 dma->chancnt &= 0x1f; /* bits [4:0] valid */
139 if (dma->chancnt > ARRAY_SIZE(device->idx)) {
140 dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
141 dma->chancnt, ARRAY_SIZE(device->idx));
142 dma->chancnt = ARRAY_SIZE(device->idx);
144 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
145 xfercap_scale &= 0x1f; /* bits [4:0] valid */
146 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
147 dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
149 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
150 if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
153 for (i = 0; i < dma->chancnt; i++) {
154 ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
158 ioat_init_channel(device, &ioat->base, i,
160 ioat1_cleanup_tasklet,
161 (unsigned long) ioat);
162 ioat->xfercap = xfercap;
163 spin_lock_init(&ioat->desc_lock);
164 INIT_LIST_HEAD(&ioat->free_desc);
165 INIT_LIST_HEAD(&ioat->used_desc);
/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
177 __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
179 void __iomem *reg_base = ioat->base.reg_base;
181 dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
182 __func__, ioat->pending);
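/* clear the software pending count and tell the hardware to fetch the
 * newly appended descriptors */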
ioat->pending = 0;
writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
187 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
189 struct ioat_dma_chan *ioat = to_ioat_chan(chan);
191 if (ioat->pending > 0) {
192 spin_lock_bh(&ioat->desc_lock);
193 __ioat1_dma_memcpy_issue_pending(ioat);
194 spin_unlock_bh(&ioat->desc_lock);
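/*
 * Illustrative sketch only (not part of this driver): a dmaengine client
 * typically exercises these entry points roughly as follows, where chan,
 * dst, src, len and flags are the client's own values:
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);                  (ioat1_tx_submit)
 *	chan->device->device_issue_pending(chan);    (ioat1_dma_memcpy_issue_pending)
 *	dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */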
199 * ioat1_reset_channel - restart a channel
200 * @ioat: IOAT DMA channel handle
202 static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
204 struct ioat_chan_common *chan = &ioat->base;
205 void __iomem *reg_base = chan->reg_base;
206 u32 chansts, chanerr;
208 dev_warn(to_dev(chan), "reset\n");
209 chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
210 chansts = *chan->completion & IOAT_CHANSTS_STATUS;
212 dev_err(to_dev(chan),
213 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
214 chan_num(chan), chansts, chanerr);
215 writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
219 * whack it upside the head with a reset
220 * and wait for things to settle out.
221 * force the pending count to a really big negative
222 * to make sure no one forces an issue_pending
223 * while we're waiting.
226 ioat->pending = INT_MIN;
227 writeb(IOAT_CHANCMD_RESET,
228 reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
229 set_bit(IOAT_RESET_PENDING, &chan->state);
230 mod_timer(&chan->timer, jiffies + RESET_DELAY);
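/*
 * ioat1_tx_submit - assign a cookie to the descriptor chain and splice it
 * onto the channel's used list; the hardware is only kicked once the
 * pending count crosses ioat_pending_level.
 */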
233 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
235 struct dma_chan *c = tx->chan;
236 struct ioat_dma_chan *ioat = to_ioat_chan(c);
237 struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
238 struct ioat_chan_common *chan = &ioat->base;
struct ioat_desc_sw *first;
struct ioat_desc_sw *chain_tail;
dma_cookie_t cookie;

spin_lock_bh(&ioat->desc_lock);
/* cookie incr and addition to used_list must be atomic */
cookie = c->cookie;
cookie++;
if (cookie < 0)
	cookie = 1;
c->cookie = cookie;
tx->cookie = cookie;
251 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
253 /* write address into NextDescriptor field of last desc in chain */
254 first = to_ioat_desc(tx->tx_list.next);
255 chain_tail = to_ioat_desc(ioat->used_desc.prev);
256 /* make descriptor updates globally visible before chaining */
258 chain_tail->hw->next = first->txd.phys;
259 list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
260 dump_desc_dbg(ioat, chain_tail);
261 dump_desc_dbg(ioat, first);
263 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
264 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
266 ioat->active += desc->hw->tx_cnt;
267 ioat->pending += desc->hw->tx_cnt;
268 if (ioat->pending >= ioat_pending_level)
269 __ioat1_dma_memcpy_issue_pending(ioat);
270 spin_unlock_bh(&ioat->desc_lock);
276 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
277 * @ioat: the channel supplying the memory pool for the descriptors
278 * @flags: allocation flags
280 static struct ioat_desc_sw *
281 ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
283 struct ioat_dma_descriptor *desc;
284 struct ioat_desc_sw *desc_sw;
285 struct ioatdma_device *ioatdma_device;
288 ioatdma_device = ioat->base.device;
289 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
293 desc_sw = kzalloc(sizeof(*desc_sw), flags);
294 if (unlikely(!desc_sw)) {
295 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
299 memset(desc, 0, sizeof(*desc));
301 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
302 desc_sw->txd.tx_submit = ioat1_tx_submit;
304 desc_sw->txd.phys = phys;
305 set_desc_id(desc_sw, -1);
310 static int ioat_initial_desc_count = 256;
311 module_param(ioat_initial_desc_count, int, 0644);
312 MODULE_PARM_DESC(ioat_initial_desc_count,
313 "ioat1: initial descriptors per channel (default: 256)");
315 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
316 * @chan: the channel to be filled out
318 static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
320 struct ioat_dma_chan *ioat = to_ioat_chan(c);
321 struct ioat_chan_common *chan = &ioat->base;
322 struct ioat_desc_sw *desc;
327 /* have we already been set up? */
328 if (!list_empty(&ioat->free_desc))
329 return ioat->desccount;
/* Set up the register to interrupt and write completion status on error */
332 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
334 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
336 dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
337 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
340 /* Allocate descriptors */
341 for (i = 0; i < ioat_initial_desc_count; i++) {
342 desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
344 dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
347 set_desc_id(desc, i);
348 list_add_tail(&desc->node, &tmp_list);
350 spin_lock_bh(&ioat->desc_lock);
352 list_splice(&tmp_list, &ioat->free_desc);
353 spin_unlock_bh(&ioat->desc_lock);
355 /* allocate a completion writeback area */
356 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
357 chan->completion = pci_pool_alloc(chan->device->completion_pool,
358 GFP_KERNEL, &chan->completion_dma);
359 memset(chan->completion, 0, sizeof(*chan->completion));
360 writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
361 chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
362 writel(((u64) chan->completion_dma) >> 32,
363 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
365 tasklet_enable(&chan->cleanup_task);
366 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
367 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
368 __func__, ioat->desccount);
369 return ioat->desccount;
373 * ioat1_dma_free_chan_resources - release all the descriptors
374 * @chan: the channel to be cleaned
376 static void ioat1_dma_free_chan_resources(struct dma_chan *c)
378 struct ioat_dma_chan *ioat = to_ioat_chan(c);
379 struct ioat_chan_common *chan = &ioat->base;
380 struct ioatdma_device *ioatdma_device = chan->device;
381 struct ioat_desc_sw *desc, *_desc;
382 int in_use_descs = 0;
384 /* Before freeing channel resources first check
385 * if they have been previously allocated for this channel.
387 if (ioat->desccount == 0)
390 tasklet_disable(&chan->cleanup_task);
391 del_timer_sync(&chan->timer);
394 /* Delay 100ms after reset to allow internal DMA logic to quiesce
395 * before removing DMA descriptor resources.
397 writeb(IOAT_CHANCMD_RESET,
398 chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
401 spin_lock_bh(&ioat->desc_lock);
402 list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
403 dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
404 __func__, desc_id(desc));
405 dump_desc_dbg(ioat, desc);
407 list_del(&desc->node);
408 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
412 list_for_each_entry_safe(desc, _desc,
413 &ioat->free_desc, node) {
414 list_del(&desc->node);
415 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
419 spin_unlock_bh(&ioat->desc_lock);
421 pci_pool_free(ioatdma_device->completion_pool,
423 chan->completion_dma);
425 /* one is ok since we left it on there on purpose */
426 if (in_use_descs > 1)
427 dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
430 chan->last_completion = 0;
431 chan->completion_dma = 0;
437 * ioat1_dma_get_next_descriptor - return the next available descriptor
438 * @ioat: IOAT DMA channel handle
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
444 static struct ioat_desc_sw *
445 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
447 struct ioat_desc_sw *new;
449 if (!list_empty(&ioat->free_desc)) {
450 new = to_ioat_desc(ioat->free_desc.next);
451 list_del(&new->node);
453 /* try to get another desc */
454 new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
456 dev_err(to_dev(&ioat->base), "alloc failed\n");
460 dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
461 __func__, desc_id(new));
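/*
 * ioat1_dma_prep_memcpy - build the hardware descriptor chain for one copy
 * request, splitting it into ioat->xfercap sized pieces; only the final
 * descriptor carries the client's flags and the completion-write/interrupt
 * control bits.
 */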
466 static struct dma_async_tx_descriptor *
467 ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
468 dma_addr_t dma_src, size_t len, unsigned long flags)
470 struct ioat_dma_chan *ioat = to_ioat_chan(c);
471 struct ioat_desc_sw *desc;
474 dma_addr_t src = dma_src;
475 dma_addr_t dest = dma_dest;
476 size_t total_len = len;
477 struct ioat_dma_descriptor *hw = NULL;
480 spin_lock_bh(&ioat->desc_lock);
481 desc = ioat1_dma_get_next_descriptor(ioat);
487 copy = min_t(size_t, len, ioat->xfercap);
495 list_add_tail(&desc->node, &chain);
501 struct ioat_desc_sw *next;
503 async_tx_ack(&desc->txd);
504 next = ioat1_dma_get_next_descriptor(ioat);
505 hw->next = next ? next->txd.phys : 0;
506 dump_desc_dbg(ioat, desc);
513 struct ioat_chan_common *chan = &ioat->base;
515 dev_err(to_dev(chan),
516 "chan%d - get_next_desc failed\n", chan_num(chan));
517 list_splice(&chain, &ioat->free_desc);
518 spin_unlock_bh(&ioat->desc_lock);
521 spin_unlock_bh(&ioat->desc_lock);
523 desc->txd.flags = flags;
524 desc->len = total_len;
525 list_splice(&chain, &desc->txd.tx_list);
526 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
527 hw->ctl_f.compl_write = 1;
529 dump_desc_dbg(ioat, desc);
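/* tasklet body: reap completed descriptors, then re-arm channel error and
 * interrupt reporting by rewriting CHANCTRL */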
534 static void ioat1_cleanup_tasklet(unsigned long data)
536 struct ioat_dma_chan *chan = (void *)data;
ioat1_cleanup(chan);
writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
542 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
543 size_t len, struct ioat_dma_descriptor *hw)
545 struct pci_dev *pdev = chan->device->pdev;
546 size_t offset = len - hw->size;
548 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
549 ioat_unmap(pdev, hw->dst_addr - offset, len,
550 PCI_DMA_FROMDEVICE, flags, 1);
552 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
553 ioat_unmap(pdev, hw->src_addr - offset, len,
554 PCI_DMA_TODEVICE, flags, 0);
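/* read the channel's completion writeback area and convert the raw CHANSTS
 * value into the physical address of the last completed descriptor */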
557 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
559 unsigned long phys_complete;
562 completion = *chan->completion;
563 phys_complete = ioat_chansts_to_addr(completion);
565 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
566 (unsigned long long) phys_complete);
568 if (is_ioat_halted(completion)) {
569 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
570 dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
573 /* TODO do something to salvage the situation */
576 return phys_complete;
579 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
580 unsigned long *phys_complete)
582 *phys_complete = ioat_get_current_completion(chan);
583 if (*phys_complete == chan->last_completion)
585 clear_bit(IOAT_COMPLETION_ACK, &chan->state);
586 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
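/* __cleanup - walk used_desc and retire (unmap, run callback, recycle)
 * descriptors the hardware has completed up to phys_complete; called with
 * desc_lock and cleanup_lock held */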
591 static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
593 struct ioat_chan_common *chan = &ioat->base;
594 struct list_head *_desc, *n;
595 struct dma_async_tx_descriptor *tx;
597 dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
598 __func__, phys_complete);
599 list_for_each_safe(_desc, n, &ioat->used_desc) {
600 struct ioat_desc_sw *desc;
603 desc = list_entry(_desc, typeof(*desc), node);
606 * Incoming DMA requests may use multiple descriptors,
607 * due to exceeding xfercap, perhaps. If so, only the
608 * last one will have a cookie, and require unmapping.
610 dump_desc_dbg(ioat, desc);
612 chan->completed_cookie = tx->cookie;
614 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
615 ioat->active -= desc->hw->tx_cnt;
617 tx->callback(tx->callback_param);
622 if (tx->phys != phys_complete) {
624 * a completed entry, but not the last, so clean
625 * up if the client is done with the descriptor
627 if (async_tx_test_ack(tx))
628 list_move_tail(&desc->node, &ioat->free_desc);
631 * last used desc. Do not remove, so we can
635 /* if nothing else is pending, cancel the
638 if (n == &ioat->used_desc) {
639 dev_dbg(to_dev(chan),
640 "%s cancel completion timeout\n",
642 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
645 /* TODO check status bits? */
650 chan->last_completion = phys_complete;
/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention, cleanup is deferred while the locks are
 * contended; a terminal timeout then forces cleanup and catches missed
 * completion notifications.
 */
661 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
663 struct ioat_chan_common *chan = &ioat->base;
664 unsigned long phys_complete;
666 prefetch(chan->completion);
668 if (!spin_trylock_bh(&chan->cleanup_lock))
671 if (!ioat_cleanup_preamble(chan, &phys_complete)) {
672 spin_unlock_bh(&chan->cleanup_lock);
676 if (!spin_trylock_bh(&ioat->desc_lock)) {
677 spin_unlock_bh(&chan->cleanup_lock);
681 __cleanup(ioat, phys_complete);
683 spin_unlock_bh(&ioat->desc_lock);
684 spin_unlock_bh(&chan->cleanup_lock);
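/* timer callback: finish a pending channel reset, or force cleanup and
 * escalate to ioat1_reset_channel() when no forward progress is seen */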
687 static void ioat1_timer_event(unsigned long data)
689 struct ioat_dma_chan *ioat = (void *) data;
690 struct ioat_chan_common *chan = &ioat->base;
692 dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
694 spin_lock_bh(&chan->cleanup_lock);
695 if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
696 struct ioat_desc_sw *desc;
698 spin_lock_bh(&ioat->desc_lock);
700 /* restart active descriptors */
701 desc = to_ioat_desc(ioat->used_desc.prev);
702 ioat_set_chainaddr(ioat, desc->txd.phys);
706 set_bit(IOAT_COMPLETION_PENDING, &chan->state);
707 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
708 spin_unlock_bh(&ioat->desc_lock);
709 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
710 unsigned long phys_complete;
712 spin_lock_bh(&ioat->desc_lock);
713 /* if we haven't made progress and we have already
714 * acknowledged a pending completion once, then be more
715 * forceful with a restart
717 if (ioat_cleanup_preamble(chan, &phys_complete))
718 __cleanup(ioat, phys_complete);
719 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
720 ioat1_reset_channel(ioat);
722 u64 status = ioat_chansts(chan);
724 /* manually update the last completion address */
725 if (ioat_chansts_to_addr(status) != 0)
726 *chan->completion = status;
728 set_bit(IOAT_COMPLETION_ACK, &chan->state);
729 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
731 spin_unlock_bh(&ioat->desc_lock);
733 spin_unlock_bh(&chan->cleanup_lock);
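/* ->device_is_tx_complete hook: check the cached completion state first,
 * then run cleanup and check again */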
736 static enum dma_status
737 ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
738 dma_cookie_t *done, dma_cookie_t *used)
740 struct ioat_dma_chan *ioat = to_ioat_chan(c);
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
	return DMA_SUCCESS;

ioat1_cleanup(ioat);

return ioat_is_complete(c, cookie, done, used);
750 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
752 struct ioat_chan_common *chan = &ioat->base;
753 struct ioat_desc_sw *desc;
754 struct ioat_dma_descriptor *hw;
756 spin_lock_bh(&ioat->desc_lock);
758 desc = ioat1_dma_get_next_descriptor(ioat);
761 dev_err(to_dev(chan),
762 "Unable to start null desc - get next desc failed\n");
763 spin_unlock_bh(&ioat->desc_lock);
770 hw->ctl_f.int_en = 1;
771 hw->ctl_f.compl_write = 1;
772 /* set size to non-zero value (channel returns error when size is 0) */
773 hw->size = NULL_DESC_BUFFER_SIZE;
776 async_tx_ack(&desc->txd);
778 list_add_tail(&desc->node, &ioat->used_desc);
779 dump_desc_dbg(ioat, desc);
781 ioat_set_chainaddr(ioat, desc->txd.phys);
783 spin_unlock_bh(&ioat->desc_lock);
 * Perform an I/OAT transaction to verify the HW works.
789 #define IOAT_TEST_SIZE 2000
791 static void __devinit ioat_dma_test_callback(void *dma_async_param)
793 struct completion *cmp = dma_async_param;
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
800 * @device: device to be tested
802 static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
807 struct dma_device *dma = &device->common;
808 struct device *dev = &device->pdev->dev;
809 struct dma_chan *dma_chan;
810 struct dma_async_tx_descriptor *tx;
811 dma_addr_t dma_dest, dma_src;
814 struct completion cmp;
818 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
821 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
827 /* Fill in src buffer */
828 for (i = 0; i < IOAT_TEST_SIZE; i++)
831 /* Start copy, using first DMA channel */
832 dma_chan = container_of(dma->channels.next, struct dma_chan,
834 if (dma->device_alloc_chan_resources(dma_chan) < 1) {
835 dev_err(dev, "selftest cannot allocate chan resource\n");
840 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
841 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
842 flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
844 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
845 IOAT_TEST_SIZE, flags);
847 dev_err(dev, "Self-test prep failed, disabling\n");
853 init_completion(&cmp);
854 tx->callback = ioat_dma_test_callback;
855 tx->callback_param = &cmp;
856 cookie = tx->tx_submit(tx);
858 dev_err(dev, "Self-test setup failed, disabling\n");
862 dma->device_issue_pending(dma_chan);
864 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
867 dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
869 dev_err(dev, "Self-test copy timed out, disabling\n");
873 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
874 dev_err(dev, "Self-test copy failed compare, disabling\n");
880 dma->device_free_chan_resources(dma_chan);
887 static char ioat_interrupt_style[32] = "msix";
888 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
889 sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
895 * ioat_dma_setup_interrupts - setup interrupt handler
896 * @device: ioat device
898 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
900 struct ioat_chan_common *chan;
901 struct pci_dev *pdev = device->pdev;
902 struct device *dev = &pdev->dev;
903 struct msix_entry *msix;
908 if (!strcmp(ioat_interrupt_style, "msix"))
910 if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
911 goto msix_single_vector;
912 if (!strcmp(ioat_interrupt_style, "msi"))
914 if (!strcmp(ioat_interrupt_style, "intx"))
916 dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
920 /* The number of MSI-X vectors should equal the number of channels */
921 msixcnt = device->common.chancnt;
922 for (i = 0; i < msixcnt; i++)
923 device->msix_entries[i].entry = i;
925 err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
929 goto msix_single_vector;
931 for (i = 0; i < msixcnt; i++) {
932 msix = &device->msix_entries[i];
933 chan = ioat_chan_by_index(device, i);
934 err = devm_request_irq(dev, msix->vector,
935 ioat_dma_do_interrupt_msix, 0,
938 for (j = 0; j < i; j++) {
939 msix = &device->msix_entries[j];
940 chan = ioat_chan_by_index(device, j);
941 devm_free_irq(dev, msix->vector, chan);
943 goto msix_single_vector;
946 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
950 msix = &device->msix_entries[0];
952 err = pci_enable_msix(pdev, device->msix_entries, 1);
956 err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
957 "ioat-msix", device);
959 pci_disable_msix(pdev);
965 err = pci_enable_msi(pdev);
969 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
972 pci_disable_msi(pdev);
978 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
979 IRQF_SHARED, "ioat-intx", device);
984 if (device->intr_quirk)
985 device->intr_quirk(device);
986 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
987 writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
991 /* Disable all interrupt generation */
992 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
993 dev_err(dev, "no usable interrupts\n");
997 static void ioat_disable_interrupts(struct ioatdma_device *device)
999 /* Disable all interrupt generation */
1000 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1003 int __devinit ioat_probe(struct ioatdma_device *device)
1006 struct dma_device *dma = &device->common;
1007 struct pci_dev *pdev = device->pdev;
1008 struct device *dev = &pdev->dev;
1010 /* DMA coherent memory pool for DMA descriptor allocations */
1011 device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1012 sizeof(struct ioat_dma_descriptor),
1014 if (!device->dma_pool) {
1019 device->completion_pool = pci_pool_create("completion_pool", pdev,
1020 sizeof(u64), SMP_CACHE_BYTES,
1023 if (!device->completion_pool) {
1025 goto err_completion_pool;
1028 device->enumerate_channels(device);
1030 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
1031 dma->dev = &pdev->dev;
1033 if (!dma->chancnt) {
1034 dev_err(dev, "zero channels detected\n");
1035 goto err_setup_interrupts;
1038 err = ioat_dma_setup_interrupts(device);
1040 goto err_setup_interrupts;
1042 err = ioat_dma_self_test(device);
1049 ioat_disable_interrupts(device);
1050 err_setup_interrupts:
1051 pci_pool_destroy(device->completion_pool);
1052 err_completion_pool:
1053 pci_pool_destroy(device->dma_pool);
1058 int __devinit ioat_register(struct ioatdma_device *device)
1060 int err = dma_async_device_register(&device->common);
1063 ioat_disable_interrupts(device);
1064 pci_pool_destroy(device->completion_pool);
1065 pci_pool_destroy(device->dma_pool);
1071 /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
1072 static void ioat1_intr_quirk(struct ioatdma_device *device)
1074 struct pci_dev *pdev = device->pdev;
1077 pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1078 if (pdev->msi_enabled)
1079 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1081 dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
1082 pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
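/* per-channel sysfs attributes, exposed via the "quickdata" kobject that
 * ioat_kobject_add() registers below */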
1085 static ssize_t ring_size_show(struct dma_chan *c, char *page)
1087 struct ioat_dma_chan *ioat = to_ioat_chan(c);
1089 return sprintf(page, "%d\n", ioat->desccount);
1091 static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
1093 static ssize_t ring_active_show(struct dma_chan *c, char *page)
1095 struct ioat_dma_chan *ioat = to_ioat_chan(c);
1097 return sprintf(page, "%d\n", ioat->active);
1099 static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
1101 static ssize_t cap_show(struct dma_chan *c, char *page)
1103 struct dma_device *dma = c->device;
1105 return sprintf(page, "copy%s%s%s%s%s%s\n",
1106 dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
1107 dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
1108 dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
1109 dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
1110 dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
1111 dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
1114 struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
1116 static ssize_t version_show(struct dma_chan *c, char *page)
1118 struct dma_device *dma = c->device;
1119 struct ioatdma_device *device = to_ioatdma_device(dma);
1121 return sprintf(page, "%d.%d\n",
1122 device->version >> 4, device->version & 0xf);
1124 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
1126 static struct attribute *ioat1_attrs[] = {
1127 &ring_size_attr.attr,
1128 &ring_active_attr.attr,
1129 &ioat_cap_attr.attr,
1130 &ioat_version_attr.attr,
1135 ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1137 struct ioat_sysfs_entry *entry;
1138 struct ioat_chan_common *chan;
1140 entry = container_of(attr, struct ioat_sysfs_entry, attr);
1141 chan = container_of(kobj, struct ioat_chan_common, kobj);
1145 return entry->show(&chan->common, page);
1148 struct sysfs_ops ioat_sysfs_ops = {
1149 .show = ioat_attr_show,
1152 static struct kobj_type ioat1_ktype = {
1153 .sysfs_ops = &ioat_sysfs_ops,
1154 .default_attrs = ioat1_attrs,
1157 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
1159 struct dma_device *dma = &device->common;
1162 list_for_each_entry(c, &dma->channels, device_node) {
1163 struct ioat_chan_common *chan = to_chan_common(c);
1164 struct kobject *parent = &c->dev->device.kobj;
1167 err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
1169 dev_warn(to_dev(chan),
1170 "sysfs init error (%d), continuing...\n", err);
1171 kobject_put(&chan->kobj);
1172 set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
1177 void ioat_kobject_del(struct ioatdma_device *device)
1179 struct dma_device *dma = &device->common;
1182 list_for_each_entry(c, &dma->channels, device_node) {
1183 struct ioat_chan_common *chan = to_chan_common(c);
1185 if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
1186 kobject_del(&chan->kobj);
1187 kobject_put(&chan->kobj);
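/* ioat1_dma_probe - wire up the version-1 channel operations, run probe and
 * self-test, register with dmaengine, and optionally set up DCA */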
1192 int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
1194 struct pci_dev *pdev = device->pdev;
1195 struct dma_device *dma;
1198 device->intr_quirk = ioat1_intr_quirk;
1199 device->enumerate_channels = ioat1_enumerate_channels;
1200 dma = &device->common;
1201 dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1202 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
1203 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
1204 dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
1205 dma->device_is_tx_complete = ioat1_dma_is_complete;
1207 err = ioat_probe(device);
1210 ioat_set_tcp_copy_break(4096);
1211 err = ioat_register(device);
1214 ioat_kobject_add(device, &ioat1_ktype);
1217 device->dca = ioat_dca_init(pdev, device->reg_base);
1222 void __devexit ioat_dma_remove(struct ioatdma_device *device)
1224 struct dma_device *dma = &device->common;
1226 ioat_disable_interrupts(device);
1228 ioat_kobject_del(device);
1230 dma_async_device_unregister(dma);
1232 pci_pool_destroy(device->dma_pool);
1233 pci_pool_destroy(device->completion_pool);
1235 INIT_LIST_HEAD(&dma->channels);