/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
24 #include <linux/dmaengine.h>
29 extern int ioat_pending_level;
32 * workaround for IOAT ver.3.0 null descriptor issue
33 * (channel returns error when size is 0)
35 #define NULL_DESC_BUFFER_SIZE 1
37 #define IOAT_MAX_ORDER 16
38 #define ioat_get_alloc_order() \
39 (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
41 /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
42 * @base: common ioat channel parameters
43 * @xfercap_log; log2 of channel max transfer length (for fast division)
44 * @head: allocated index
45 * @issued: hardware notification point
46 * @tail: cleanup index
47 * @pending: lock free indicator for issued != head
48 * @dmacount: identical to 'head' except for occasionally resetting to zero
49 * @alloc_order: log2 of the number of allocated descriptors
50 * @ring: software ring buffer implementation of hardware ring
51 * @ring_lock: protects ring attributes
53 struct ioat2_dma_chan {
54 struct ioat_chan_common base;
62 struct ioat_ring_ent **ring;
66 static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
68 struct ioat_chan_common *chan = to_chan_common(c);
70 return container_of(chan, struct ioat2_dma_chan, base);
73 static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
75 return (1 << ioat->alloc_order) - 1;
78 /* count of descriptors in flight with the engine */
79 static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
81 return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
84 /* count of descriptors pending submission to hardware */
85 static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
87 return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
90 static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
92 u16 num_descs = ioat2_ring_mask(ioat) + 1;
93 u16 active = ioat2_ring_active(ioat);
95 BUG_ON(active > num_descs);
97 return num_descs - active;
100 /* assumes caller already checked space */
101 static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
104 return ioat->head - len;
107 static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
109 u16 num_descs = len >> ioat->xfercap_log;
111 num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
115 struct ioat_ring_ent {
116 struct ioat_dma_descriptor *hw;
117 struct dma_async_tx_descriptor txd;
124 static inline struct ioat_ring_ent *
125 ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
127 return ioat->ring[idx & ioat2_ring_mask(ioat)];
130 static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
132 struct ioat_chan_common *chan = &ioat->base;
134 writel(addr & 0x00000000FFFFFFFF,
135 chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
137 chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
140 int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
141 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
142 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
143 struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
144 #endif /* IOATDMA_V2_H */