arch/x86/include/asm/dma-mapping.h (linux-beck.git, as of commit "kmemcheck: add DMA hooks")
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        kmemcheck_mark_initialized(ptr, size);
        addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
        debug_dma_map_page(hwdev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);
        return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}
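
/*
 * Usage sketch (illustration only, not part of the original header): a
 * typical streaming mapping of a kernel buffer for a device-bound
 * transfer. "dev", "buf" and "len" are hypothetical caller-owned values.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *
 *      ... point the hardware at "handle" and wait for the transfer ...
 *
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */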

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);

        return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(hwdev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
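
/*
 * Usage sketch (illustration only, not part of the original header):
 * mapping a scatterlist. dma_map_sg() may coalesce entries, so the
 * returned count is what gets programmed into the device, while the
 * original nents is passed back to dma_unmap_sg(). "dev", "sglist",
 * "nents" and program_hw_entry() are hypothetical.
 *
 *      struct scatterlist *s;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      if (count == 0)
 *              return -ENOMEM;
 *
 *      for_each_sg(sglist, s, count, i)
 *              program_hw_entry(sg_dma_address(s), sg_dma_len(s));
 *
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */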

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}
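
/*
 * Usage sketch (illustration only, not part of the original header):
 * handing a long-lived streaming mapping back and forth between the CPU
 * and the device, e.g. a receive buffer that is reused without being
 * remapped. "dev", "handle" and "len" are hypothetical.
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the CPU may now safely read the buffer contents ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the buffer belongs to the device again ...
 */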

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
        debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
                                            offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
        debug_dma_sync_single_range_for_device(hwdev, dma_handle,
                                               offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}
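
/*
 * Usage sketch (illustration only, not part of the original header):
 * mapping a portion of a struct page directly, which also works for
 * highmem pages with no permanent kernel virtual address. "dev", "page",
 * "offset" and "len" are hypothetical.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... the device reads "len" bytes starting at "handle" ...
 *      dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */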

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_BIT_MASK(24))
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
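
/*
 * Worked example for the helpers above (illustration only, not part of
 * the original header; "dev" is a hypothetical struct device whose
 * coherent_dma_mask has already been set by its driver):
 *
 *      coherent_dma_mask == DMA_BIT_MASK(24):
 *              dma_alloc_coherent_gfp_flags(dev, GFP_KERNEL) adds GFP_DMA,
 *              so the allocation comes from ZONE_DMA (below 16MB).
 *      coherent_dma_mask == DMA_BIT_MASK(32), on CONFIG_X86_64:
 *              GFP_DMA32 is added instead, restricting it to the low 4GB.
 *      coherent_dma_mask == DMA_BIT_MASK(64):
 *              the gfp flags are returned unchanged.
 */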

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle,
                                     dma_alloc_coherent_gfp_flags(dev, gfp));
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
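
/*
 * Usage sketch (illustration only, not part of the original header): a
 * coherent allocation for a descriptor ring that both the CPU and the
 * device access concurrently. "dev" and "RING_SIZE" are hypothetical.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... hand "ring_dma" to the device, access the ring through "ring" ...
 *      dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */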

#endif