#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>			/* flush_write_buffers() */
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

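/*
 * On X86_64, a device may carry its own dma_ops in dev->archdata (set
 * e.g. by an IOMMU driver); these override the global dma_ops.
 */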
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Keep the old behaviour: without ->mapping_error, compare against bad_dma_address */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	kmemcheck_mark_initialized(ptr, size);
	addr = ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(hwdev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

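/*
 * Illustrative sketch of the streaming API above (not part of this
 * header; "my_dev", "buf" and "len" are hypothetical driver names):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for the device to finish ...
 *	dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
 */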
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
	debug_dma_map_sg(hwdev, sg, nents, ents, dir);

	return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(hwdev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}

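/*
 * Illustrative sketch: dma_map_sg() may coalesce entries (e.g. behind
 * an IOMMU), so a driver must iterate over the *returned* count, yet
 * still unmap with the original nents. "my_dev", "sglist", "nents" and
 * program_hw_entry() are hypothetical:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(my_dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_entry(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(my_dev, sglist, nents, DMA_FROM_DEVICE);
 */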
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

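/*
 * Illustrative sketch: a streaming mapping belongs to the device between
 * map and unmap. If the CPU must look at the data in between, bracket
 * the access with the sync calls above ("my_dev", "handle" and "len"
 * are hypothetical):
 *
 *	dma_sync_single_for_cpu(my_dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read the buffer contents ...
 *	dma_sync_single_for_device(my_dev, handle, len, DMA_FROM_DEVICE);
 */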
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
					    offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
					       offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);

	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

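/*
 * Illustrative sketch: dma_map_page() is the page/offset flavour of
 * dma_map_single(), usable for highmem pages that have no kernel
 * virtual address ("my_dev" and "page" are hypothetical):
 *
 *	dma_addr_t handle = dma_map_page(my_dev, page, 0, PAGE_SIZE,
 *					 DMA_BIDIRECTIONAL);
 *	if (dma_mapping_error(my_dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(my_dev, handle, PAGE_SIZE, DMA_BIDIRECTIONAL);
 */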
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/* x86 is cache-coherent; only the write buffers need flushing. */
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86, so
	 * return the maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

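/*
 * Worked example of the selection above: a 24-bit coherent mask (old
 * ISA-style devices) yields GFP_DMA; on 64-bit kernels a 32-bit mask
 * yields GFP_DMA32; a full 64-bit mask adds no zone flag at all.
 */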
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;
	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

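/*
 * Illustrative sketch: coherent allocations return a CPU virtual address
 * and fill in a device-visible handle; no sync calls are needed for such
 * memory. "my_dev", "ring" and RING_BYTES are hypothetical; see
 * dma_free_coherent() below for the release side:
 *
 *	dma_addr_t ring_handle;
 *	void *ring = dma_alloc_coherent(my_dev, RING_BYTES, &ring_handle,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, RING_BYTES, ring, ring_handle);
 */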
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */