#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
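/*
 * Every helper below dispatches through a per-platform operations table
 * (nommu, gart, swiotlb).  The members listed here are reconstructed
 * from the callbacks this header actually invokes; the in-tree struct
 * carries additional entries (e.g. alloc_coherent, free_coherent,
 * is_phys), so treat this as a sketch of the layout rather than the
 * authoritative definition.
 */
struct dma_mapping_ops {
        int        (*mapping_error)(dma_addr_t dma_addr);
        dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
                        size_t size, int direction);
        void       (*unmap_single)(struct device *dev, dma_addr_t addr,
                        size_t size, int direction);
        void       (*sync_single_for_cpu)(struct device *hwdev,
                        dma_addr_t dma_handle, size_t size, int direction);
        void       (*sync_single_for_device)(struct device *hwdev,
                        dma_addr_t dma_handle, size_t size, int direction);
        void       (*sync_single_range_for_cpu)(struct device *hwdev,
                        dma_addr_t dma_handle, unsigned long offset,
                        size_t size, int direction);
        void       (*sync_single_range_for_device)(struct device *hwdev,
                        dma_addr_t dma_handle, unsigned long offset,
                        size_t size, int direction);
        void       (*sync_sg_for_cpu)(struct device *hwdev,
                        struct scatterlist *sg, int nelems, int direction);
        void       (*sync_sg_for_device)(struct device *hwdev,
                        struct scatterlist *sg, int nelems, int direction);
        int        (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                        int nents, int direction);
        void       (*unmap_sg)(struct device *hwdev, struct scatterlist *sg,
                        int nents, int direction);
};

extern struct dma_mapping_ops *dma_ops;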
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
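/*
 * A failed mapping comes back as bad_dma_address; an IOMMU
 * implementation can refine the check via dma_ops->mapping_error.
 */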
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);
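/*
 * dma_map_page()/dma_unmap_page() below expand to dma_map_single() and
 * dma_unmap_single(), which this copy of the header does not define.
 * The sketches here follow the same dma_ops dispatch pattern as the
 * other helpers and are an assumption, not verbatim originals.
 */
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_single(dev, addr, size, direction);
}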
#define dma_map_page(dev,page,offset,size,dir) \
        dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single
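/*
 * The dma_sync_* helpers hand buffer ownership back and forth for
 * streaming mappings: call *_for_cpu before the CPU reads data the
 * device wrote, and *_for_device before the device touches the buffer
 * again.  Each one ends with flush_write_buffers() so outstanding CPU
 * stores are visible to the device.
 */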
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
                                                   offset, size, direction);
        flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);
        flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}
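/*
 * Map a scatter/gather list for DMA.  The return value is the number
 * of entries actually mapped, which an IOMMU may make smaller than
 * nents by merging physically adjacent segments (cf. iommu_merge).
 */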
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
extern int dma_supported(struct device *hwdev, u64 mask);
/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}
#define dma_is_consistent(d, h) 1
extern int dma_set_mask(struct device *dev, u64 mask);
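/*
 * x86-64 DMA is cache-coherent (dma_is_consistent() is always 1), so
 * syncing a noncoherent mapping only needs the CPU write buffers
 * flushed; no cache lines have to be invalidated or written back.
 */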
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}
extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */