#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1
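
/*
 * x86-64 implementation of the generic DMA mapping interface. Every
 * operation is dispatched through the dma_ops vector installed by the
 * active IOMMU backend (GART, SWIOTLB or nommu).
 *
 * The include set below is an assumption: these are the asm headers that
 * supply struct scatterlist, the I/O helpers such as flush_write_buffers(),
 * and the swiotlb declarations in the kernels this header comes from.
 */
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>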

extern dma_addr_t bad_dma_address;
extern const struct dma_mapping_ops *dma_ops;
extern int iommu_merge;
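
/*
 * Per-backend operation vector dispatched to by the inline wrappers below.
 * This is a minimal sketch reconstructed from the call sites in this file;
 * the kernel's full struct dma_mapping_ops carries further members (the
 * coherent allocation hooks among them) that nothing in this header uses.
 */
struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	dma_addr_t	(*map_single)(struct device *hwdev, void *ptr,
				      size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
					size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
							dma_addr_t dma_handle,
							unsigned long offset,
							size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
					   struct scatterlist *sg,
					   int nelems, int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				  int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev, struct scatterlist *sg,
				    int nents, int direction);
};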

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}
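
/*
 * All x86-64 memory is cache-coherent (cf. dma_is_consistent() below), so
 * the "noncoherent" allocators simply alias the coherent ones.
 */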
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);
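
/*
 * Streaming mappings: hand a kernel buffer to the device and get back the
 * bus address it should use. The wrappers below are shared by all IOMMU
 * backends; they only sanity-check the direction and defer to dma_ops.
 */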
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_single(dev, addr, size, direction);
}
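
/*
 * x86-64 has no highmem, so page_address() is always valid and page
 * mappings can be expressed directly in terms of dma_map_single().
 */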
#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single
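
/*
 * Ownership transfer for streaming mappings: call the *_for_cpu variants
 * before the CPU reads a buffer the device wrote, and the *_for_device
 * variants before handing the buffer back to the device. The dma_ops hooks
 * are optional; backends with nothing to do leave them NULL.
 */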
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
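
/*
 * Map a scatterlist in one call. The backend may coalesce adjacent
 * entries (cf. iommu_merge), so the returned segment count can be smaller
 * than nents; a return of 0 indicates failure.
 */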
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
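
/* Backend-specific check that the device can reach the given address mask. */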
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}
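
/* Memory is always DMA-consistent on x86-64, whatever the handle. */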
#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);
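
/*
 * With coherent memory there is nothing to write back or invalidate;
 * ordering any outstanding posted writes is all that is needed here.
 */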
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
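
/*
 * fallback_dev stands in when a mapping is requested without a device;
 * panic_on_overflow turns IOMMU-space exhaustion into a panic instead of
 * a bad_dma_address return.
 */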
extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */