#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>
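/*
 * DMA on i386 is cache-coherent (dma_is_consistent() below is always 1),
 * so the "noncoherent" allocators are plain aliases for the coherent ones.
 */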
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
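/*
 * Minimal usage sketch; the device pointer "mydev" is hypothetical and
 * not part of this header:
 *
 *	dma_addr_t handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(mydev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(mydev, PAGE_SIZE, cpu_addr, handle);
 */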
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
	}

	flush_write_buffers();
	return nents;
}
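/*
 * Sketch of a map/unmap cycle; the scatterlist "sgl" and count "n" are
 * hypothetical:
 *
 *	int mapped = dma_map_sg(dev, sgl, n, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	...	program the device via sg_dma_address()/sg_dma_len()
 *	dma_unmap_sg(dev, sgl, mapped, DMA_TO_DEVICE);
 */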
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
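/*
 * With no IOMMU on this path, the bus address is simply the physical
 * address.  Usage sketch (the page "pg" is hypothetical):
 *
 *	dma_addr_t bus = dma_map_page(dev, pg, 0, PAGE_SIZE,
 *				      DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_FROM_DEVICE);
 */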
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}
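/*
 * The *_for_cpu syncs are no-ops because i386 DMA is coherent; the
 * *_for_device syncs only flush the CPU write buffers so that prior
 * stores are visible to the device before it starts DMA.  The same
 * pattern holds for the range and scatterlist variants below.
 */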
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
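/*
 * Mappings cannot fail on this implementation, hence the constant 0;
 * portable callers should still check, e.g.:
 *
 *	dma_addr_t bus = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 */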
extern int forbid_dac;
static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations within a tighter range than GFP_DMA allows.
	 */
	if (mask < 0x00ffffff)
		return 0;

	/* Work around chipset bugs */
	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}
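/*
 * 0x00ffffff is the 16MB GFP_DMA (ISA) boundary, the tightest range the
 * fallback allocator can honour; forbid_dac rejects 64-bit double
 * address cycle masks on chipsets known to mishandle them.
 */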
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
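/*
 * Typical probe-time usage (error handling is illustrative):
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */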
static inline int
dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return the
	 * maximum possible to be safe.
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}
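/*
 * The hooks below let a driver hand device-local memory to the DMA API
 * for coherent allocations.  A sketch, with illustrative addresses and
 * the DMA_MEMORY_MAP flag (declare returns 0 on failure):
 *
 *	if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *					 size, DMA_MEMORY_MAP))
 *		return -ENXIO;
 *	...
 *	dma_release_declared_memory(dev);
 */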
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif /* _ASM_I386_DMA_MAPPING_H */