/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * for the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/mm.h>		/* need struct page definitions */
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_PPC64

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;
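
/*
 * Illustrative sketch of how platform code might select one of the generic
 * ops sets for a device.  The archdata.dma_ops field and the setup function
 * name are assumptions for the example (get_dma_ops() above is the lookup
 * side); actual wiring lives in platform/bus code, not in this header.
 *
 *	static void example_pci_dma_dev_setup(struct pci_dev *pdev)
 *	{
 *		pdev->dev.archdata.dma_ops = &dma_direct_ops;
 *	}
 */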

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
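
/*
 * Illustrative sketch of typical driver usage: negotiate the addressing
 * capability before creating any mappings.  DMA_32BIT_MASK comes from
 * <linux/dma-mapping.h>; 'pdev' and the probe function are example names,
 * not defined in this header.
 *
 *	static int example_probe(struct pci_dev *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *			return -EIO;
 *		...
 *	}
 */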

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
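
/*
 * Illustrative sketch of coherent allocation from a driver: the CPU virtual
 * address and the bus address stay usable until freed, so no sync calls are
 * needed.  'pdev', 'ring' and 'ring_dma' are example names, not defined in
 * this header.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(program the device with 'ring_dma', access 'ring' from the CPU)
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */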

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}
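
/*
 * Illustrative sketch of a streaming mapping: map before the transfer,
 * check for failure, unmap once the device is done.  'dev', 'buf' and
 * 'len' are example names, not defined in this header.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	(start the transfer, wait for the device to finish)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */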

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */
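
/*
 * Illustrative sketch of a scatter/gather mapping: program hardware
 * descriptors from sg_dma_address()/sg_dma_len() for the entries that
 * dma_map_sg() returns, assuming the sg_init_table()/sg_set_buf() helpers
 * from <linux/scatterlist.h>.  'dev', 'buf0', 'buf1', 'len0' and 'len1'
 * are example names.
 *
 *	struct scatterlist sgl[2];
 *	int count;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *
 *	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	(program 'count' descriptors, run the transfer)
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */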

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
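
/*
 * Illustrative sketch of the sync pair around CPU access to a buffer that
 * stays mapped DMA_FROM_DEVICE: hand ownership to the CPU to read the data,
 * then back to the device before it writes again.  'dev', 'handle' and
 * 'len' are example names.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(the CPU may now inspect the received data)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */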

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
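
/*
 * Illustrative sketch: pad a streaming buffer so it does not share a cache
 * line with unrelated data on non-coherent parts.  ALIGN() is the helper
 * from <linux/kernel.h>; 'len' is an example name.
 *
 *	size_t padded = ALIGN(len, dma_get_cache_alignment());
 *	void *buf = kmalloc(padded, GFP_KERNEL);
 */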

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */