/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/lmb.h>

#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

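/*
 * A minimal sketch of how a platform might register such an offset
 * (hypothetical hook name and offset value; real platforms do this from
 * their bus or device fixup code):
 *
 *	static void myplat_dma_dev_setup(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x80000000ul;
 *	}
 *
 * With that in place, get_dma_direct_offset() below returns 0x80000000
 * for the device instead of the PCI_DRAM_OFFSET default.
 */
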
unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to do: a direct mapping holds no resources to release */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

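/*
 * Worked example (hypothetical sizes): with 1GB of RAM,
 * lmb_end_of_DRAM() returns 0x40000000, so a device advertising a
 * 32-bit DMA mask (0xffffffff) passes this test, while one limited to
 * 29 bits (0x1fffffff) is rejected.
 */
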
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

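/*
 * Example of the address arithmetic above (made-up values): a page at
 * physical 0x10000000 mapped at offset 0x200 on a device whose
 * per-device offset is 0x80000000 yields the bus address 0x90000200.
 */
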
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent			= dma_direct_alloc_coherent,
	.free_coherent			= dma_direct_free_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
	.sync_single_range_for_device	= dma_direct_sync_single_range,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

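/*
 * Usage sketch: platform code attaches these ops to a device (the device
 * pointer here is illustrative), after which the generic DMA API routes
 * to the functions above:
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *	...
 *	dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 */
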
#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);