x86: move dma_unmap_single to common header
[karo-tx-linux.git] / include/asm-x86/dma-mapping_32.h
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

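/*
 * Illustrative sketch, not part of the original header: how a driver
 * might use the dma_alloc_coherent()/dma_free_coherent() routines
 * declared above for a small descriptor ring.  The ring structure,
 * its size and the device pointer are hypothetical.
 *
 *      struct my_ring {
 *              void            *vaddr;
 *              dma_addr_t      dma;
 *              size_t          size;
 *      };
 *
 *      static int my_ring_alloc(struct device *dev, struct my_ring *ring)
 *      {
 *              ring->size  = PAGE_SIZE;
 *              ring->vaddr = dma_alloc_coherent(dev, ring->size,
 *                                               &ring->dma, GFP_KERNEL);
 *              if (!ring->vaddr)
 *                      return -ENOMEM;
 *              memset(ring->vaddr, 0, ring->size);
 *              return 0;
 *      }
 *
 *      static void my_ring_free(struct device *dev, struct my_ring *ring)
 *      {
 *              dma_free_coherent(dev, ring->size, ring->vaddr, ring->dma);
 *      }
 */
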
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                /* No IOMMU on i386: the bus address is the physical address. */
                sg->dma_address = sg_phys(sg);
        }

        flush_write_buffers();
        return nents;
}

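/*
 * Illustrative sketch, not part of the original header: mapping a
 * scatterlist for a transfer to the device and unmapping it afterwards
 * (dma_unmap_sg() is defined below).  The scatterlist, nents and the
 * write_desc() helper are hypothetical.
 *
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *      for_each_sg(sglist, sg, count, i)
 *              write_desc(i, sg_dma_address(sg), sg_dma_len(sg));
 *
 *      ... let the device run the transfer ...
 *
 *      dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
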
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

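/*
 * Illustrative sketch, not part of the original header: the streaming
 * pattern for a buffer the device writes into, using dma_map_page(),
 * the dma_sync_single_*() helpers and dma_mapping_error() defined
 * above.  The page, offset and length come from a hypothetical
 * receive path.
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(dma))
 *              return -EIO;
 *
 *      ... point the device at "dma" and let it fill the buffer ...
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the CPU may now look at the data ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ... or give the buffer back to the device ...
 *
 *      dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
 */
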
extern int forbid_dac;

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must be within a tighter
         * range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        /* Work around chipset bugs */
        if (forbid_dac > 0 && mask > 0xffffffffULL)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

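/*
 * Illustrative sketch, not part of the original header: a probe routine
 * negotiating its DMA mask with dma_set_mask() above, which in turn
 * consults dma_supported().  The 32-bit/24-bit fallback and the device
 * pointer are hypothetical; DMA_32BIT_MASK and DMA_24BIT_MASK come from
 * <linux/dma-mapping.h> in kernels of this vintage.
 *
 *      if (dma_set_mask(dev, DMA_32BIT_MASK) &&
 *          dma_set_mask(dev, DMA_24BIT_MASK)) {
 *              dev_err(dev, "no usable DMA configuration\n");
 *              return -EIO;
 *      }
 */
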
static inline int
dma_get_cache_alignment(void)
{
        /*
         * No easy way to get cache size on all x86, so return the
         * maximum possible, to be safe.
         */
        return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d, h) (1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif /* _ASM_I386_DMA_MAPPING_H */
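
/*
 * Illustrative sketch, not part of the original header: carving out a
 * device-local window with dma_declare_coherent_memory() so that later
 * dma_alloc_coherent() calls for this device are satisfied from it.
 * The bus address, device address, size and flag choice are
 * hypothetical; in this era the function returns non-zero on success.
 *
 *      if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *                                       0x10000,
 *                                       DMA_MEMORY_MAP |
 *                                       DMA_MEMORY_EXCLUSIVE))
 *              return -ENOMEM;
 *      ...
 *      dma_release_declared_memory(dev);
 */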