/*
 * include/asm-x86/dma-mapping_64.h
 *
 * x86-64 DMA mapping helpers, dispatched through the dma_ops operation
 * vector ("x86: implement dma_map_single through dma_ops").
 */
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
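
/*
 * Returns nonzero when @dma_addr denotes a failed mapping.  An
 * implementation may supply its own check through the mapping_error
 * hook; otherwise the address is compared against the bad_dma_address
 * sentinel.
 */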
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}
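
/*
 * Illustrative use (a sketch only; dma_map_single itself is provided by
 * the shared asm-x86/dma-mapping.h, which dispatches through dma_ops):
 *
 *      dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(addr))
 *              return -ENOMEM;
 */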

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
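
/*
 * x86-64 DMA is cache-coherent, so the "noncoherent" variants above are
 * simply aliases for the coherent allocator declared below.
 */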
extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);
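
/*
 * Example (an illustrative sketch only):
 *
 *      dma_addr_t handle;
 *      void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *      if (!vaddr)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, PAGE_SIZE, vaddr, handle);
 */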

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_single(dev, addr, size, direction);
}
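
/*
 * Every page is in the kernel direct mapping on x86-64 (no highmem),
 * so mapping a page reduces to mapping its kernel virtual address.
 */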
#define dma_map_page(dev, page, offset, size, dir) \
        dma_map_single((dev), page_address(page) + (offset), (size), (dir))

#define dma_unmap_page dma_unmap_single
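
/*
 * Streaming-sync helpers.  Unlike the map/unmap hooks, which every
 * dma_ops implementation provides, the sync hooks are optional (nommu
 * needs none), hence the NULL checks.  Each helper finishes with
 * flush_write_buffers() so outstanding CPU writes are ordered before
 * the device looks at memory.
 */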
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                                   size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}
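
/*
 * Scatter-gather map/unmap, dispatched straight to the (mandatory)
 * dma_ops hooks.  dma_map_sg() returns the number of DMA segments
 * actually mapped, or 0 on failure.
 */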
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
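
/*
 * Illustrative scatterlist usage (a sketch only):
 *
 *      int n = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *      if (n == 0)
 *              return -EIO;
 *      ...program the device with the n mapped segments...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg().
 */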

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}
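
/* x86 DMA is cache-coherent, so every allocation is consistent. */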
#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);
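
/*
 * With cache-coherent DMA there is nothing to synchronize beyond
 * ordering the CPU's outstanding writes.
 */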
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}
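
/*
 * fallback_dev is used when a driver passes a NULL device;
 * panic_on_overflow selects panic versus error return when IOMMU
 * space runs out (both defined in the arch DMA code).
 */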
extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */