#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has been already
 * transferred to 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)
/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
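
/*
 * Illustrative sketch, not part of this header: attributes are bit flags that
 * can be OR'ed together and passed to the *_attrs variants of the mapping
 * calls below. Assuming "dev" is a valid struct device and "buf"/"len" a
 * driver-owned buffer and its length, an RX mapping whose driver does its
 * own cache maintenance might look like:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
 *				      DMA_ATTR_SKIP_CPU_SYNC);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */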
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
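
/*
 * Worked example: DMA_BIT_MASK(32) expands to 0x00000000ffffffffULL, while
 * DMA_BIT_MASK(64) takes the special case and yields ~0ULL; the special case
 * exists because shifting a 64-bit value by 64 would be undefined behaviour.
 */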
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}
static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);

	return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
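
/*
 * Usage sketch (illustrative only; "dev", "buf" and "len" are assumed to be a
 * valid struct device, a kmalloc()'ed buffer and its length): a streaming
 * mapping is created, ownership is bounced between device and CPU with the
 * sync helpers, and the mapping is torn down afterwards:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 *	... let the device DMA into the buffer ...
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read buf; hand it back before reusing it ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */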
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
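
/*
 * Sketch of a typical caller (hypothetical driver code, not part of this
 * header): a char-device mmap handler exports a buffer obtained from
 * dma_alloc_coherent() to user space. "my_dev", "my_cpu_addr",
 * "my_dma_handle" and "my_size" stand in for driver-private state.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_dma_handle, my_size);
 *	}
 */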
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
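
/*
 * Sketch of typical use (hypothetical driver code; "pdev" is the PCI device
 * being probed, "priv" driver-private state and "RING_BYTES" a stand-in
 * size): a coherent buffer shared with the device for its descriptor ring.
 *
 *	priv->ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&priv->ring_dma, GFP_KERNEL);
 *	if (!priv->ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, priv->ring, priv->ring_dma);
 */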
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
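
/*
 * Sketch of a common probe-time pattern (hypothetical driver code; "pdev" is
 * the device being probed): ask for a 64-bit mask and fall back to 32 bits if
 * the platform cannot satisfy it.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */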
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}
static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	} else
		return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	} else
		return -EIO;
}
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif
static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
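
/*
 * Usage sketch (hypothetical driver code): the DEFINE_DMA_UNMAP_* macros let a
 * driver embed the address and length needed for a later unmap in its own
 * state without paying for the fields on configurations that do not need
 * them. "my_tx_buf", "buf", "handle" and "size" are stand-in names.
 *
 *	struct my_tx_buf {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, handle);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */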