#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)
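
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * attributes are passed as a bitmask in the 'attrs' argument of the *_attrs
 * variants below. A driver allocating a large framebuffer that it never
 * touches from the CPU might combine them like this ("my_dev" and "fb_size"
 * are hypothetical driver names):
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE |
 *			      DMA_ATTR_NO_KERNEL_MAPPING;
 *	dma_addr_t fb_handle;
 *	void *token;
 *
 *	token = dma_alloc_attrs(&my_dev->dev, fb_size, &fb_handle,
 *				GFP_KERNEL, attrs);
 *	// with DMA_ATTR_NO_KERNEL_MAPPING, 'token' is an opaque cookie,
 *	// not a CPU-usable virtual address
 */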

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
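
/*
 * Illustrative sketch (not from the original header): a dma_addr_t is
 * typically stored in a device-visible descriptor while the CPU keeps a
 * separate kernel virtual pointer. The descriptor layout is hypothetical:
 *
 *	struct my_desc {
 *		__le64 buf_addr;	// device reads this DMA address
 *		__le32 buf_len;
 *	};
 *
 *	desc->buf_addr = cpu_to_le64(handle);	// handle is a dma_addr_t
 *	// never dereference 'handle' from the CPU; use the kernel vaddr
 */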

struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp,
				unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			  void *, dma_addr_t, size_t,
			  unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
};

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
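/*
 * For example, DMA_BIT_MASK(32) == 0x00000000ffffffffULL and
 * DMA_BIT_MASK(24) == 0x0000000000ffffffULL. The (n) == 64 special case
 * avoids the undefined behaviour of shifting a 64-bit value by 64 bits.
 */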

#define DMA_MASK_NONE 0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
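
/*
 * Example usage (editor's sketch, not part of the original header): a
 * streaming mapping of a driver buffer for a device write. "buf" and "len"
 * are hypothetical driver variables; the buffer must come from kmalloc or
 * similar, not vmalloc, because virt_to_page() is used above.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ... point the device at 'handle' and let it DMA ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */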

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
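
/*
 * Example usage (editor's sketch): mapping a scatterlist. Note that the
 * returned count may be smaller than 'nents' if entries were merged, and
 * dma_unmap_sg() must be called with the *original* nents. "sgl" and
 * "nents" are hypothetical driver variables.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	// ... program the device with 'mapped' sg entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */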

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, 0);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, 0);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
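
/*
 * Example usage (editor's sketch): dma_map_resource() is for mapping MMIO,
 * e.g. another device's FIFO register, typically for device-to-device
 * transfers driven by a DMA engine. "fifo_phys" is a hypothetical physical
 * MMIO address taken from a struct resource.
 *
 *	dma_addr_t dma = dma_map_resource(dev, fifo_phys, SZ_4K,
 *					  DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... hand 'dma' to the DMA engine as its source address ...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_FROM_DEVICE, 0);
 */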

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
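
/*
 * Example (editor's sketch): a long-lived streaming mapping whose ownership
 * bounces between CPU and device. The CPU may only touch the buffer between
 * the sync_for_cpu and sync_for_device calls; process() is a hypothetical
 * consumer.
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	for (;;) {
 *		// ... device DMAs into the buffer, raises an interrupt ...
 *		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *		process(buf);		// CPU may read the buffer now
 *		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *		// ... the buffer is owned by the device again ...
 *	}
 */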

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
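
/*
 * Example usage (editor's sketch): exporting a coherent buffer to user
 * space from a driver's mmap file operation. The "my_priv" structure
 * holding the CPU address, handle and size is hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */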

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
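
/*
 * Example usage (editor's sketch): a typical coherent allocation for a ring
 * of descriptors that both CPU and device access without explicit syncs.
 * "ring" and RING_BYTES are hypothetical driver names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... tell the device about 'ring_dma', use 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */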

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same value.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same value as, or a smaller one than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
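
/*
 * Example usage (editor's sketch): setting the masks at probe time and
 * falling back to 32-bit addressing when the wider mask is not supported
 * (both helpers return 0 on success).
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// neither mask is usable
 */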

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
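
/*
 * Example usage (editor's sketch): a driver whose device handles at most
 * 4 KiB per segment and cannot cross a 64 KiB boundary would publish those
 * limits like this (dev->dma_parms must point to valid storage first; the
 * boundary argument is a mask):
 *
 *	dma_set_max_seg_size(dev, SZ_4K);
 *	dma_set_seg_boundary(dev, SZ_64K - 1);
 */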

static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
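
/*
 * Example usage (editor's sketch, assuming this era's success-flag return
 * convention where 0 means failure): a platform driver carving device-local
 * SRAM out for coherent allocations. "res" is a hypothetical MEM resource;
 * on success, dma_alloc_coherent() for this device is satisfied from that
 * region.
 *
 *	if (!dma_declare_coherent_memory(dev, res->start, res->start,
 *					 resource_size(res),
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		return -ENXIO;
 */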

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
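
/*
 * Example usage (editor's sketch) for the unmap-state macros below: they
 * let a driver store the address/length needed later by dma_unmap_single()
 * at zero cost on platforms where unmap is a no-op. "my_tx_slot" and "slot"
 * are hypothetical.
 *
 *	struct my_tx_slot {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(slot, addr, handle);
 *	dma_unmap_len_set(slot, len, size);
 *	// ... later, at completion time ...
 *	dma_unmap_single(dev, dma_unmap_addr(slot, addr),
 *			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */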

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */