#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
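
/*
 * DMA sync helpers implemented by the machine-vector support code and used
 * by the platform dma_map_ops implementations for their sync callbacks.
 */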
extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);
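
/*
 * IA-64 is cache-coherent, so "noncoherent" allocations can simply alias
 * the coherent ones.
 */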
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
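
/*
 * get_dma_ops() resolves to the machine vector's dma_map_ops; the generic
 * header below builds the dma_map_*()/dma_sync_*() inlines on top of it.
 */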
#define get_dma_ops(dev) platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	debug_dma_mapping_error(dev, daddr);
	return ops->mapping_error(dev, daddr);
}
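
/* Ask the platform dma_map_ops whether it can handle the given DMA mask. */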
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}
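
/* Set the device's DMA mask, rejecting masks the platform cannot support. */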
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
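
/*
 * True when the whole [addr, addr + size) range is addressable under the
 * device's current DMA mask.
 */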
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}
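
/*
 * Bus (DMA) addresses match physical addresses at this layer; any remapping
 * (e.g. swiotlb bounce buffering) is done by the platform dma_map_ops.
 */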
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */