select DMA_NONCOHERENT
bool
+config DMA_PERDEV_COHERENT
+ bool
+ select DMA_MAYBE_COHERENT
+
config DMA_COHERENT
bool

struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+ /* Non-zero if DMA is coherent with CPU caches */
+ bool dma_coherent;
+#endif
};
struct pdev_archdata {

IO_COHERENCE_DISABLED,
};
-#ifdef CONFIG_DMA_MAYBE_COHERENT
+#if defined(CONFIG_DMA_PERDEV_COHERENT)
+/* Don't provide (hw_)coherentio to avoid misuse */
+#elif defined(CONFIG_DMA_MAYBE_COHERENT)
extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
#else

extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
+#define arch_setup_dma_ops arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, const struct iommu_ops *iommu,
+ bool coherent)
+{
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+ dev->archdata.dma_coherent = coherent;
+#endif
+}
+
#endif /* _ASM_DMA_MAPPING_H */
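
For illustration, the coherent argument that the new arch_setup_dma_ops() hook receives would typically come from firmware description; in the device-tree case the generic OF DMA-configuration code derives it from the standard "dma-coherent" property. The helper below is only a sketch of that flow; its name and the zero dma_base/size and NULL iommu arguments are assumptions, not the mainline call site.

/*
 * Sketch only: how a bus/firmware layer could feed per-device coherence
 * into arch_setup_dma_ops(). The example_of_dma_setup() name and the
 * placeholder dma_base/size/iommu arguments are assumptions.
 */
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>

static void example_of_dma_setup(struct device *dev)
{
	/* the standard "dma-coherent" DT property marks a coherent device */
	bool coherent = of_dma_is_coherent(dev->of_node);

	/* dma_base/size of 0 and a NULL iommu are placeholders */
	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
}
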
static inline int plat_device_is_coherent(struct device *dev)
{
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+ return dev->archdata.dma_coherent;
+#else
switch (coherentio) {
default:
case IO_COHERENCE_DEFAULT:
return hw_coherentio;
case IO_COHERENCE_ENABLED:
return 1;
case IO_COHERENCE_DISABLED:
return 0;
}
+#endif
}
#ifndef plat_post_dma_flush
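
As a rough sketch of how plat_device_is_coherent() is consumed, the arch streaming-DMA path checks it to decide whether explicit cache maintenance is needed before handing a buffer to the device. The function below only paraphrases that shape; its name is hypothetical and it is not the exact mainline code.

/*
 * Illustrative sketch, not the mainline implementation: non-coherent
 * devices get explicit cache maintenance, coherent ones skip it.
 * __dma_sync() and plat_map_dma_mem_page() stand in for the existing
 * MIPS helpers; example_map_page() is a hypothetical name.
 */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir)
{
	/* non-coherent devices need a writeback/invalidate before DMA */
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, dir);

	return plat_map_dma_mem_page(dev, page) + offset;
}
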
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+# if defined(CONFIG_DMA_PERDEV_COHERENT)
+ if (0) {
+# else
if ((coherentio == IO_COHERENCE_ENABLED) ||
((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
+# endif
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;

#include <dma-coherence.h>
-#ifdef CONFIG_DMA_MAYBE_COHERENT
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
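
The coherentio state that this last hunk compiles out under DMA_PERDEV_COHERENT is normally driven from the kernel command line, roughly along the lines of the early_param handlers below; this is a paraphrase of the existing MIPS handling, with approximate message strings.

/*
 * Approximate sketch of the command-line override that per-device
 * coherence makes irrelevant: "coherentio" forces coherent I/O on,
 * "nocoherentio" forces it off. Paraphrased, not copied verbatim.
 */
static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
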