git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - lib/iommu-common.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph...
[karo-tx-linux.git] / lib / iommu-common.c
index b99f1d744a8dd67e6b7d7961b6e3cfbb8a07c381..df30632f0bef9ec1c36a48d83a6eb87cd18ee405 100644 (file)
 #include <linux/dma-mapping.h>
 #include <linux/hash.h>
 
-unsigned long iommu_large_alloc = 15;
+#ifndef        DMA_ERROR_CODE
+#define        DMA_ERROR_CODE (~(dma_addr_t)0x0)
+#endif
 
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
+static unsigned long iommu_large_alloc = 15;
+
+static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
 
 static inline bool need_flush(struct iommu_map_table *iommu)
 {
@@ -40,7 +44,7 @@ static void setup_iommu_pool_hash(void)
                return;
        do_once = true;
        for_each_possible_cpu(i)
-               per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+               per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
 }
 
 /*
@@ -49,12 +53,12 @@ static void setup_iommu_pool_hash(void)
  * the top 1/4 of the table will be set aside for pool allocations
  * of more than iommu_large_alloc pages.
  */
-extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
-                               unsigned long num_entries,
-                               u32 table_shift,
-                               void (*lazy_flush)(struct iommu_map_table *),
-                               bool large_pool, u32 npools,
-                               bool skip_span_boundary_check)
+void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+                        unsigned long num_entries,
+                        u32 table_shift,
+                        void (*lazy_flush)(struct iommu_map_table *),
+                        bool large_pool, u32 npools,
+                        bool skip_span_boundary_check)
 {
        unsigned int start, i;
        struct iommu_pool *p = &(iommu->large_pool);
@@ -102,7 +106,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
                                unsigned long mask,
                                unsigned int align_order)
 {
-       unsigned int pool_hash = __this_cpu_read(iommu_pool_hash);
+       unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_pool *pool;
        int pass = 0;
@@ -171,7 +175,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << iommu->table_shift);
        else
-               boundary_size = ALIGN(1UL << 32, 1 << iommu->table_shift);
+               boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
 
        boundary_size = boundary_size >> iommu->table_shift;
        /*