diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 89f5bd4e1d5201c847ff77823b6b4159a741ef2f..99e4423eb2b80b142024bed892ddc4a84ac5e576 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -60,6 +60,7 @@
 #include <linux/atomic.h>
 #include <linux/mmu_notifier.h>
 #include <linux/uaccess.h>
+#include <linux/cgroup_rdma.h>
 
 extern struct workqueue_struct *ib_wq;
 extern struct workqueue_struct *ib_comp_wq;
@@ -1356,6 +1357,12 @@ struct ib_fmr_attr {
 
 struct ib_umem;
 
+struct ib_rdmacg_object {
+#ifdef CONFIG_CGROUP_RDMA
+       struct rdma_cgroup      *cg;            /* owner rdma cgroup */
+#endif
+};
+
 struct ib_ucontext {
        struct ib_device       *device;
        struct list_head        pd_list;
@@ -1388,6 +1395,8 @@ struct ib_ucontext {
        struct list_head        no_private_counters;
        int                     odp_mrs_count;
 #endif
+
+       struct ib_rdmacg_object cg_obj;
 };
 
 struct ib_uobject {
@@ -1395,6 +1404,7 @@ struct ib_uobject {
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
+       struct ib_rdmacg_object cg_obj;         /* rdmacg object */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        struct rw_semaphore     mutex;          /* protects .live */
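
The new cg_obj members tie each user context and user object to the rdma cgroup that owns it. Below is a minimal sketch of the intended charge/uncharge pattern around uobject creation; the helper names (ib_rdmacg_try_charge()/ib_rdmacg_uncharge()) and the RDMACG_RESOURCE_HCA_OBJECT index are assumptions about the rdma cgroup controller API and are shown for illustration only:

    /*
     * Illustrative only: charge the caller's rdma cgroup when a new
     * uobject is allocated and uncharge it again when the object is
     * destroyed.  Helper names and the resource index are assumed.
     */
    static int example_create_uobj(struct ib_device *dev,
                                   struct ib_uobject *uobj)
    {
            int ret;

            ret = ib_rdmacg_try_charge(&uobj->cg_obj, dev,
                                       RDMACG_RESOURCE_HCA_OBJECT);
            if (ret)
                    return ret;             /* cgroup limit exceeded */
            /* ... create the underlying verbs object, insert into idr ... */
            return 0;
    }

    static void example_destroy_uobj(struct ib_device *dev,
                                     struct ib_uobject *uobj)
    {
            /* ... destroy the underlying verbs object ... */
            ib_rdmacg_uncharge(&uobj->cg_obj, dev,
                               RDMACG_RESOURCE_HCA_OBJECT);
    }
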
@@ -1843,53 +1853,6 @@ struct ib_cache {
        struct ib_port_cache   *ports;
 };
 
-struct ib_dma_mapping_ops {
-       int             (*mapping_error)(struct ib_device *dev,
-                                        u64 dma_addr);
-       u64             (*map_single)(struct ib_device *dev,
-                                     void *ptr, size_t size,
-                                     enum dma_data_direction direction);
-       void            (*unmap_single)(struct ib_device *dev,
-                                       u64 addr, size_t size,
-                                       enum dma_data_direction direction);
-       u64             (*map_page)(struct ib_device *dev,
-                                   struct page *page, unsigned long offset,
-                                   size_t size,
-                                   enum dma_data_direction direction);
-       void            (*unmap_page)(struct ib_device *dev,
-                                     u64 addr, size_t size,
-                                     enum dma_data_direction direction);
-       int             (*map_sg)(struct ib_device *dev,
-                                 struct scatterlist *sg, int nents,
-                                 enum dma_data_direction direction);
-       void            (*unmap_sg)(struct ib_device *dev,
-                                   struct scatterlist *sg, int nents,
-                                   enum dma_data_direction direction);
-       int             (*map_sg_attrs)(struct ib_device *dev,
-                                       struct scatterlist *sg, int nents,
-                                       enum dma_data_direction direction,
-                                       unsigned long attrs);
-       void            (*unmap_sg_attrs)(struct ib_device *dev,
-                                         struct scatterlist *sg, int nents,
-                                         enum dma_data_direction direction,
-                                         unsigned long attrs);
-       void            (*sync_single_for_cpu)(struct ib_device *dev,
-                                              u64 dma_handle,
-                                              size_t size,
-                                              enum dma_data_direction dir);
-       void            (*sync_single_for_device)(struct ib_device *dev,
-                                                 u64 dma_handle,
-                                                 size_t size,
-                                                 enum dma_data_direction dir);
-       void            *(*alloc_coherent)(struct ib_device *dev,
-                                          size_t size,
-                                          u64 *dma_handle,
-                                          gfp_t flag);
-       void            (*free_coherent)(struct ib_device *dev,
-                                        size_t size, void *cpu_addr,
-                                        u64 dma_handle);
-};
-
 struct iw_cm_verbs;
 
 struct ib_port_immutable {
@@ -1900,6 +1863,7 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
+       /* Do not access @dma_device directly from ULP nor from HW drivers. */
        struct device                *dma_device;
 
        char                          name[IB_DEVICE_NAME_MAX];
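
The new comment makes @dma_device private to the core: with ib_dma_mapping_ops gone, ULPs and HW drivers are expected to go through the ib_dma_*() wrappers further down in this header, which now forward straight to the generic DMA API. A brief usage sketch (ibdev, buf and len are placeholder names):

    /* Map a kernel buffer for a send, verify the mapping, then unmap. */
    u64 addr;

    addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
    if (ib_dma_mapping_error(ibdev, addr))
            return -ENOMEM;
    /* ... build and post a work request that references addr ... */
    ib_dma_unmap_single(ibdev, addr, len, DMA_TO_DEVICE);
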
@@ -2151,7 +2115,6 @@ struct ib_device {
                                                           struct ib_rwq_ind_table_init_attr *init_attr,
                                                           struct ib_udata *udata);
        int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
-       struct ib_dma_mapping_ops   *dma_ops;
 
        struct module               *owner;
        struct device                dev;
@@ -2178,6 +2141,10 @@ struct ib_device {
        struct attribute_group       *hw_stats_ag;
        struct rdma_hw_stats         *hw_stats;
 
+#ifdef CONFIG_CGROUP_RDMA
+       struct rdmacg_device         cg_device;
+#endif
+
        /**
         * The following mandatory functions are used only at device
         * registration.  Keep functions such as these at the end of this
@@ -3043,8 +3010,6 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
@@ -3059,8 +3024,6 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
 
@@ -3075,28 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
 {
-       if (dev->dma_ops)
-               dev->dma_ops->unmap_single(dev, addr, size, direction);
-       else
-               dma_unmap_single(dev->dma_device, addr, size, direction);
-}
-
-static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
-                                         void *cpu_addr, size_t size,
-                                         enum dma_data_direction direction,
-                                         unsigned long dma_attrs)
-{
-       return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
-                                   direction, dma_attrs);
-}
-
-static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
-                                            u64 addr, size_t size,
-                                            enum dma_data_direction direction,
-                                            unsigned long dma_attrs)
-{
-       return dma_unmap_single_attrs(dev->dma_device, addr, size,
-                                     direction, dma_attrs);
+       dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
 /**
@@ -3113,8 +3055,6 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  size_t size,
                                         enum dma_data_direction direction)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
 
@@ -3129,10 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
 {
-       if (dev->dma_ops)
-               dev->dma_ops->unmap_page(dev, addr, size, direction);
-       else
-               dma_unmap_page(dev->dma_device, addr, size, direction);
+       dma_unmap_page(dev->dma_device, addr, size, direction);
 }
 
 /**
@@ -3146,8 +3083,6 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
 }
 
@@ -3162,10 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
 {
-       if (dev->dma_ops)
-               dev->dma_ops->unmap_sg(dev, sg, nents, direction);
-       else
-               dma_unmap_sg(dev->dma_device, sg, nents, direction);
+       dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3173,12 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      enum dma_data_direction direction,
                                      unsigned long dma_attrs)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
-                                                 dma_attrs);
-       else
-               return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-                                       dma_attrs);
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                               dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3186,12 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         enum dma_data_direction direction,
                                         unsigned long dma_attrs)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
-                                                 dma_attrs);
-       else
-               dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
-                                  dma_attrs);
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3233,10 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              size_t size,
                                              enum dma_data_direction dir)
 {
-       if (dev->dma_ops)
-               dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
-       else
-               dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
 
 /**
@@ -3251,10 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 size_t size,
                                                 enum dma_data_direction dir)
 {
-       if (dev->dma_ops)
-               dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
-       else
-               dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
 
 /**
@@ -3266,19 +3183,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
  */
 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                           size_t size,
-                                          u64 *dma_handle,
+                                          dma_addr_t *dma_handle,
                                           gfp_t flag)
 {
-       if (dev->dma_ops)
-               return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-       else {
-               dma_addr_t handle;
-               void *ret;
-
-               ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
-               *dma_handle = handle;
-               return ret;
-       }
+       return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }
 
 /**
@@ -3290,12 +3198,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
  */
 static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
-                                       u64 dma_handle)
+                                       dma_addr_t dma_handle)
 {
-       if (dev->dma_ops)
-               dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-       else
-               dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+       dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }
 
 /**
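
Because the coherent helpers now take a dma_addr_t handle rather than a u64, callers must adjust the handle type accordingly. A minimal sketch of the updated calling convention (ibdev and size are placeholder names):

    dma_addr_t dma_handle;
    void *cpu_addr;

    cpu_addr = ib_dma_alloc_coherent(ibdev, size, &dma_handle, GFP_KERNEL);
    if (!cpu_addr)
            return -ENOMEM;
    /* ... hand cpu_addr to the CPU and dma_handle to the HCA ... */
    ib_dma_free_coherent(ibdev, size, cpu_addr, dma_handle);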