*
*/
-int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region)
+int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
unsigned long flags;
u32 r;
u32 n;
int ret = 0;
struct hfi1_ibdev *dev = to_idev(mr->pd->device);
- struct hfi1_lkey_table *rkt = &dev->lk_table;
+ struct rvt_lkey_table *rkt = &dev->lk_table;
hfi1_get_mr(mr);
spin_lock_irqsave(&rkt->lock, flags);
/* special case for dma_mr lkey == 0 */
if (dma_region) {
- struct hfi1_mregion *tmr;
+ struct rvt_mregion *tmr;
tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
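The dma_region branch implements the special case named in the comment: lkey 0 is reserved for the device's DMA memory region, published through an RCU pointer so lookups need no lock. A rough stand-alone sketch of the reserve-if-unset idea (demo-sized types; the driver does this under rkt->lock with rcu_assign_pointer()):

#include <stdio.h>

/* Demo stand-in for the real structure; only the one field the
 * sketch touches is included. */
struct rvt_mregion { unsigned lkey; };

static struct rvt_mregion *dma_mr;	/* stands in for dev->dma_mr */

/* Publish mr as the DMA region iff the slot is free; on failure the
 * caller drops the reference it took with hfi1_get_mr(). */
static int publish_dma_mr(struct rvt_mregion *mr)
{
	if (dma_mr)
		return -1;		/* already have a DMA MR */
	mr->lkey = 0;			/* the reserved special key */
	dma_mr = mr;
	return 0;
}

int main(void)
{
	struct rvt_mregion a, b;

	printf("%d %d\n", publish_dma_mr(&a), publish_dma_mr(&b)); /* 0 -1 */
	return 0;
}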
* hfi1_free_lkey - free an lkey
* @mr: mr to free from tables
*/
-void hfi1_free_lkey(struct hfi1_mregion *mr)
+void hfi1_free_lkey(struct rvt_mregion *mr)
{
unsigned long flags;
u32 lkey = mr->lkey;
u32 r;
struct hfi1_ibdev *dev = to_idev(mr->pd->device);
- struct hfi1_lkey_table *rkt = &dev->lk_table;
+ struct rvt_lkey_table *rkt = &dev->lk_table;
int freed = 0;
spin_lock_irqsave(&rkt->lock, flags);
* Check the IB SGE for validity and initialize our internal version
* of it.
*/
-int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd,
+int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct hfi1_sge *isge, struct ib_sge *sge, int acc)
{
- struct hfi1_mregion *mr;
+ struct rvt_mregion *mr;
unsigned n, m;
size_t off;
entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / HFI1_SEGSZ;
- n = entries_spanned_by_off % HFI1_SEGSZ;
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
- if (n >= HFI1_SEGSZ) {
+ if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
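Both lookup paths translate a byte offset into the MR's two-level segment map. With a uniform power-of-two page size, the number of whole segments before the target is off >> page_shift, which is then split into a first-level block index m and an in-block index n. A stand-alone worked example (the 256 assumes 4 KiB pages and 16-byte segments, matching the HFI1_SEGSZ definition removed later in this patch):

#include <stdio.h>
#include <stddef.h>

struct seg { void *vaddr; size_t length; };	/* 16 bytes on 64-bit */
#define PAGE_SZ 4096
#define RVT_SEGSZ (PAGE_SZ / sizeof(struct seg))	/* 256 */

int main(void)
{
	size_t off = (5u << 20) + 100;	/* byte offset into the MR */
	unsigned page_shift = 12;	/* uniform 4 KiB pages */

	size_t entries = off >> page_shift;	/* pages before target */
	off -= entries << page_shift;		/* in-page remainder */

	unsigned m = entries / RVT_SEGSZ;	/* first-level map block */
	unsigned n = entries % RVT_SEGSZ;	/* segment within block */

	printf("map[%u]->segs[%u] + %zu bytes\n", m, n, off); /* 5, 0, 100 */
	return 0;
}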
int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc)
{
- struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct hfi1_mregion *mr;
+ struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct rvt_mregion *mr;
unsigned n, m;
size_t off;
entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / HFI1_SEGSZ;
- n = entries_spanned_by_off % HFI1_SEGSZ;
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
- if (n >= HFI1_SEGSZ) {
+ if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
/* Fast memory region */
struct hfi1_fmr {
struct ib_fmr ibfmr;
- struct hfi1_mregion mr; /* must be last */
+ struct rvt_mregion mr; /* must be last */
};
static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct hfi1_fmr, ibfmr);
}
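The /* must be last */ requirement and to_ifmr() are two sides of the same layout trick: rvt_mregion ends in a flexible map[] array (see the definitions removed below), so it can only sit at the tail of its container, and container_of() walks back from the embedded member to that container. An illustration of the container_of() half, using a minimal userspace copy of the kernel macro:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of(), as in the kernel's kernel.h */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer {
	int tag;
	struct inner in;	/* embedded, like ibfmr in hfi1_fmr */
};

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *ip = &o.in;

	/* Recover the containing structure from the member pointer,
	 * exactly what to_ifmr() does for ib_fmr -> hfi1_fmr. */
	struct outer *op = container_of(ip, struct outer, in);

	printf("%d\n", op->tag);	/* 42 */
	return 0;
}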
-static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
+static int init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
int count)
{
int m, i = 0;
int rval = 0;
- m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
for (; i < m; i++) {
mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
if (!mr->map[i])
goto out;
}
-static void deinit_mregion(struct hfi1_mregion *mr)
+static void deinit_mregion(struct rvt_mregion *mr)
{
int i = mr->mapsz;
int m;
/* Allocate struct plus pointers to first level page tables. */
- m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
if (!mr)
goto bail;
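Both init_mregion() and the allocation above size the two-level map with a ceiling division, (count + RVT_SEGSZ - 1) / RVT_SEGSZ (DIV_ROUND_UP in kernel terms), and carve the first-level pointer array out of the same allocation as the struct. A hedged stand-alone sketch of that shape (demo types, not the driver's):

#include <stdio.h>
#include <stdlib.h>

#define RVT_SEGSZ 256		/* assumed: segments per map block */

struct seg { void *vaddr; size_t length; };
struct segarray { struct seg segs[RVT_SEGSZ]; };

/* Mirrors the allocation pattern in the patch: one struct followed
 * by m first-level pointers (a flexible array member). */
struct mregion_demo {
	unsigned mapsz;
	struct segarray *map[];
};

int main(void)
{
	unsigned count = 1000;				/* segments needed */
	unsigned m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; /* ceil = 4 */
	struct mregion_demo *mr;

	mr = calloc(1, sizeof(*mr) + m * sizeof(mr->map[0]));
	if (!mr)
		return 1;
	mr->mapsz = m;
	printf("%u map blocks for %u segments\n", m, count);
	free(mr);
	return 0;
}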
mr->mr.map[m]->segs[n].vaddr = vaddr;
mr->mr.map[m]->segs[n].length = umem->page_size;
n++;
- if (n == HFI1_SEGSZ) {
+ if (n == RVT_SEGSZ) {
m++;
n = 0;
}
int rval = -ENOMEM;
/* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+ m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
if (!fmr)
goto bail;
int list_len, u64 iova)
{
struct hfi1_fmr *fmr = to_ifmr(ibfmr);
- struct hfi1_lkey_table *rkt;
+ struct rvt_lkey_table *rkt;
unsigned long flags;
int m, n, i;
u32 ps;
for (i = 0; i < list_len; i++) {
fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
fmr->mr.map[m]->segs[n].length = ps;
- if (++n == HFI1_SEGSZ) {
+ if (++n == RVT_SEGSZ) {
m++;
n = 0;
}
int hfi1_unmap_fmr(struct list_head *fmr_list)
{
struct hfi1_fmr *fmr;
- struct hfi1_lkey_table *rkt;
+ struct rvt_lkey_table *rkt;
unsigned long flags;
list_for_each_entry(fmr, fmr_list, ibfmr.list) {
{
int i, j, ret;
struct ib_wc wc;
- struct hfi1_lkey_table *rkt;
+ struct rvt_lkey_table *rkt;
struct rvt_pd *pd;
struct hfi1_sge_state *ss;
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
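This ++sge->n / ++sge->m advance recurs in every copy loop below: n walks the segments inside one map block and wraps into the next first-level block when it reaches RVT_SEGSZ. A compact stand-alone sketch of the cursor movement (tiny RVT_SEGSZ so the wrap is visible; demo types, not the driver's):

#include <stdio.h>
#include <stddef.h>

#define RVT_SEGSZ 4	/* tiny for the demo; the real value is
			 * PAGE_SIZE / sizeof(seg), see the header hunk */

struct seg { void *vaddr; size_t length; };

/* A stripped-down copy cursor: (m, n) locate the current segment in
 * a two-level map, as hfi1_sge does. */
struct cursor {
	struct seg (*map)[RVT_SEGSZ];
	unsigned m, n, mapsz;
};

/* Advance once the current segment is consumed, wrapping n into the
 * next first-level block, exactly as in the loops above and below. */
static int cursor_next(struct cursor *c)
{
	if (++c->n >= RVT_SEGSZ) {
		if (++c->m >= c->mapsz)
			return 0;	/* ran off the end of the MR */
		c->n = 0;
	}
	return 1;
}

int main(void)
{
	struct seg blocks[2][RVT_SEGSZ] = { 0 };
	struct cursor c = {
		.map = blocks, .m = 0, .n = RVT_SEGSZ - 2, .mapsz = 2,
	};

	while (cursor_next(&c))
		printf("now at map[%u]->segs[%u]\n", c.m, c.n);
	return 0;
}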
struct sdma_txreq txreq;
struct hfi1_qp *qp;
struct hfi1_swqe *wqe;
- struct hfi1_mregion *mr;
+ struct rvt_mregion *mr;
struct hfi1_sge_state *ss;
struct sdma_engine *sde;
u16 hdr_dwords;
if (--ssge.num_sge)
*sge = *ssge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
int i;
int j;
int acc;
- struct hfi1_lkey_table *rkt;
+ struct rvt_lkey_table *rkt;
struct rvt_pd *pd;
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_pportdata *ppd;
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
return;
sge->n = 0;
spin_lock_init(&dev->lk_table.lock);
dev->lk_table.max = 1 << hfi1_lkey_table_size;
/* ensure generation is at least 4 bits (keys.c) */
- if (hfi1_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ if (hfi1_lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
- hfi1_lkey_table_size, MAX_LKEY_TABLE_BITS);
- hfi1_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ hfi1_lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
+ hfi1_lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
}
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- dev->lk_table.table = (struct hfi1_mregion __rcu **)
+ dev->lk_table.table = (struct rvt_mregion __rcu **)
vmalloc(lk_tab_size);
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
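For scale: lk_table.max is 1 << hfi1_lkey_table_size pointers to rvt_mregion, and the cap at RVT_MAX_LKEY_TABLE_BITS keeps room in the 32-bit lkey for the generation counter the comment refers to (the packing itself lives in keys.c, not shown here). At the 23-bit cap the pointer table alone is 64 MiB on 64-bit, which is why it comes from vmalloc() rather than kmalloc(). A quick stand-alone check of the arithmetic:

#include <stdio.h>

#define RVT_MAX_LKEY_TABLE_BITS 23

int main(void)
{
	unsigned bits = 16;		/* e.g. the module parameter */

	if (bits > RVT_MAX_LKEY_TABLE_BITS)
		bits = RVT_MAX_LKEY_TABLE_BITS;

	unsigned long max = 1UL << bits;	/* dev->lk_table.max */
	unsigned long bytes = max * sizeof(void *);

	printf("%lu entries, %lu KiB of pointers\n", max, bytes >> 10);
	return 0;	/* 65536 entries, 512 KiB of pointers */
}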
struct hfi1_mmap_info *ip;
};
-/*
- * A segment is a linear region of low physical memory.
- * Used by the verbs layer.
- */
-struct hfi1_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of hfi1_segs that fit in a page. */
-#define HFI1_SEGSZ (PAGE_SIZE / sizeof(struct hfi1_seg))
-
-struct hfi1_segarray {
- struct hfi1_seg segs[HFI1_SEGSZ];
-};
-
-struct hfi1_mregion {
- struct ib_pd *pd; /* shares refcnt of ibmr.pd */
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of hfi1_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- u8 page_shift; /* 0 - non unform/non powerof2 sizes */
- u8 lkey_published; /* in global table */
- struct completion comp; /* complete when refcount goes to zero */
- atomic_t refcount;
- struct hfi1_segarray *map[0]; /* the segments */
-};
-
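(The hfi1_seg, hfi1_segarray and hfi1_mregion definitions removed here, like HFI1_SEGSZ and the lkey-table definitions removed further down, are presumably superseded by equivalent rvt_* definitions in an rdmavt header; this patch only shows the hfi1 side of the move.)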
/*
* These keep track of the copy progress within a memory region.
* Used by the verbs layer.
*/
struct hfi1_sge {
- struct hfi1_mregion *mr;
+ struct rvt_mregion *mr;
void *vaddr; /* kernel virtual address of segment */
u32 sge_length; /* length of the SGE */
u32 length; /* remaining length of the segment */
struct hfi1_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
- struct hfi1_mregion mr; /* must be last */
+ struct rvt_mregion mr; /* must be last */
};
/*
u32 s_flags;
struct hfi1_swqe *s_wqe;
struct hfi1_sge_state s_sge; /* current send request data */
- struct hfi1_mregion *s_rdma_mr;
+ struct rvt_mregion *s_rdma_mr;
u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
rq->max_sge * sizeof(struct ib_sge)) * n);
}
-#define MAX_LKEY_TABLE_BITS 23
-
-struct hfi1_lkey_table {
- spinlock_t lock; /* protect changes in this struct */
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
- u32 max; /* size of the table */
- struct hfi1_mregion __rcu **table;
-};
-
struct hfi1_opcode_stats {
u64 n_packets; /* number of packets */
u64 n_bytes; /* total number of bytes */
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* protect mmap_offset */
u32 mmap_offset;
- struct hfi1_mregion __rcu *dma_mr;
+ struct rvt_mregion __rcu *dma_mr;
struct hfi1_qp_ibdev *qp_dev;
/* QP numbers are shared by all IB ports */
- struct hfi1_lkey_table lk_table;
+ struct rvt_lkey_table lk_table;
/* protect wait lists */
seqlock_t iowait_lock;
struct list_head txwait; /* list for wait verbs_txreq */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
-int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region);
+int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region);
-void hfi1_free_lkey(struct hfi1_mregion *mr);
+void hfi1_free_lkey(struct rvt_mregion *mr);
-int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd,
+int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct hfi1_sge *isge, struct ib_sge *sge, int acc);
int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
int hfi1_dealloc_fmr(struct ib_fmr *ibfmr);
-static inline void hfi1_get_mr(struct hfi1_mregion *mr)
+static inline void hfi1_get_mr(struct rvt_mregion *mr)
{
atomic_inc(&mr->refcount);
}
-static inline void hfi1_put_mr(struct hfi1_mregion *mr)
+static inline void hfi1_put_mr(struct rvt_mregion *mr)
{
if (unlikely(atomic_dec_and_test(&mr->refcount)))
complete(&mr->comp);
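The dec-and-test/complete() pairing in hfi1_put_mr() lets a teardown path sleep until the last in-flight user of the region (for example an s_rdma_mr reference held across an RDMA read) is gone. A stand-alone userspace analogue of the pattern, with a plain flag standing in for struct completion and C11 atomics for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct demo_mr {
	atomic_int refcount;
	int comp_done;		/* stands in for struct completion */
};

static void demo_get(struct demo_mr *mr)
{
	atomic_fetch_add(&mr->refcount, 1);
}

/* The last put signals the "completion" that a deregistration path
 * would block on with wait_for_completion(). */
static void demo_put(struct demo_mr *mr)
{
	if (atomic_fetch_sub(&mr->refcount, 1) == 1)
		mr->comp_done = 1;	/* complete(&mr->comp) */
}

int main(void)
{
	struct demo_mr mr = { .refcount = 1 };	/* creator's reference */

	demo_get(&mr);		/* e.g. an in-flight RDMA read */
	demo_put(&mr);		/* that read finishes */
	demo_put(&mr);		/* dereg drops the last reference */
	printf("completed: %d\n", mr.comp_done);	/* 1 */
	return 0;
}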