/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* lfsr (linear feedback shift register) with period 255 */
static u8 rxe_get_key(void)
{
        static u32 key = 1;

        key = key << 1;

        key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
                ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));

        key &= 0xff;

        return key;
}

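/* check that [iova, iova + length) lies inside the registered region;
 * DMA mrs cover the whole address space, so there is nothing to check
 */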
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
        switch (mem->type) {
        case RXE_MEM_TYPE_DMA:
                return 0;

        case RXE_MEM_TYPE_MR:
        case RXE_MEM_TYPE_FMR:
                if (iova < mem->iova ||
                    length > mem->length ||
                    iova > mem->iova + mem->length - length)
                        return -EFAULT;
                return 0;

        default:
                return -EFAULT;
        }
}

#define IB_ACCESS_REMOTE        (IB_ACCESS_REMOTE_READ          \
                                 | IB_ACCESS_REMOTE_WRITE       \
                                 | IB_ACCESS_REMOTE_ATOMIC)

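/* common mem init: build the lkey from the pool index and the lfsr key,
 * and only hand out an rkey when some form of remote access was requested
 */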
static void rxe_mem_init(int access, struct rxe_mem *mem)
{
        u32 lkey = mem->pelem.index << 8 | rxe_get_key();
        u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

        if (mem->pelem.pool->type == RXE_TYPE_MR) {
                mem->ibmr.lkey = lkey;
                mem->ibmr.rkey = rkey;
        }

        mem->lkey = lkey;
        mem->rkey = rkey;
        mem->state = RXE_MEM_STATE_INVALID;
        mem->type = RXE_MEM_TYPE_NONE;
        mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}

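/* cleanup callback: release the pinned umem (if any) and free the
 * two-level map table owned by this mem object
 */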
void rxe_mem_cleanup(void *arg)
{
        struct rxe_mem *mem = arg;
        int i;

        if (mem->umem)
                ib_umem_release(mem->umem);

        if (mem->map) {
                for (i = 0; i < mem->num_map; i++)
                        kfree(mem->map[i]);

                kfree(mem->map);
        }
}

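/* allocate the two-level buffer map: num_map pointers, each to a rxe_map
 * that holds up to RXE_BUF_PER_MAP physical buffer descriptors
 */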
static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
{
        int i;
        int num_map;
        struct rxe_map **map = mem->map;

        num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

        mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
        if (!mem->map)
                goto err1;

        for (i = 0; i < num_map; i++) {
                mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
                if (!mem->map[i])
                        goto err2;
        }

        WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));

        mem->map_shift = ilog2(RXE_BUF_PER_MAP);
        mem->map_mask = RXE_BUF_PER_MAP - 1;

        mem->num_buf = num_buf;
        mem->num_map = num_map;
        mem->max_buf = num_map * RXE_BUF_PER_MAP;

        return 0;

err2:
        for (i--; i >= 0; i--)
                kfree(mem->map[i]);

        kfree(mem->map);
err1:
        return -ENOMEM;
}

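/* a DMA mr has no page map; iova values are used directly as kernel
 * virtual addresses (see iova_to_vaddr() and rxe_mem_copy() below)
 */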
int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
                     int access, struct rxe_mem *mem)
{
        rxe_mem_init(access, mem);

        mem->pd = pd;
        mem->access = access;
        mem->state = RXE_MEM_STATE_VALID;
        mem->type = RXE_MEM_TYPE_DMA;

        return 0;
}

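/* register a user memory region: pin the pages with ib_umem_get() and
 * record the kernel virtual address of each page in the buffer map
 */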
int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
                      u64 length, u64 iova, int access, struct ib_udata *udata,
                      struct rxe_mem *mem)
{
        int entry;
        struct rxe_map **map;
        struct rxe_phys_buf *buf = NULL;
        struct ib_umem *umem;
        struct scatterlist *sg;
        int num_buf;
        void *vaddr;
        int err;

        umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
        if (IS_ERR(umem)) {
                pr_warn("err %d from rxe_umem_get\n",
                        (int)PTR_ERR(umem));
                err = -EINVAL;
                goto err1;
        }

        mem->umem = umem;
        num_buf = umem->nmap;

        rxe_mem_init(access, mem);

        err = rxe_mem_alloc(rxe, mem, num_buf);
        if (err) {
                pr_warn("err %d from rxe_mem_alloc\n", err);
                ib_umem_release(umem);
                goto err1;
        }

        WARN_ON(!is_power_of_2(umem->page_size));

        mem->page_shift = ilog2(umem->page_size);
        mem->page_mask = umem->page_size - 1;

        num_buf = 0;
        map = mem->map;
        if (length > 0) {
                buf = map[0]->buf;

                for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                        vaddr = page_address(sg_page(sg));
                        if (!vaddr) {
                                pr_warn("null vaddr\n");
                                err = -ENOMEM;
                                goto err1;
                        }

                        buf->addr = (uintptr_t)vaddr;
                        buf->size = umem->page_size;
                        num_buf++;
                        buf++;

                        if (num_buf >= RXE_BUF_PER_MAP) {
                                map++;
                                buf = map[0]->buf;
                                num_buf = 0;
                        }
                }
        }

        mem->pd = pd;
        mem->umem = umem;
        mem->access = access;
        mem->length = length;
        mem->iova = iova;
        mem->va = start;
        mem->offset = ib_umem_offset(umem);
        mem->state = RXE_MEM_STATE_VALID;
        mem->type = RXE_MEM_TYPE_MR;

        return 0;

err1:
        return err;
}

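/* prepare a mem object for fast registration: the buffer map is sized for
 * max_pages now and filled in later when the pages are actually mapped
 */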
int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
                      int max_pages, struct rxe_mem *mem)
{
        int err;

        rxe_mem_init(0, mem);

        /* In fastreg, we also set the rkey */
        mem->ibmr.rkey = mem->ibmr.lkey;

        err = rxe_mem_alloc(rxe, mem, max_pages);
        if (err)
                goto err1;

        mem->pd = pd;
        mem->max_buf = max_pages;
        mem->state = RXE_MEM_STATE_FREE;
        mem->type = RXE_MEM_TYPE_MR;

        return 0;

err1:
        return err;
}

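/* translate an iova inside the mem object into a (map index, buffer index,
 * byte offset) triple; use shifts and masks when every buffer has the same
 * power-of-two size, otherwise walk the buffer list
 */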
static void lookup_iova(
        struct rxe_mem  *mem,
        u64             iova,
        int             *m_out,
        int             *n_out,
        size_t          *offset_out)
{
        size_t offset = iova - mem->iova + mem->offset;
        int map_index;
        int buf_index;
        u64 length;

        if (likely(mem->page_shift)) {
                *offset_out = offset & mem->page_mask;
                offset >>= mem->page_shift;
                *n_out = offset & mem->map_mask;
                *m_out = offset >> mem->map_shift;
        } else {
                map_index = 0;
                buf_index = 0;

                length = mem->map[map_index]->buf[buf_index].size;

                while (offset >= length) {
                        offset -= length;
                        buf_index++;

                        if (buf_index == RXE_BUF_PER_MAP) {
                                map_index++;
                                buf_index = 0;
                        }
                        length = mem->map[map_index]->buf[buf_index].size;
                }

                *m_out = map_index;
                *n_out = buf_index;
                *offset_out = offset;
        }
}

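/* return the kernel virtual address backing iova, or NULL when the mem is
 * not valid, the range check fails, or the access would cross a buffer
 * boundary
 */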
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
        size_t offset;
        int m, n;
        void *addr;

        if (mem->state != RXE_MEM_STATE_VALID) {
                pr_warn("mem not in valid state\n");
                addr = NULL;
                goto out;
        }

        if (!mem->map) {
                addr = (void *)(uintptr_t)iova;
                goto out;
        }

        if (mem_check_range(mem, iova, length)) {
                pr_warn("range violation\n");
                addr = NULL;
                goto out;
        }

        lookup_iova(mem, iova, &m, &n, &offset);

        if (offset + length > mem->map[m]->buf[n].size) {
                pr_warn("crosses page boundary\n");
                addr = NULL;
                goto out;
        }

        addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

out:
        return addr;
}

/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova. Compute the incremental value of
 * crc32 if crcp is not NULL. The caller must hold a reference to mem.
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
                 enum copy_direction dir, u32 *crcp)
{
        int err;
        int bytes;
        u8 *va;
        struct rxe_map **map;
        struct rxe_phys_buf *buf;
        int m;
        int i;
        size_t offset;
        u32 crc = crcp ? (*crcp) : 0;

        if (length == 0)
                return 0;

        if (mem->type == RXE_MEM_TYPE_DMA) {
                u8 *src, *dest;

                src = (dir == to_mem_obj) ?
                        addr : ((void *)(uintptr_t)iova);

                dest = (dir == to_mem_obj) ?
                        ((void *)(uintptr_t)iova) : addr;

                if (crcp)
                        *crcp = crc32_le(*crcp, src, length);

                memcpy(dest, src, length);

                return 0;
        }

        WARN_ON(!mem->map);

        err = mem_check_range(mem, iova, length);
        if (err) {
                err = -EFAULT;
                goto err1;
        }

        lookup_iova(mem, iova, &m, &i, &offset);

        map = mem->map + m;
        buf = map[0]->buf + i;

        while (length > 0) {
                u8 *src, *dest;

                va = (u8 *)(uintptr_t)buf->addr + offset;
                src = (dir == to_mem_obj) ? addr : va;
                dest = (dir == to_mem_obj) ? va : addr;

                bytes = buf->size - offset;

                if (bytes > length)
                        bytes = length;

                if (crcp)
                        crc = crc32_le(crc, src, bytes);

                memcpy(dest, src, bytes);

                length -= bytes;
                addr += bytes;

                offset = 0;
                buf++;
                i++;

                if (i == RXE_BUF_PER_MAP) {
                        i = 0;
                        map++;
                        buf = map[0]->buf;
                }
        }

        if (crcp)
                *crcp = crc;

        return 0;

err1:
        return err;
}

/* copy data in or out of a wqe, i.e. its sg list,
 * under the control of a dma descriptor
 */
int copy_data(
        struct rxe_dev          *rxe,
        struct rxe_pd           *pd,
        int                     access,
        struct rxe_dma_info     *dma,
        void                    *addr,
        int                     length,
        enum copy_direction     dir,
        u32                     *crcp)
{
        int bytes;
        struct rxe_sge *sge = &dma->sge[dma->cur_sge];
        int offset = dma->sge_offset;
        int resid = dma->resid;
        struct rxe_mem *mem = NULL;
        u64 iova;
        int err;

        if (length == 0)
                return 0;

        if (length > resid) {
                err = -EINVAL;
                goto err2;
        }

        if (sge->length && (offset < sge->length)) {
                mem = lookup_mem(pd, access, sge->lkey, lookup_local);
                if (!mem) {
                        err = -EINVAL;
                        goto err1;
                }
        }

        while (length > 0) {
                bytes = length;

                if (offset >= sge->length) {
                        if (mem) {
                                rxe_drop_ref(mem);
                                mem = NULL;
                        }
                        sge++;
                        dma->cur_sge++;
                        offset = 0;

                        if (dma->cur_sge >= dma->num_sge) {
                                err = -ENOSPC;
                                goto err2;
                        }

                        if (sge->length) {
                                mem = lookup_mem(pd, access, sge->lkey,
                                                 lookup_local);
                                if (!mem) {
                                        err = -EINVAL;
                                        goto err1;
                                }
                        } else {
                                continue;
                        }
                }

                if (bytes > sge->length - offset)
                        bytes = sge->length - offset;

                if (bytes > 0) {
                        iova = sge->addr + offset;

                        err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
                        if (err)
                                goto err2;

                        offset += bytes;
                        resid -= bytes;
                        length -= bytes;
                        addr += bytes;
                }
        }

        dma->sge_offset = offset;
        dma->resid = resid;

        if (mem)
                rxe_drop_ref(mem);

        return 0;

err2:
        if (mem)
                rxe_drop_ref(mem);
err1:
        return err;
}

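/* skip over length bytes of the dma descriptor's sg list without copying
 * any data, updating cur_sge, sge_offset and resid as copy_data() would
 */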
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
        struct rxe_sge *sge = &dma->sge[dma->cur_sge];
        int offset = dma->sge_offset;
        int resid = dma->resid;

        while (length) {
                unsigned int bytes;

                if (offset >= sge->length) {
                        sge++;
                        dma->cur_sge++;
                        offset = 0;
                        if (dma->cur_sge >= dma->num_sge)
                                return -ENOSPC;
                }

                bytes = length;

                if (bytes > sge->length - offset)
                        bytes = sge->length - offset;

                offset += bytes;
                resid -= bytes;
                length -= bytes;
        }

        dma->sge_offset = offset;
        dma->resid = resid;

        return 0;
}

/* (1) find the mem (mr or mw) corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
                           enum lookup_type type)
{
        struct rxe_mem *mem = NULL;
        struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
        int index = key >> 8;

        if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
                mem = rxe_pool_get_index(&rxe->mr_pool, index);
                if (!mem)
                        goto err1;
        } else {
                goto err1;
        }

        if ((type == lookup_local && mem->lkey != key) ||
            (type == lookup_remote && mem->rkey != key))
                goto err2;

        if (mem->pd != pd)
                goto err2;

        if (access && !(access & mem->access))
                goto err2;

        if (mem->state != RXE_MEM_STATE_VALID)
                goto err2;

        return mem;

err2:
        rxe_drop_ref(mem);
err1:
        return NULL;
}

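/* load an array of page addresses into the mem object's buffer map and
 * mark the mem valid; fails if more pages are passed than the map was
 * sized for
 */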
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
                      u64 *page, int num_pages, u64 iova)
{
        int i;
        int num_buf;
        int err;
        struct rxe_map **map;
        struct rxe_phys_buf *buf;
        size_t page_size;

        if (num_pages > mem->max_buf) {
                err = -EINVAL;
                goto err1;
        }

        num_buf = 0;
        page_size = 1 << mem->page_shift;
        map = mem->map;
        buf = map[0]->buf;

        for (i = 0; i < num_pages; i++) {
                buf->addr = *page++;
                buf->size = page_size;
                buf++;
                num_buf++;

                if (num_buf == RXE_BUF_PER_MAP) {
                        map++;
                        buf = map[0]->buf;
                        num_buf = 0;
                }
        }

        mem->iova = iova;
        mem->va = iova;
        mem->length = num_pages << mem->page_shift;
        mem->state = RXE_MEM_STATE_VALID;

        return 0;

err1:
        return err;
}