/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

enum {
        DEF_CACHE_SIZE  = 10,
};

static __be64 *mr_align(__be64 *ptr, int align)
{
        unsigned long mask = align - 1;

        return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;

        if (order < cache->ent[0].order)
                return 0;
        else
                return order - cache->ent[0].order;
}

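/*
 * Fill cache entry 'c' with 'num' pre-created mkeys.  Each mkey is created
 * in the free state with UMR enabled, so it can later be pointed at user
 * memory by a UMR work request instead of a blocking firmware command.
 */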
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
        int err = 0;
        int i;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
                if (!mr) {
                        err = -ENOMEM;
                        goto out;
                }
                mr->order = ent->order;
                mr->umred = 1;
                in->seg.status = 1 << 6;        /* free */
                in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
                in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
                in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
                in->seg.log2_page_size = 12;

                err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
                                            sizeof(*in));
                if (err) {
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
                        kfree(mr);
                        goto out;
                }
                cache->last_add = jiffies;

                spin_lock(&ent->lock);
                list_add_tail(&mr->list, &ent->head);
                ent->cur++;
                ent->size++;
                spin_unlock(&ent->lock);
        }

out:
        kfree(in);
        return err;
}

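/*
 * Trim cache entry 'c' by destroying up to 'num' mkeys taken from the head
 * of its free list.
 */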
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;
        int i;

        for (i = 0; i < num; i++) {
                spin_lock(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

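/*
 * debugfs "size" file for a cache entry: writing a value grows or shrinks
 * the entry to hold that many mkeys (never below its limit); reading
 * returns the current size.
 */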
static ssize_t size_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var < ent->limit)
                return -EINVAL;

        if (var > ent->size) {
                err = add_keys(dev, c, var - ent->size);
                if (err)
                        return err;
        } else if (var < ent->size) {
                remove_keys(dev, c, ent->size - var);
        }

        return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations size_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = size_write,
        .read   = size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var > ent->size)
                return -EINVAL;

        ent->limit = var;

        if (ent->cur < ent->limit) {
                err = add_keys(dev, c, 2 * ent->limit - ent->cur);
                if (err)
                        return err;
        }

        return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations limit_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = limit_write,
        .read   = limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
        int i;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                if (cache->ent[i].cur < cache->ent[i].limit)
                        return 1;
        }

        return 0;
}

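/*
 * Background maintenance for one cache entry: grow it one mkey at a time
 * until it holds 2 * limit free mkeys, and shrink it back once it has been
 * above that watermark, no entry is being filled, and nothing has been
 * added for at least a minute.
 */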
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
        struct mlx5_ib_dev *dev = ent->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int i = order2idx(dev, ent->order);

        if (cache->stopped)
                return;

        ent = &dev->cache.ent[i];
        if (ent->cur < 2 * ent->limit) {
                add_keys(dev, i, 1);
                if (ent->cur < 2 * ent->limit)
                        queue_work(cache->wq, &ent->work);
        } else if (ent->cur > 2 * ent->limit) {
                if (!someone_adding(cache) &&
                    time_after(jiffies, cache->last_add + 60 * HZ)) {
                        remove_keys(dev, i, 1);
                        if (ent->cur > ent->limit)
                                queue_work(cache->wq, &ent->work);
                } else {
                        queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ);
                }
        }
}

static void delayed_cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, dwork.work);
        __cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, work);
        __cache_work_func(ent);
}

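/*
 * Take a free mkey from the smallest cache entry that can hold 'order'
 * pages, kicking the worker to refill whatever was drained.  Returns NULL
 * on a cache miss.
 */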
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_ib_mr *mr = NULL;
        struct mlx5_cache_ent *ent;
        int c;
        int i;

        c = order2idx(dev, order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
                return NULL;
        }

        for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];

                mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

                spin_lock(&ent->lock);
                if (!list_empty(&ent->head)) {
                        mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
                                              list);
                        list_del(&mr->list);
                        ent->cur--;
                        spin_unlock(&ent->lock);
                        if (ent->cur < ent->limit)
                                queue_work(cache->wq, &ent->work);
                        break;
                }
                spin_unlock(&ent->lock);

                queue_work(cache->wq, &ent->work);
        }

        if (!mr)
                cache->ent[c].miss++;

        return mr;
}

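/*
 * Return an mkey to its cache entry and let the worker shrink the entry if
 * it now holds more than twice its limit.
 */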
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int shrink = 0;
        int c;

        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
                return;
        }
        ent = &cache->ent[c];
        spin_lock(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        if (ent->cur > 2 * ent->limit)
                shrink = 1;
        spin_unlock(&ent->lock);

        if (shrink)
                queue_work(cache->wq, &ent->work);
}

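/* Destroy every mkey still held by cache entry 'c' (driver teardown path). */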
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;

        cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int i;

        if (!mlx5_debugfs_root)
                return 0;

        cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
        if (!cache->root)
                return -ENOMEM;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                sprintf(ent->name, "%d", ent->order);
                ent->dir = debugfs_create_dir(ent->name, cache->root);
                if (!ent->dir)
                        return -ENOMEM;

                ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
                                                 &size_fops);
                ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
                                                  &limit_fops);
                ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
                                               &ent->cur);
                ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
                                                &ent->miss);
        }

        return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->cache.root);
}

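/*
 * Create the mkey cache: one workqueue plus MAX_MR_CACHE_ENTRIES entries,
 * each serving one registration order.  Sizes and limits come from the
 * device profile when MLX5_PROF_MASK_MR_CACHE is set; otherwise the entry
 * starts at DEF_CACHE_SIZE with no limit.
 */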
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int limit;
        int size;
        int err;
        int i;

        cache->wq = create_singlethread_workqueue("mkey_cache");
        if (!cache->wq) {
                mlx5_ib_warn(dev, "failed to create work queue\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                INIT_LIST_HEAD(&cache->ent[i].head);
                spin_lock_init(&cache->ent[i].lock);

                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
                spin_lock_init(&ent->lock);
                ent->order = i + 2;
                ent->dev = dev;

                if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) {
                        size = dev->mdev.profile->mr_cache[i].size;
                        limit = dev->mdev.profile->mr_cache[i].limit;
                } else {
                        size = DEF_CACHE_SIZE;
                        limit = 0;
                }

                INIT_WORK(&ent->work, cache_work_func);
                INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
                ent->limit = limit;
                queue_work(cache->wq, &ent->work);
        }

        err = mlx5_mr_cache_debugfs_init(dev);
        if (err)
                mlx5_ib_warn(dev, "cache debugfs failure\n");

        return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
        int i;

        dev->cache.stopped = 1;
        flush_workqueue(dev->cache.wq);

        mlx5_mr_cache_debugfs_cleanup(dev);

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);

        destroy_workqueue(dev->cache.wq);

        return 0;
}

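/*
 * ib_get_dma_mr: a whole-memory mkey in PA (physical address) mode, so no
 * translation table is needed.
 */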
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_core_dev *mdev = &dev->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *seg;
        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        seg = &in->seg;
        seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
        seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        seg->start_addr = 0;

        err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in));
        if (err)
                goto err_in;

        kfree(in);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_in:
        kfree(in);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
        u64 offset;
        int npages;

        offset = addr & (page_size - 1);
        npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
        return (npages + 1) / 2;
}

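/*
 * Registrations small enough to be served by the mkey cache go through the
 * UMR fast path (reg_umr); anything larger falls back to a blocking
 * create_mkey command (reg_create).
 */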
static int use_umr(int order)
{
        /* orders above the largest cache entry go through reg_create() */
        return order <= 17;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
                             struct ib_sge *sg, u64 dma, int n, u32 key,
                             int page_shift, u64 virt_addr, u64 len,
                             int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct ib_mr *mr = dev->umrc.mr;

        sg->addr = dma;
        sg->length = ALIGN(sizeof(u64) * n, 64);
        sg->lkey = mr->lkey;

        wr->next = NULL;
        wr->send_flags = 0;
        wr->sg_list = sg;
        if (n)
                wr->num_sge = 1;
        else
                wr->num_sge = 0;

        wr->opcode = MLX5_IB_WR_UMR;
        wr->wr.fast_reg.page_list_len = n;
        wr->wr.fast_reg.page_shift = page_shift;
        wr->wr.fast_reg.rkey = key;
        wr->wr.fast_reg.iova_start = virt_addr;
        wr->wr.fast_reg.length = len;
        wr->wr.fast_reg.access_flags = access_flags;
        wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
                               struct ib_send_wr *wr, u32 key)
{
        wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
        wr->opcode = MLX5_IB_WR_UMR;
        wr->wr.fast_reg.rkey = key;
}

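/*
 * Completion handler for the driver's private UMR CQ: record the work
 * completion status in the mr and wake the thread sleeping in reg_umr() or
 * unreg_umr().
 */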
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
        struct mlx5_ib_mr *mr;
        struct ib_wc wc;
        int err;

        while (1) {
                err = ib_poll_cq(cq, 1, &wc);
                if (err < 0) {
                        pr_warn("poll cq error %d\n", err);
                        return;
                }
                if (err == 0)
                        break;

                mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
                mr->status = wc.status;
                complete(&mr->done);
        }
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

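/*
 * Register user memory through a UMR work request: take a pre-created mkey
 * from the cache, DMA-map the page list, and post a UMR WQE on the driver's
 * private QP, sleeping until its completion arrives.
 */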
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  u64 virt_addr, u64 len, int npages,
                                  int page_shift, int order, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct ib_send_wr wr, *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
        int size = sizeof(u64) * npages;
        int err;
        int i;

        for (i = 0; i < 10; i++) {
                mr = alloc_cached_mr(dev, order);
                if (mr)
                        break;

                err = add_keys(dev, order2idx(dev, order), 1);
                if (err) {
                        mlx5_ib_warn(dev, "add_keys failed\n");
                        break;
                }
        }

        if (!mr)
                return ERR_PTR(-EAGAIN);

        mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
        if (!mr->pas) {
                err = -ENOMEM;
                goto error;
        }

        mlx5_ib_populate_pas(dev, umem, page_shift,
                             mr_align(mr->pas, MLX5_UMR_ALIGN), 1);

        mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, mr->dma)) {
                kfree(mr->pas);
                err = -ENOMEM;
                goto error;
        }

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)mr;
        prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);

        /* We serialize polls so one process does not kidnap another's
         * completion. This is not a problem since wr is completed in
         * a very short time.
         */
        down(&umrc->sem);
        init_completion(&mr->done);
        err = ib_post_send(umrc->qp, &wr, &bad);
        if (err) {
                mlx5_ib_warn(dev, "post send failed, err %d\n", err);
                up(&umrc->sem);
                goto error;
        }
        wait_for_completion(&mr->done);
        up(&umrc->sem);

        dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
        kfree(mr->pas);

        if (mr->status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "reg umr failed\n");
                err = -EFAULT;
                goto error;
        }

        return mr;

error:
        free_cached_mr(dev, mr);
        return ERR_PTR(err);
}

static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                                     u64 length, struct ib_umem *umem,
                                     int npages, int page_shift,
                                     int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int inlen;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_1;
        }
        mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);

        in->seg.flags = convert_access(access_flags) |
                MLX5_ACCESS_MODE_MTT;
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        in->seg.start_addr = cpu_to_be64(virt_addr);
        in->seg.len = cpu_to_be64(length);
        in->seg.bsfs_octo_size = 0;
        in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
        in->seg.log2_page_size = page_shift;
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
        err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen);
        if (err) {
                mlx5_ib_warn(dev, "create mkey failed\n");
                goto err_2;
        }
        mr->umem = umem;
        mlx5_vfree(in);

        mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

        return mr;

err_2:
        mlx5_vfree(in);

err_1:
        kfree(mr);

        return ERR_PTR(err);
}

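/*
 * ib_reg_user_mr: pin the user buffer, then register it either through the
 * UMR fast path (cached mkey) or, if the cache cannot serve the order,
 * through a regular create_mkey command.
 */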
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
        struct ib_umem *umem;
        int page_shift;
        int npages;
        int ncont;
        int order;
        int err;

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n",
                    start, virt_addr, length);
        umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
                           0);
        if (IS_ERR(umem)) {
                mlx5_ib_dbg(dev, "umem get failed\n");
                return ERR_CAST(umem);
        }

        mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
        if (!npages) {
                mlx5_ib_warn(dev, "avoid zero region\n");
                err = -EINVAL;
                goto error;
        }

        mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                    npages, ncont, order, page_shift);

        if (use_umr(order)) {
                mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
                             order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d", order);
                        mr = NULL;
                }
        }

        if (!mr)
                mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
                                access_flags);

        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);
                goto error;
        }

        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

        mr->umem = umem;
        mr->npages = npages;
        spin_lock(&dev->mr_lock);
        dev->mdev.priv.reg_pages += npages;
        spin_unlock(&dev->mr_lock);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;

        return &mr->ibmr;

error:
        ib_umem_release(umem);
        return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct umr_common *umrc = &dev->umrc;
        struct ib_send_wr wr, *bad;
        int err;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)mr;
        prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

        down(&umrc->sem);
        init_completion(&mr->done);
        err = ib_post_send(umrc->qp, &wr, &bad);
        if (err) {
                up(&umrc->sem);
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto error;
        }
        wait_for_completion(&mr->done);
        up(&umrc->sem);

        if (mr->status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "unreg umr failed\n");
                err = -EFAULT;
                goto error;
        }

        return 0;

error:
        return err;
}

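/*
 * ib_dereg_mr: mkeys that came from the cache are invalidated with a UMR
 * unreg WQE and recycled; all others are destroyed with a firmware command.
 */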
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        struct ib_umem *umem = mr->umem;
        int npages = mr->npages;
        int umred = mr->umred;
        int err;

        if (!umred) {
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
                if (err) {
                        mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                                     mr->mmr.key, err);
                        return err;
                }
        } else {
                err = unreg_umr(dev, mr);
                if (err) {
                        mlx5_ib_warn(dev, "failed unregister\n");
                        return err;
                }
                free_cached_mr(dev, mr);
        }

        if (umem) {
                ib_umem_release(umem);
                spin_lock(&dev->mr_lock);
                dev->mdev.priv.reg_pages -= npages;
                spin_unlock(&dev->mr_lock);
        }

        if (!umred)
                kfree(mr);

        return 0;
}

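/*
 * Allocate a free mkey for the ib_post_send() fast-register flow; the page
 * list is supplied later via mlx5_ib_alloc_fast_reg_page_list().
 */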
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                                        int max_page_list_len)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        in->seg.status = 1 << 6; /* free */
        in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        /*
         * TBD not needed - issue 197292 */
        in->seg.log2_page_size = PAGE_SHIFT;

        err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in));
        kfree(in);
        if (err)
                goto err_free;

        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
                                                               int page_list_len)
{
        struct mlx5_ib_fast_reg_page_list *mfrpl;
        int size = page_list_len * sizeof(u64);

        mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
        if (!mfrpl)
                return ERR_PTR(-ENOMEM);

        mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
        if (!mfrpl->ibfrpl.page_list)
                goto err_free;

        mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
                                                     size, &mfrpl->map,
                                                     GFP_KERNEL);
        if (!mfrpl->mapped_page_list)
                goto err_free;

        WARN_ON(mfrpl->map & 0x3f);

        return &mfrpl->ibfrpl;

err_free:
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
        return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
        struct mlx5_ib_dev *dev = to_mdev(page_list->device);
        int size = page_list->max_page_list_len * sizeof(u64);

        dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
}