/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"
#include "xlist.h"

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
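
/*
 * clean_list_grace is a poor man's RCU for pool->clean_list: a CPU
 * pulling an MR off the clean list sets CLEAN_LIST_BUSY_BIT in its
 * per-cpu word around xlist_del_head(), and the flush path spins in
 * wait_clean_list_grace() until every CPU's bit is clear before it
 * re-inserts MRs.  See the comment in rds_ib_flush_mr_pool() for the
 * race this avoids.
 */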

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;

	struct xlist_head	xlist;

	/* unmap_list is for freeing */
	struct list_head	unmap_list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct delayed_work	flush_worker;	/* flush worker */

	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */

	struct xlist_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct xlist_head	free_list;	/* unused MRs */
	struct xlist_head	clean_list;	/* global unused & unmapped MRs */
	wait_queue_head_t	flush_wait;

	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	/* wait for readers from rds_ib_get_device() before freeing */
	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
	}

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* irqs are already off here; a nested spin_unlock_irq() would
	 * re-enable them while ib_nodev_conns_lock is still held */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_XLIST_HEAD(&pool->free_list);
	INIT_XLIST_HEAD(&pool->drop_list);
	INIT_XLIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
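	/* Example (illustrative numbers only): if the driver reports
	 * max_fmrs = 4096, then max_items = 4096 and max_items_soft = 3072,
	 * satisfying 4096 > 3072 > 2048. */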
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
			 struct rds_ib_mr **ibmr_ret)
{
	struct xlist_head *ibmr_xl;

	ibmr_xl = xlist_del_head_fast(xl);
	*ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
}
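
/*
 * Note: refill_local() assumes xl is non-empty and uses the "fast"
 * delete variant.  That appears safe only because the sole caller,
 * rds_ib_flush_mr_pool(), still holds flush_lock and operates on a
 * private xlist that has not yet been published on pool->clean_list.
 */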

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct xlist_head *ret;
	unsigned long *flag;

	preempt_disable();
	flag = &__get_cpu_var(clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = xlist_del_head(&pool->clean_list);
	if (ret)
		ibmr = list_entry(ret, struct rds_ib_mr, xlist);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it.
		 */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	/* kzalloc_node() zeroes *ibmr; no separate memset is needed */
	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;
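
	/* Example: an SG list where only the first entry starts mid-page and
	 * only the last entry ends mid-page passes the checks above, each
	 * ragged edge contributing one extra page to page_cnt; an unaligned
	 * start or end on any interior entry fails with -EINVAL. */
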
	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			      dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}
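
/*
 * Both syncs above pass DMA_BIDIRECTIONAL because the scatterlist was
 * mapped bidirectionally in rds_ib_map_fmr(); 'direction' only selects
 * whether we sync for the CPU or for the device.
 */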

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an xlist of mrs, put them all into the list_head for more processing
 */
static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct xlist_head splice;
	struct xlist_head *cur;
	struct xlist_head *next;

	splice.next = NULL;
	xlist_splice(xlist, &splice);
	cur = splice.next;
	while (cur) {
		next = cur->next;
		ibmr = list_entry(cur, struct rds_ib_mr, xlist);
		list_add_tail(&ibmr->unmap_list, list);
		cur = next;
	}
}

/*
 * this takes a list head of mrs and chains them into one xlist run,
 * handing back the tail so the caller can splice the whole run onto
 * another xlist (the pool's clean_list) in a single operation.
 */
static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
				 struct list_head *list, struct xlist_head *xlist,
				 struct xlist_head **tail_ret)
{
	struct rds_ib_mr *ibmr;
	struct xlist_head *cur_mr = xlist;
	struct xlist_head *tail_mr = NULL;

	list_for_each_entry(ibmr, list, unmap_list) {
		tail_mr = &ibmr->xlist;
		tail_mr->next = NULL;
		cur_mr->next = tail_mr;
		cur_mr = tail_mr;
	}
	*tail_ret = tail_mr;
}
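
/*
 * Resulting layout (sketch):
 *
 *	xlist -> mr0 -> mr1 -> ... -> mrN -> NULL
 *	                              ^ *tail_ret
 */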

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
				int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct xlist_head clean_xlist;
	struct xlist_head *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (xlist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	xlist_append_to_list(&pool->drop_list, &unmap_list);
	xlist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		xlist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off.  The xlist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in xlist_del_head.
		 *
		 * This is pretty unlikely, but just in case wait for an xlist
		 * grace period here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
		if (ibmr_ret)
			refill_local(pool, &clean_xlist, ibmr_ret);

		/* refill_local may have emptied our list */
		if (!xlist_empty(&clean_xlist))
			xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
	else
		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_wq, &pool->flush_worker, 10);
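
	/* Note: the 10 passed to queue_delayed_work() above (and below) is a
	 * delay in jiffies, not milliseconds. */
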
	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_delayed_work(rds_wq, &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0, NULL);
	}
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr)) {
		/* drop the ref taken in rds_ib_get_device() */
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	/* the MR now owns the device reference */
	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}
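
/*
 * Typical MR lifecycle as driven by this file (illustrative sketch only;
 * the real caller is the generic RDS rdma path, and the variable names
 * below are made up):
 *
 *	u32 rkey;
 *	void *mr = rds_ib_get_mr(sg, nents, rs, &rkey);
 *	if (!IS_ERR(mr)) {
 *		// hand rkey to the peer, let it RDMA into the pinned pages
 *		rds_ib_free_mr(mr, 0);	// MR parks on free_list/drop_list
 *	}
 *
 * Freed MRs are unmapped in batches by rds_ib_flush_mr_pool() and then
 * recycled from clean_list by rds_ib_reuse_fmr().
 */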