/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
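/*
 * One handler is kept per registered RB tree root.  The handler ties the
 * tree to an MMU notifier on the owning process so that cached user
 * address ranges can be invalidated when the mappings change.
 */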
struct mmu_rb_handler {
	struct list_head list;
	struct mmu_notifier mn;
	struct rb_root *root;
	spinlock_t lock;        /* protect the RB tree */
	struct mmu_rb_ops *ops;
};
static LIST_HEAD(mmu_rb_handlers);
static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
				     unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static struct mmu_notifier_ops mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};
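/*
 * Generate the __mmu_int_rb_*() interval tree helpers for struct
 * mmu_rb_node, keyed on the page-aligned start/last values computed by
 * mmu_node_start() and mmu_node_last().
 */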
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return ((node->addr & PAGE_MASK) + node->len);
}
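/*
 * Register an RB tree root with the MMU notifier machinery.  The caller
 * must supply compare and invalidate callbacks; the new handler is added
 * to the global handler list and a notifier is registered on the current
 * process mm.
 */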
int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
{
	struct mmu_rb_handler *handlr;
	unsigned long flags;

	if (!ops->compare || !ops->invalidate)
		return -EINVAL;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = root;
	handlr->ops = ops;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	spin_lock_irqsave(&mmu_rb_lock, flags);
	list_add_tail(&handlr->list, &mmu_rb_handlers);
	spin_unlock_irqrestore(&mmu_rb_lock, flags);

	return mmu_notifier_register(&handlr->mn, current->mm);
}
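/*
 * Unregister a previously registered root: drop the handler from the
 * global list, empty the tree (calling ops->remove for each node) and
 * unregister the MMU notifier.
 */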
void hfi1_mmu_rb_unregister(struct rb_root *root)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	unsigned long flags;

	if (!handler)
		return;

	spin_lock_irqsave(&mmu_rb_lock, flags);
	list_del(&handler->list);
	spin_unlock_irqrestore(&mmu_rb_lock, flags);

	if (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node;
		struct mmu_rb_node *rbnode;

		while ((node = rb_first(root))) {
			rbnode = rb_entry(node, struct mmu_rb_node, node);
			rb_erase(node, root);
			if (handler->ops->remove)
				handler->ops->remove(root, rbnode, false);
		}
	}

	mmu_notifier_unregister(&handler->mn, current->mm);
	kfree(handler);
}
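/*
 * Insert a node into the interval tree under the handler lock.  Fails if
 * an overlapping node is already present or if the client's insert
 * callback rejects the node.
 */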
int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	if (!handler)
		return -EINVAL;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, root);

	if (handler->ops->insert) {
		ret = handler->ops->insert(root, mnode);
		if (ret)
			__mmu_int_rb_remove(mnode, root);
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}
/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node;

	node = __mmu_int_rb_iter_first(handler->root, addr, len);
	return node;
}
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
			    struct mmu_rb_node *node, bool arg)
{
	/* Validity of handler and node pointers has been checked by caller. */
	__mmu_int_rb_remove(node, handler->root);
	if (handler->ops->remove)
		handler->ops->remove(handler->root, node, arg);
}
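/*
 * Locked wrapper around __mmu_rb_search().  Returns ERR_PTR(-EINVAL) if no
 * handler is registered for this root, otherwise the matching node or NULL.
 */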
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
				       unsigned long len)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;

	if (!handler)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}
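/*
 * Remove a node from the tree under the handler lock.  The client's remove
 * callback is invoked with its bool argument set to false; true is used
 * only on the MMU notifier invalidation path.
 */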
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	unsigned long flags;

	if (!handler || !node)
		return;

	spin_lock_irqsave(&handler->lock, flags);
	__mmu_rb_remove(handler, node, false);
	spin_unlock_irqrestore(&handler->lock, flags);
}
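/*
 * Look up the handler associated with a given RB tree root, or NULL if the
 * root was never registered.
 */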
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
{
	struct mmu_rb_handler *handler;
	unsigned long flags;

	spin_lock_irqsave(&mmu_rb_lock, flags);
	list_for_each_entry(handler, &mmu_rb_handlers, list) {
		if (handler->root == root)
			goto unlock;
	}
	handler = NULL;
unlock:
	spin_unlock_irqrestore(&mmu_rb_lock, flags);
	return handler;
}
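/*
 * MMU notifier callbacks: both the single-page and range invalidation hooks
 * funnel into mmu_notifier_mem_invalidate().
 */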
static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
}
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, start, end);
}
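/*
 * Walk the nodes that intersect the invalidated range and let the client's
 * invalidate callback decide whether each one is removed from the tree.
 */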
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					unsigned long start, unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root *root = handler->root;
	struct mmu_rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end); node;
	     node = __mmu_int_rb_iter_next(node, start, end)) {
		if (handler->ops->invalidate(root, node))
			__mmu_rb_remove(handler, node, true);
	}
	spin_unlock_irqrestore(&handler->lock, flags);
}