/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
12 bool list_lru_add(struct list_lru *lru, struct list_head *item)
14 int nid = page_to_nid(virt_to_page(item));
15 struct list_lru_node *nlru = &lru->node[nid];
17 spin_lock(&nlru->lock);
18 WARN_ON_ONCE(nlru->nr_items < 0);
19 if (list_empty(item)) {
20 list_add_tail(item, &nlru->list);
21 if (nlru->nr_items++ == 0)
22 node_set(nid, lru->active_nodes);
23 spin_unlock(&nlru->lock);
26 spin_unlock(&nlru->lock);
29 EXPORT_SYMBOL_GPL(list_lru_add);
31 bool list_lru_del(struct list_lru *lru, struct list_head *item)
33 int nid = page_to_nid(virt_to_page(item));
34 struct list_lru_node *nlru = &lru->node[nid];
36 spin_lock(&nlru->lock);
37 if (!list_empty(item)) {
39 if (--nlru->nr_items == 0)
40 node_clear(nid, lru->active_nodes);
41 WARN_ON_ONCE(nlru->nr_items < 0);
42 spin_unlock(&nlru->lock);
45 spin_unlock(&nlru->lock);
48 EXPORT_SYMBOL_GPL(list_lru_del);
50 unsigned long list_lru_count(struct list_lru *lru)
52 unsigned long count = 0;
55 for_each_node_mask(nid, lru->active_nodes) {
56 struct list_lru_node *nlru = &lru->node[nid];
58 spin_lock(&nlru->lock);
59 WARN_ON_ONCE(nlru->nr_items < 0);
60 count += nlru->nr_items;
61 spin_unlock(&nlru->lock);
66 EXPORT_SYMBOL_GPL(list_lru_count);
69 list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
70 void *cb_arg, unsigned long *nr_to_walk)
73 struct list_lru_node *nlru = &lru->node[nid];
74 struct list_head *item, *n;
75 unsigned long isolated = 0;
77 * If we don't keep state of at which pass we are, we can loop at
78 * LRU_RETRY, since we have no guarantees that the caller will be able
79 * to do something other than retry on the next pass. We handle this by
80 * allowing at most one retry per object. This should not be altered
81 * by any condition other than LRU_RETRY.
83 bool first_pass = true;
85 spin_lock(&nlru->lock);
87 list_for_each_safe(item, n, &nlru->list) {
89 ret = isolate(item, &nlru->lock, cb_arg);
92 if (--nlru->nr_items == 0)
93 node_clear(nid, lru->active_nodes);
94 WARN_ON_ONCE(nlru->nr_items < 0);
98 list_move_tail(item, &nlru->list);
113 if ((*nr_to_walk)-- == 0)
118 spin_unlock(&nlru->lock);
121 EXPORT_SYMBOL_GPL(list_lru_walk_node);
123 unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
124 void *cb_arg, unsigned long nr_to_walk)
126 unsigned long isolated = 0;
129 for_each_node_mask(nid, lru->active_nodes) {
130 isolated += list_lru_walk_node(lru, nid, isolate,
131 cb_arg, &nr_to_walk);
137 EXPORT_SYMBOL_GPL(list_lru_walk);
139 static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
140 list_lru_dispose_cb dispose)
142 struct list_lru_node *nlru = &lru->node[nid];
143 LIST_HEAD(dispose_list);
144 unsigned long disposed = 0;
146 spin_lock(&nlru->lock);
147 while (!list_empty(&nlru->list)) {
148 list_splice_init(&nlru->list, &dispose_list);
149 disposed += nlru->nr_items;
151 node_clear(nid, lru->active_nodes);
152 spin_unlock(&nlru->lock);
154 dispose(&dispose_list);
156 spin_lock(&nlru->lock);
158 spin_unlock(&nlru->lock);
162 unsigned long list_lru_dispose_all(struct list_lru *lru,
163 list_lru_dispose_cb dispose)
165 unsigned long disposed;
166 unsigned long total = 0;
171 for_each_node_mask(nid, lru->active_nodes) {
172 disposed += list_lru_dispose_all_node(lru, nid,
176 } while (disposed != 0);
181 int list_lru_init(struct list_lru *lru)
185 nodes_clear(lru->active_nodes);
186 for (i = 0; i < MAX_NUMNODES; i++) {
187 spin_lock_init(&lru->node[i].lock);
188 INIT_LIST_HEAD(&lru->node[i].list);
189 lru->node[i].nr_items = 0;
193 EXPORT_SYMBOL_GPL(list_lru_init);