/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_devinfo_alloc - allocates a balloon device information descriptor.
 * @balloon_dev_descriptor: pointer to reference the balloon device which
 *                          this struct balloon_dev_info will be servicing.
 *
 * Driver must call it to properly allocate and initialize an instance of
 * struct balloon_dev_info which will be used to reference a balloon device
 * as well as to keep track of the balloon device page list.
 */
struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor)
{
        struct balloon_dev_info *b_dev_info;

        b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL);
        if (!b_dev_info)
                return ERR_PTR(-ENOMEM);

        b_dev_info->balloon_device = balloon_dev_descriptor;
        b_dev_info->mapping = NULL;
        b_dev_info->isolated_pages = 0;
        spin_lock_init(&b_dev_info->pages_lock);
        INIT_LIST_HEAD(&b_dev_info->pages);

        return b_dev_info;
}
EXPORT_SYMBOL_GPL(balloon_devinfo_alloc);
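
/*
 * Example (illustrative sketch, not part of this file): a balloon driver
 * probe path allocating its device descriptor. "my_balloon" is a
 * hypothetical driver-private structure holding a b_dev_info pointer.
 *
 *      my_balloon->b_dev_info = balloon_devinfo_alloc(my_balloon);
 *      if (IS_ERR(my_balloon->b_dev_info))
 *              return PTR_ERR(my_balloon->b_dev_info);
 */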

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *                        page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * Driver must call it to properly allocate and enlist a new balloon page
 * before definitively removing the page from the guest system.
 * This function returns the page address for the recently enqueued page or
 * NULL in case we fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
                                        __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;

        /*
         * A freshly allocated page is not yet visible to anyone else,
         * so this trylock cannot fail.
         */
        BUG_ON(!trylock_page(page));
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        unlock_page(page);
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
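
/*
 * Example (illustrative sketch): inflating the balloon by one page.
 * tell_host_about_page() stands in for whatever guest-to-host notification
 * mechanism the hypothetical driver uses; a NULL return just means the
 * allocation failed this round and the driver should retry later.
 *
 *      struct page *page = balloon_page_enqueue(my_balloon->b_dev_info);
 *
 *      if (!page)
 *              return -ENOMEM;
 *      tell_host_about_page(my_balloon, page_to_pfn(page));
 */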

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *                        its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call it to properly dequeue a previously enlisted balloon page
 * before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in case we find balloon's page list temporarily empty because
 * compaction has isolated its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
        struct page *page, *tmp;
        unsigned long flags;
        bool dequeued_page;

        dequeued_page = false;
        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
                /*
                 * Block others from accessing the 'page' while we get
                 * around to preparing it to be released by the driver.
                 * Pages we cannot lock are being isolated by compaction
                 * right now; skip them this pass.
                 */
                if (trylock_page(page)) {
                        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                        balloon_page_delete(page);
                        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                        unlock_page(page);
                        dequeued_page = true;
                        break;
                }
        }

        if (!dequeued_page) {
                /*
                 * If the page list is empty and there are no isolated
                 * pages, then something went out of track and some balloon
                 * pages are lost. BUG() here, otherwise the balloon driver
                 * may get stuck in an infinite loop while attempting to
                 * release all its pages.
                 */
                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                if (unlikely(list_empty(&b_dev_info->pages) &&
                             !b_dev_info->isolated_pages))
                        BUG();
                spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                page = NULL;
        }
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
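
/*
 * Example (illustrative sketch): deflating the balloon by one page.
 * tell_host_page_released() is a hypothetical notification hook, and
 * balloon_page_free() is assumed to be the release helper declared in
 * <linux/balloon_compaction.h>. A NULL return means every enlisted page
 * is transiently isolated by compaction, so retry rather than fail.
 *
 *      struct page *page = balloon_page_dequeue(my_balloon->b_dev_info);
 *
 *      if (!page)
 *              return -EAGAIN;
 *      tell_host_page_released(my_balloon, page_to_pfn(page));
 *      balloon_page_free(page);
 */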

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages.
 * @b_dev_info: holds the balloon device information descriptor.
 * @a_ops: balloon_mapping address_space_operations descriptor.
 *
 * Driver must call it to properly allocate and initialize an instance of
 * struct address_space which will be used as the special page->mapping for
 * balloon device enlisted page instances.
 */
struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
                                const struct address_space_operations *a_ops)
{
        struct address_space *mapping;

        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping)
                return ERR_PTR(-ENOMEM);

        /*
         * Give a clean 'zeroed' status to all elements of this special
         * balloon page->mapping struct address_space instance.
         */
        address_space_init_once(mapping);

        /*
         * Set mapping->flags appropriately, to allow balloon pages
         * ->mapping identification.
         */
        mapping_set_balloon(mapping);
        mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());

        /* balloon's page->mapping->a_ops callback descriptor */
        mapping->a_ops = a_ops;

        /*
         * Establish a pointer reference back to the balloon device descriptor
         * this particular page->mapping will be servicing.
         * This is used by compaction / migration procedures to identify and
         * access the balloon device pageset while isolating / migrating pages.
         *
         * As some balloon drivers can register multiple balloon devices
         * for a single guest, this also helps compaction / migration to
         * properly deal with multiple balloon pagesets, when required.
         */
        mapping->private_data = b_dev_info;
        b_dev_info->mapping = mapping;

        return mapping;
}
EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
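
/*
 * Example (illustrative sketch): wiring the special mapping up at driver
 * init time, right after balloon_devinfo_alloc(). "my_balloon_aops" is a
 * hypothetical address_space_operations instance whose .migratepage
 * callback performs the driver side of page migration (see
 * balloon_page_migrate() below).
 *
 *      struct address_space *mapping;
 *
 *      mapping = balloon_mapping_alloc(my_balloon->b_dev_info,
 *                                      &my_balloon_aops);
 *      if (IS_ERR(mapping))
 *              return PTR_ERR(mapping);
 */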

static inline void __isolate_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = page->mapping->private_data;
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_del(&page->lru);
        b_dev_info->isolated_pages++;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = page->mapping->private_data;
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_add(&page->lru, &b_dev_info->pages);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline int __migrate_balloon_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        return page->mapping->a_ops->migratepage(mapping, newpage, page, mode);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a balloon page being freed under us and
         * raise its refcount, preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leak.
         */
        if (likely(get_page_unless_zero(page))) {
                /*
                 * As balloon pages are not isolated from LRU lists, concurrent
                 * compaction threads can race against page migration functions
                 * as well as race against the balloon driver releasing a page.
                 *
                 * In order to avoid having an already isolated balloon page
                 * being (wrongly) re-isolated while it is under migration,
                 * or to avoid attempting to isolate pages being released by
                 * the balloon driver, let's make sure we hold the page lock
                 * before proceeding with the balloon page isolation steps.
                 */
                if (likely(trylock_page(page))) {
                        /*
                         * A ballooned page, by default, has just one refcount.
                         * Prevent concurrent compaction threads from isolating
                         * an already isolated balloon page with this refcount
                         * check (the get_page_unless_zero() above makes two).
                         */
                        if (__is_movable_balloon_page(page) &&
                            page_count(page) == 2) {
                                __isolate_balloon_page(page);
                                balloon_event_count(COMPACTBALLOONISOLATED);
                                unlock_page(page);
                                return true;
                        }
                        unlock_page(page);
                }
                put_page(page);
        }
        return false;
}
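
/*
 * For reference, a simplified sketch of how the compaction / migration
 * core is expected to drive the entry points in this file. It assumes
 * the balloon_page_movable() helper from <linux/balloon_compaction.h>
 * and the balloon-specific MIGRATEPAGE_BALLOON_SUCCESS return code;
 * error handling is omitted:
 *
 *      if (balloon_page_movable(page) && balloon_page_isolate(page)) {
 *              rc = balloon_page_migrate(newpage, page, mode);
 *              if (rc != MIGRATEPAGE_BALLOON_SUCCESS)
 *                      balloon_page_putback(page);
 *      }
 */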

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
        /*
         * 'lock_page()' stabilizes the page and prevents races against
         * concurrent isolation threads attempting to re-isolate it.
         */
        lock_page(page);

        if (__is_movable_balloon_page(page)) {
                __putback_balloon_page(page);
                /* drop the extra refcount taken for page isolation */
                put_page(page);
                balloon_event_count(COMPACTBALLOONRETURNED);
        } else {
                WARN_ON(1);
                dump_page(page);
        }
        unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
                         struct page *page, enum migrate_mode mode)
{
        struct address_space *mapping;
        int rc = -EAGAIN;

        /*
         * newpage was just allocated by the migration core and is not
         * visible to anyone else yet, so this trylock cannot fail.
         */
        BUG_ON(!trylock_page(newpage));

        if (WARN_ON(!__is_movable_balloon_page(page))) {
                dump_page(page);
                unlock_page(newpage);
                return rc;
        }

        mapping = page->mapping;
        if (mapping)
                rc = __migrate_balloon_page(mapping, newpage, page, mode);

        unlock_page(newpage);
        return rc;
}
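
/*
 * Example (illustrative sketch): the overall shape of a driver's
 * .migratepage callback, as invoked through __migrate_balloon_page()
 * above. tell_host_inflate() / tell_host_deflate() are hypothetical
 * notification hooks, and MIGRATEPAGE_BALLOON_SUCCESS is assumed to be
 * the balloon-specific success code understood by the migration core.
 * The callback enlists the new page, drops the isolated-pages count that
 * __isolate_balloon_page() raised, and deflates the old page, which the
 * migration core then frees.
 *
 *      static int my_balloon_migratepage(struct address_space *mapping,
 *                      struct page *newpage, struct page *page,
 *                      enum migrate_mode mode)
 *      {
 *              struct balloon_dev_info *b = mapping->private_data;
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&b->pages_lock, flags);
 *              balloon_page_insert(newpage, mapping, &b->pages);
 *              b->isolated_pages--;
 *              spin_unlock_irqrestore(&b->pages_lock, flags);
 *              tell_host_inflate(b->balloon_device, page_to_pfn(newpage));
 *
 *              balloon_page_delete(page);
 *              tell_host_deflate(b->balloon_device, page_to_pfn(page));
 *
 *              return MIGRATEPAGE_BALLOON_SUCCESS;
 *      }
 */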
#endif /* CONFIG_BALLOON_COMPACTION */