/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2013, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by:       Xavier Deguillard <xdeguillard@vmware.com>
 *                      Philip Moltmann <moltmann@vmware.com>
 */

/*
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests.  The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.3.4.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling the rate of inflating/deflating the
 * balloon, measured in pages.
 */

/*
 * Rates of memory allocation when the guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN      512U
#define VMW_BALLOON_RATE_ALLOC_MAX      2048U
#define VMW_BALLOON_RATE_ALLOC_INC      16U

/*
 * When the guest is under memory pressure, use a reduced page allocation
 * rate for the next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES         4

/*
 * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We don't
 * allow waiting (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP          (__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep.  This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP         (GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during an inflation cycle */
#define VMW_BALLOON_MAX_REFUSED         16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT             0x5670
#define VMW_BALLOON_HV_MAGIC            0x456c6d6f
#define VMW_BALLOON_GUEST_ID            1       /* Linux */

enum vmwballoon_capabilities {
        /*
         * Bit 0 is reserved and not associated to any capability.
         */
        VMW_BALLOON_BASIC_CMDS          = (1 << 1),
        VMW_BALLOON_BATCHED_CMDS        = (1 << 2)
};

#define VMW_BALLOON_CAPABILITIES        (VMW_BALLOON_BASIC_CMDS \
                                        | VMW_BALLOON_BATCHED_CMDS)

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *      LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *      BATCHED_LOCK and BATCHED_UNLOCK commands.
 */
#define VMW_BALLOON_CMD_START           0
#define VMW_BALLOON_CMD_GET_TARGET      1
#define VMW_BALLOON_CMD_LOCK            2
#define VMW_BALLOON_CMD_UNLOCK          3
#define VMW_BALLOON_CMD_GUEST_ID        4
#define VMW_BALLOON_CMD_BATCHED_LOCK    6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK  7

/* error codes */
#define VMW_BALLOON_SUCCESS                     0
#define VMW_BALLOON_FAILURE                     -1
#define VMW_BALLOON_ERROR_CMD_INVALID           1
#define VMW_BALLOON_ERROR_PPN_INVALID           2
#define VMW_BALLOON_ERROR_PPN_LOCKED            3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED          4
#define VMW_BALLOON_ERROR_PPN_PINNED            5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED         6
#define VMW_BALLOON_ERROR_RESET                 7
#define VMW_BALLOON_ERROR_BUSY                  8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES   (0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64  PAGE_SHIFT          6         0
 *
 * For now only 4K pages are supported, but we can easily support large pages
 * by using bits in the reserved field.
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES     (PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK   ((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK     (~((1UL << PAGE_SHIFT) - 1))

struct vmballoon_batch_page {
        u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

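/*
 * Each 64-bit entry in the batch page packs a page's physical address in
 * the high bits (VMW_BALLOON_BATCH_PAGE_MASK) with a hypervisor status
 * code in the low bits (VMW_BALLOON_BATCH_STATUS_MASK). Since a
 * page-aligned address has zero low bits, storing the address alone
 * yields a valid entry; the host fills the low bits with the status
 * when the batched command completes.
 */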
static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
        return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
                                int idx)
{
        return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
                                u64 pa)
{
        batch->pages[idx] = pa;
}

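/*
 * Issue a balloon backdoor call: a 32-bit "inl" on the hypervisor port
 * with the magic number in %eax, the command in %ecx, the port in %edx
 * and the arguments in %ebx/%esi. The hypervisor returns the status in
 * %eax and a command-specific result in %ebx, except for START, which
 * returns its result (the accepted capabilities) in %ecx.
 */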
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)             \
({                                                              \
        unsigned long __status, __dummy1, __dummy2, __dummy3;   \
        __asm__ __volatile__ ("inl %%dx" :                      \
                "=a"(__status),                                 \
                "=c"(__dummy1),                                 \
                "=d"(__dummy2),                                 \
                "=b"(result),                                   \
                "=S" (__dummy3) :                               \
                "0"(VMW_BALLOON_HV_MAGIC),                      \
                "1"(VMW_BALLOON_CMD_##cmd),                     \
                "2"(VMW_BALLOON_HV_PORT),                       \
                "3"(arg1),                                      \
                "4" (arg2) :                                    \
                "memory");                                      \
        if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)     \
                result = __dummy1;                              \
        result &= -1UL;                                         \
        __status & -1UL;                                        \
})

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
        unsigned int timer;

        /* allocation statistics */
        unsigned int alloc;
        unsigned int alloc_fail;
        unsigned int sleep_alloc;
        unsigned int sleep_alloc_fail;
        unsigned int refused_alloc;
        unsigned int refused_free;
        unsigned int free;

        /* monitor operations */
        unsigned int lock;
        unsigned int lock_fail;
        unsigned int unlock;
        unsigned int unlock_fail;
        unsigned int target;
        unsigned int target_fail;
        unsigned int start;
        unsigned int start_fail;
        unsigned int guest_type;
        unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

struct vmballoon_ops {
        void (*add_page)(struct vmballoon *b, int idx, struct page *p);
        int (*lock)(struct vmballoon *b, unsigned int num_pages,
                                                unsigned int *target);
        int (*unlock)(struct vmballoon *b, unsigned int num_pages,
                                                unsigned int *target);
};

struct vmballoon {

        /* list of reserved physical pages */
        struct list_head pages;

        /* transient list of non-balloonable pages */
        struct list_head refused_pages;
        unsigned int n_refused_pages;

        /* balloon size in pages */
        unsigned int size;
        unsigned int target;

        /* reset flag */
        bool reset_required;

        /* adjustment rates (pages per second) */
        unsigned int rate_alloc;

        /* slowdown page allocations for next few cycles */
        unsigned int slow_allocation_cycles;

        /* capabilities negotiated with the host via the "start" command */
        unsigned long capabilities;

        /* kernel mapping of the batch page shared with the host */
        struct vmballoon_batch_page *batch_page;
        /* number of entries a batch can hold (1 in non-batched mode) */
        unsigned int batch_max_pages;
        /*
         * In batched mode, the page backing batch_page; in non-batched
         * mode, the page currently being locked or unlocked.
         */
        struct page *page;

        const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
        /* statistics */
        struct vmballoon_stats stats;

        /* debugfs file exporting statistics */
        struct dentry *dbg_entry;
#endif

        struct sysinfo sysinfo;

        struct delayed_work dwork;
};

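/*
 * The driver manages a single balloon instance per virtual machine; all
 * state lives in this statically allocated object.
 */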
static struct vmballoon balloon;

/*
 * Send the "start" command to the host, communicating the supported
 * protocol version and capabilities. The host answers either with a
 * plain success (basic commands only) or with the set of capabilities
 * it accepts.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
        unsigned long status, capabilities, dummy = 0;

        STATS_INC(b->stats.start);

        status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

        switch (status) {
        case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
                b->capabilities = capabilities;
                return true;
        case VMW_BALLOON_SUCCESS:
                b->capabilities = VMW_BALLOON_BASIC_CMDS;
                return true;
        }

        pr_debug("%s - failed, hv returns %ld\n", __func__, status);
        STATS_INC(b->stats.start_fail);
        return false;
}

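/*
 * Check the status returned by a backdoor command. VMW_BALLOON_ERROR_RESET
 * is handled specially: it flags the balloon for a full reset, which the
 * next work cycle performs before doing anything else.
 */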
static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
        switch (status) {
        case VMW_BALLOON_SUCCESS:
                return true;

        case VMW_BALLOON_ERROR_RESET:
                b->reset_required = true;
                /* fall through */

        default:
                return false;
        }
}

/*
 * Communicate the guest type to the host so that it can adjust the
 * ballooning algorithm to the one most appropriate for the guest. This
 * command is normally issued after sending the "start" command and is
 * part of the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
        unsigned long status, dummy = 0;

        status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
                                dummy);

        STATS_INC(b->stats.guest_type);

        if (vmballoon_check_status(b, status))
                return true;

        pr_debug("%s - failed, hv returns %ld\n", __func__, status);
        STATS_INC(b->stats.guest_type_fail);
        return false;
}

/*
 * Retrieve the desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
        unsigned long status;
        unsigned long target;
        unsigned long limit;
        unsigned long dummy = 0;
        u32 limit32;

        /*
         * si_meminfo() is cheap. Moreover, we want to provide dynamic
         * max balloon size later. So let us call si_meminfo() every
         * iteration.
         */
        si_meminfo(&b->sysinfo);
        limit = b->sysinfo.totalram;

        /* Ensure the limit fits in 32 bits */
        limit32 = (u32)limit;
        if (limit != limit32)
                return false;

        /* update stats */
        STATS_INC(b->stats.target);

        status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
        if (vmballoon_check_status(b, status)) {
                *new_target = target;
                return true;
        }

        pr_debug("%s - failed, hv returns %ld\n", __func__, status);
        STATS_INC(b->stats.target_fail);
        return false;
}

/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and maybe submit a different
 * page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
                                unsigned int *hv_status, unsigned int *target)
{
        unsigned long status, dummy = 0;
        u32 pfn32;

        pfn32 = (u32)pfn;
        if (pfn32 != pfn)
                return -1;

        STATS_INC(b->stats.lock);

        *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
        if (vmballoon_check_status(b, status))
                return 0;

        pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.lock_fail);
        return 1;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
                                unsigned int num_pages, unsigned int *target)
{
        unsigned long status;
        unsigned long pfn = page_to_pfn(b->page);

        STATS_INC(b->stats.lock);

        status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages, *target);
        if (vmballoon_check_status(b, status))
                return 0;

        pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.lock_fail);
        return 1;
}

/*
 * Notify the host that the guest intends to release the given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
                                                        unsigned int *target)
{
        unsigned long status, dummy = 0;
        u32 pfn32;

        pfn32 = (u32)pfn;
        if (pfn32 != pfn)
                return false;

        STATS_INC(b->stats.unlock);

        status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
        if (vmballoon_check_status(b, status))
                return true;

        pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.unlock_fail);
        return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
                                unsigned int num_pages, unsigned int *target)
{
        unsigned long status;
        unsigned long pfn = page_to_pfn(b->page);

        STATS_INC(b->stats.unlock);

        status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages, *target);
        if (vmballoon_check_status(b, status))
                return true;

        pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.unlock_fail);
        return false;
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike a normal "deflate" we do not (shall not) notify the
 * host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, &b->pages, lru) {
                list_del(&page->lru);
                __free_page(page);
                STATS_INC(b->stats.free);
                b->size--;
                cond_resched();
        }

        if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
                if (b->batch_page)
                        vunmap(b->batch_page);

                if (b->page)
                        __free_page(b->page);
        }
}

/*
 * Notify the host of a ballooned page. If the host rejects the page, put
 * it on the list of refused pages; refused pages are released at the end
 * of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
                                                        unsigned int *target)
{
        int locked, hv_status;
        struct page *page = b->page;

        locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                                                target);
        if (locked > 0) {
                STATS_INC(b->stats.refused_alloc);

                if (hv_status == VMW_BALLOON_ERROR_RESET ||
                                hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
                        __free_page(page);
                        return -EIO;
                }

                /*
                 * Place page on the list of non-balloonable pages
                 * and retry allocation, unless we already accumulated
                 * too many of them, in which case take a breather.
                 */
                if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
                        b->n_refused_pages++;
                        list_add(&page->lru, &b->refused_pages);
                } else {
                        __free_page(page);
                }
                return -EIO;
        }

        /* track allocated page */
        list_add(&page->lru, &b->pages);

        /* update balloon size */
        b->size++;

        return 0;
}

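/*
 * Batched counterpart of vmballoon_lock_page(): hand the whole batch page
 * to the host in a single backdoor call, then walk the per-entry status
 * codes to see which pages were accepted, which were refused, and which
 * must simply be freed back to the guest.
 */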
static int vmballoon_lock_batched_page(struct vmballoon *b,
                                unsigned int num_pages, unsigned int *target)
{
        int locked, i;

        locked = vmballoon_send_batched_lock(b, num_pages, target);
        if (locked > 0) {
                for (i = 0; i < num_pages; i++) {
                        u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
                        struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

                        __free_page(p);
                }

                return -EIO;
        }

        for (i = 0; i < num_pages; i++) {
                u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
                struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

                locked = vmballoon_batch_get_status(b->batch_page, i);

                switch (locked) {
                case VMW_BALLOON_SUCCESS:
                        list_add(&p->lru, &b->pages);
                        b->size++;
                        break;
                case VMW_BALLOON_ERROR_PPN_PINNED:
                case VMW_BALLOON_ERROR_PPN_INVALID:
                        if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
                                list_add(&p->lru, &b->refused_pages);
                                b->n_refused_pages++;
                                break;
                        }
                        /* Fallthrough */
                case VMW_BALLOON_ERROR_RESET:
                case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
                        __free_page(p);
                        break;
                default:
                        /* This should never happen */
                        WARN_ON_ONCE(true);
                }
        }

        return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
                                                        unsigned int *target)
{
        struct page *page = b->page;

        if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
                list_add(&page->lru, &b->pages);
                return -EIO;
        }

        /* deallocate page */
        __free_page(page);
        STATS_INC(b->stats.free);

        /* update balloon size */
        b->size--;

        return 0;
}

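/*
 * Batched counterpart of vmballoon_unlock_page(): try to unlock the whole
 * batch in a single backdoor call. Pages the host did not release are
 * returned to the balloon's page list; all others are freed.
 */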
static int vmballoon_unlock_batched_page(struct vmballoon *b,
                                unsigned int num_pages, unsigned int *target)
{
        int locked, i, ret = 0;
        bool hv_success;

        hv_success = vmballoon_send_batched_unlock(b, num_pages, target);
        if (!hv_success)
                ret = -EIO;

        for (i = 0; i < num_pages; i++) {
                u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
                struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

                locked = vmballoon_batch_get_status(b->batch_page, i);
                if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
                        /*
                         * That page wasn't successfully unlocked by the
                         * hypervisor, re-add it to the list of pages owned by
                         * the balloon driver.
                         */
                        list_add(&p->lru, &b->pages);
                } else {
                        /* deallocate page */
                        __free_page(p);
                        STATS_INC(b->stats.free);

                        /* update balloon size */
                        b->size--;
                }
        }

        return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
                list_del(&page->lru);
                __free_page(page);
                STATS_INC(b->stats.refused_free);
        }

        b->n_refused_pages = 0;
}

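/*
 * The two "add_page" flavors: the basic one simply remembers the page so
 * the following lock/unlock operation can reference it; the batched one
 * stores the page's physical address in the batch page at the given index.
 */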
static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
        b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
                                struct page *p)
{
        vmballoon_batch_set_pa(b->batch_page, idx,
                        (u64)page_to_pfn(p) << PAGE_SHIFT);
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
        unsigned rate;
        unsigned int allocations = 0;
        unsigned int num_pages = 0;
        int error = 0;
        gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;

        pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

        /*
         * First try NOSLEEP page allocations to inflate balloon.
         *
         * If we do not throttle nosleep allocations, we can drain all
         * free pages in the guest quickly (if the balloon target is high).
         * As a side-effect, draining free pages helps to inform (force)
         * the guest to start swapping if balloon target is not met yet,
         * which is a desired behavior. However, balloon driver can consume
         * all available CPU cycles if too many pages are allocated in a
         * second. Therefore, we throttle nosleep allocations even when
         * the guest is not under memory pressure. OTOH, if we have already
         * predicted that the guest is under memory pressure, then we
         * slowdown page allocations considerably.
         */

        /*
         * Start with the no-sleep allocation rate, which may be higher
         * than the sleeping allocation rate.
         */
        rate = b->slow_allocation_cycles ? b->rate_alloc : UINT_MAX;

        pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
                 __func__, b->target - b->size, rate, b->rate_alloc);

        while (!b->reset_required &&
                b->size < b->target && num_pages < b->target - b->size) {
                struct page *page;

                if (flags == VMW_PAGE_ALLOC_NOSLEEP)
                        STATS_INC(b->stats.alloc);
                else
                        STATS_INC(b->stats.sleep_alloc);

                page = alloc_page(flags);
                if (!page) {
                        if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
                                /*
                                 * CANSLEEP page allocation failed, so guest
                                 * is under severe memory pressure. Quickly
                                 * decrease allocation rate.
                                 */
                                b->rate_alloc = max(b->rate_alloc / 2,
                                                    VMW_BALLOON_RATE_ALLOC_MIN);
                                STATS_INC(b->stats.sleep_alloc_fail);
                                break;
                        }
                        STATS_INC(b->stats.alloc_fail);

                        /*
                         * NOSLEEP page allocation failed, so the guest is
                         * under memory pressure. Let us slow down page
                         * allocations for next few cycles so that the guest
                         * gets out of memory pressure. Also, if we already
                         * allocated b->rate_alloc pages, let's pause,
                         * otherwise switch to sleeping allocations.
                         */
                        b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

                        if (allocations >= b->rate_alloc)
                                break;

                        flags = VMW_PAGE_ALLOC_CANSLEEP;
                        /* Lower rate for sleeping allocations. */
                        rate = b->rate_alloc;
                        continue;
                }

                /*
                 * Count the allocation, so the rate checks below see
                 * progress; without this the configured allocation rate
                 * would never be honored.
                 */
                allocations++;

                b->ops->add_page(b, num_pages++, page);
                if (num_pages == b->batch_max_pages) {
                        error = b->ops->lock(b, num_pages, &b->target);
                        num_pages = 0;
                        if (error)
                                break;
                }

                cond_resched();

                if (allocations >= rate) {
                        /* We allocated enough pages, let's take a break. */
                        break;
                }
        }

        if (num_pages > 0)
                b->ops->lock(b, num_pages, &b->target);

        /*
         * We reached our goal without failures so try increasing
         * allocation rate.
         */
        if (error == 0 && allocations >= b->rate_alloc) {
                unsigned int mult = allocations / b->rate_alloc;

                b->rate_alloc =
                        min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
                            VMW_BALLOON_RATE_ALLOC_MAX);
        }

        vmballoon_release_refused_pages(b);
}

/*
 * Decrease the size of the balloon, allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
        struct page *page, *next;
        unsigned int i = 0;
        unsigned int num_pages = 0;
        int error;

        pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

        /* free pages to reach target */
        list_for_each_entry_safe(page, next, &b->pages, lru) {
                list_del(&page->lru);
                b->ops->add_page(b, num_pages++, page);

                if (num_pages == b->batch_max_pages) {
                        error = b->ops->unlock(b, num_pages, &b->target);
                        num_pages = 0;
                        if (error)
                                return;
                }

                if (b->reset_required || ++i >= b->size - b->target)
                        break;

                cond_resched();
        }

        if (num_pages > 0)
                b->ops->unlock(b, num_pages, &b->target);
}

static const struct vmballoon_ops vmballoon_basic_ops = {
        .add_page = vmballoon_add_page,
        .lock = vmballoon_lock_page,
        .unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
        .add_page = vmballoon_add_batched_page,
        .lock = vmballoon_lock_batched_page,
        .unlock = vmballoon_unlock_batched_page
};

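/*
 * Allocate the page backing the batch and map it into the kernel address
 * space. The batched lock/unlock commands later hand this page's PFN to
 * the hypervisor, while the driver fills it through b->batch_page.
 */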
static bool vmballoon_init_batching(struct vmballoon *b)
{
        b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
        if (!b->page)
                return false;

        b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
        if (!b->batch_page) {
                __free_page(b->page);
                return false;
        }

        return true;
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
        /* free all pages, skipping monitor unlock */
        vmballoon_pop(b);

        if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
                return;

        if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
                b->ops = &vmballoon_batched_ops;
                b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
                if (!vmballoon_init_batching(b)) {
                        /*
                         * We failed to initialize batching, inform the monitor
                         * about it by sending a null capability.
                         *
                         * The guest will retry in one second.
                         */
                        vmballoon_send_start(b, 0);
                        return;
                }
        } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
                b->ops = &vmballoon_basic_ops;
                b->batch_max_pages = 1;
        }

        b->reset_required = false;
        if (!vmballoon_send_guest_id(b))
                pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset the protocol, if needed; get the new size
 * and adjust the balloon as needed. Repeat every second.
 */
static void vmballoon_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
        unsigned int target;

        STATS_INC(b->stats.timer);

        if (b->reset_required)
                vmballoon_reset(b);

        if (b->slow_allocation_cycles > 0)
                b->slow_allocation_cycles--;

        if (vmballoon_send_get_target(b, &target)) {
                /* update target, adjust size */
                b->target = target;

                if (b->size < target)
                        vmballoon_inflate(b);
                else if (b->size > target)
                        vmballoon_deflate(b);
        }

        /*
         * We are using a freezable workqueue so that balloon operations are
         * stopped while the system transitions to/from sleep/hibernation.
         */
        queue_delayed_work(system_freezable_wq,
                           dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
        struct vmballoon *b = f->private;
        struct vmballoon_stats *stats = &b->stats;

        /* format capabilities info */
        seq_printf(f,
                   "balloon capabilities:   %#4x\n"
                   "used capabilities:      %#4lx\n",
                   VMW_BALLOON_CAPABILITIES, b->capabilities);

        /* format size info */
        seq_printf(f,
                   "target:             %8d pages\n"
                   "current:            %8d pages\n",
                   b->target, b->size);

        /* format rate info */
        seq_printf(f,
                   "rateSleepAlloc:     %8d pages/sec\n",
                   b->rate_alloc);

        seq_printf(f,
                   "\n"
                   "timer:              %8u\n"
                   "start:              %8u (%4u failed)\n"
                   "guestType:          %8u (%4u failed)\n"
                   "lock:               %8u (%4u failed)\n"
                   "unlock:             %8u (%4u failed)\n"
                   "target:             %8u (%4u failed)\n"
                   "primNoSleepAlloc:   %8u (%4u failed)\n"
                   "primCanSleepAlloc:  %8u (%4u failed)\n"
                   "primFree:           %8u\n"
                   "errAlloc:           %8u\n"
                   "errFree:            %8u\n",
                   stats->timer,
                   stats->start, stats->start_fail,
                   stats->guest_type, stats->guest_type_fail,
                   stats->lock,  stats->lock_fail,
                   stats->unlock, stats->unlock_fail,
                   stats->target, stats->target_fail,
                   stats->alloc, stats->alloc_fail,
                   stats->sleep_alloc, stats->sleep_alloc_fail,
                   stats->free,
                   stats->refused_alloc, stats->refused_free);

        return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
        .owner          = THIS_MODULE,
        .open           = vmballoon_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
        int error;

        b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
                                           &vmballoon_debug_fops);
        if (IS_ERR(b->dbg_entry)) {
                error = PTR_ERR(b->dbg_entry);
                pr_err("failed to create debugfs entry, error: %d\n", error);
                return error;
        }

        return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
        debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
        return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif  /* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
        int error;

        /*
         * Check if we are running on VMware's hypervisor and bail out
         * if we are not.
         */
        if (x86_hyper != &x86_hyper_vmware)
                return -ENODEV;

        INIT_LIST_HEAD(&balloon.pages);
        INIT_LIST_HEAD(&balloon.refused_pages);

        /* initialize rates */
        balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

        INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

        /*
         * Start balloon.
         */
        if (!vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES)) {
                pr_err("failed to send start command to the host\n");
                return -EIO;
        }

        if ((balloon.capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
                balloon.ops = &vmballoon_batched_ops;
                balloon.batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
                if (!vmballoon_init_batching(&balloon)) {
                        pr_err("failed to init batching\n");
                        return -EIO;
                }
        } else if ((balloon.capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
                balloon.ops = &vmballoon_basic_ops;
                balloon.batch_max_pages = 1;
        }

        if (!vmballoon_send_guest_id(&balloon)) {
                pr_err("failed to send guest ID to the host\n");
                return -EIO;
        }

        error = vmballoon_debugfs_init(&balloon);
        if (error)
                return error;

        queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

        return 0;
}
module_init(vmballoon_init);

static void __exit vmballoon_exit(void)
{
        cancel_delayed_work_sync(&balloon.dwork);

        vmballoon_debugfs_exit(&balloon);

        /*
         * Deallocate all reserved memory, and reset connection with monitor.
         * Reset connection before deallocating memory to avoid potential for
         * additional spurious resets from guest touching deallocated pages.
         */
        vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES);
        vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);