/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2013, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com>
 *			Philip Moltmann <moltmann@vmware.com>
 */
/*
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */
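
/*
 * In terms of the backdoor protocol implemented below this means: the
 * driver sends START (negotiating capabilities) and GUEST_ID once, then
 * periodically queries the host with GET_TARGET and uses LOCK/UNLOCK
 * (or their batched variants) to grow or shrink the balloon towards
 * that target.
 */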
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.3.4.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
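
/*
 * The "dmi:*:svnVMware*:*" alias above lets userspace (udev) autoload
 * this module on machines whose DMI system-vendor string identifies a
 * VMware platform.
 */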
/*
 * Various constants controlling rate of inflating/deflating balloon,
 * measured in pages.
 */

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U
/*
 * When guest is under memory pressure, use a reduced page allocation
 * rate for the next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4
/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16
/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS		= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS	= (1 << 2)
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS)
/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
 */
#define VMW_BALLOON_CMD_START		0
#define VMW_BALLOON_CMD_GET_TARGET	1
#define VMW_BALLOON_CMD_LOCK		2
#define VMW_BALLOON_CMD_UNLOCK		3
#define VMW_BALLOON_CMD_GUEST_ID	4
#define VMW_BALLOON_CMD_BATCHED_LOCK	6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK	7
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
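
/*
 * When START returns VMW_BALLOON_SUCCESS_WITH_CAPABILITIES, the result
 * value of the command carries the capability bits granted by the host
 * (see vmballoon_send_start() below).
 */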
/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 *	+-------------+----------+--------+
 *	|             |          |        |
 *	| Page number | Reserved | Status |
 *	|             |          |        |
 *	+-------------+----------+--------+
 *
 * For now only 4K pages are supported, but we can easily support large pages
 * by using bits in the reserved field.
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
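
/*
 * With the masks above each 64-bit entry is simply the page's physical
 * address with the hypervisor status code packed into its low bits; see
 * vmballoon_batch_get_pa()/vmballoon_batch_get_status() below.
 */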
struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};
static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}
static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				      int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}
static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				   u64 pa)
{
	batch->pages[idx] = pa;
}
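
/*
 * All balloon commands go through the VMware hypervisor port: a single
 * "inl" with VMW_BALLOON_HV_MAGIC and the command number loaded into
 * registers, as done by the macro below. Typical use, as seen in the
 * vmballoon_send_*() helpers:
 *
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
 */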
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;

	/* allocation statistics */
	unsigned int alloc;
	unsigned int alloc_fail;
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc;
	unsigned int refused_free;
	unsigned int free;

	/* monitor operations */
	unsigned int lock;
	unsigned int lock_fail;
	unsigned int unlock;
	unsigned int unlock_fail;
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif
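
/*
 * Without CONFIG_DEBUG_FS the statistics are not kept and STATS_INC()
 * compiles away to nothing.
 */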
struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			unsigned int *target);
};
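
/*
 * One implementation of these ops handles pages one at a time over the
 * LOCK/UNLOCK commands (vmballoon_basic_ops), the other communicates
 * whole batch pages over BATCHED_LOCK/BATCHED_UNLOCK
 * (vmballoon_batched_ops); the set to use is chosen from the
 * capabilities negotiated with the host.
 */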
struct vmballoon {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;
};

static struct vmballoon balloon;
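
/*
 * The driver operates on this single, statically allocated balloon
 * instance; all adjustments are driven from the delayed work item
 * initialized in vmballoon_init().
 */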
/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;

	STATS_INC(b->stats.start);
	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		return true;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.start_fail);
	return false;
}
static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;
	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
	default:
		return false;
	}
}
/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				    dummy);
	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}
/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}
/*
 * Notify the host about allocated page so that host can use it without
 * fear that guest will need it. Host may reject some pages, we need to
 * check the return value and maybe submit a different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				    unsigned int *hv_status, unsigned int *target)
{
	unsigned long status, dummy = 0;

	STATS_INC(b->stats.lock);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.lock);

	status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}
/*
 * Notify the host that guest intends to release given page back into
 * the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
					unsigned int *target)
{
	unsigned long status, dummy = 0;

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}
static bool vmballoon_send_batched_unlock(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}
/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.free);
	}

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		vunmap(b->batch_page);
		__free_page(b->page);
	}
}
/*
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list, those refused pages are then released at the end of the
 * inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
					  target);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc);

		if (hv_status == VMW_BALLOON_ERROR_RESET ||
		    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			__free_page(page);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			b->n_refused_pages++;
			list_add(&page->lru, &b->refused_pages);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &b->pages);
	/* update balloon size */
	b->size++;
	return 0;
}
static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	locked = vmballoon_send_batched_lock(b, num_pages, target);

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

		locked = vmballoon_batch_get_status(b->batch_page, i);

		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &b->pages);

		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &b->refused_pages);
				b->n_refused_pages++;
			}

		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			/* This should never happen */
/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
				unsigned int *target)
{
	struct page *page = b->page;

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		list_add(&page->lru, &b->pages);
		return -EIO;
	}
	/* deallocate page */
	__free_page(page);
	STATS_INC(b->stats.free);
	/* update balloon size */
	b->size--;
	return 0;
}
static int vmballoon_unlock_batched_page(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;

	hv_success = vmballoon_send_batched_unlock(b, num_pages, target);

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &b->pages);
		} else {
			/* deallocate page */
			__free_page(p);
			STATS_INC(b->stats.free);
			/* update balloon size */
			b->size--;
		}
	}

	return ret;
}
/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.refused_free);
	}

	b->n_refused_pages = 0;
}
static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
					struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
				(u64)page_to_pfn(p) << PAGE_SHIFT);
}
/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int rate;
	unsigned int allocations = 0;
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
	struct page *page;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, balloon driver can consume
	 * all available CPU cycles if too many pages are allocated in a
	 * second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slowdown page allocations considerably.
	 */

	/*
	 * Start with no sleep allocation rate which may be higher
	 * than sleeping allocation rate.
	 */
	rate = b->slow_allocation_cycles ? b->rate_alloc : UINT_MAX;

	pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
		 __func__, b->target - b->size, rate, b->rate_alloc);

	while (!b->reset_required &&
		b->size < b->target && num_pages < b->target - b->size) {

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = alloc_page(flags);
		if (!page) {
			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						    VMW_BALLOON_RATE_ALLOC_MIN);
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}
			STATS_INC(b->stats.alloc_fail);

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (allocations >= b->rate_alloc)
				break;

			flags = VMW_PAGE_ALLOC_CANSLEEP;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
			continue;
		}

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, &b->target);
			num_pages = 0;
			if (error)
				break;
		}

		if (allocations >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, &b->target);

	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
	if (error == 0 && allocations >= b->rate_alloc) {
		unsigned int mult = allocations / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	vmballoon_release_refused_pages(b);
}
/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned int i = 0;
	unsigned int num_pages = 0;
	int error;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		b->ops->add_page(b, num_pages++, page);

		if (num_pages == b->batch_max_pages) {
			error = b->ops->unlock(b, num_pages, &b->target);
			num_pages = 0;
			if (error)
				return;
		}

		if (b->reset_required || ++i >= b->size - b->target)
			break;
	}

	if (num_pages > 0)
		b->ops->unlock(b, num_pages, &b->target);
}
static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};
static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};
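
/*
 * Allocate the page used to communicate batched requests to the host:
 * a single page is allocated and mapped through vmap() so it can be
 * filled via vmballoon_batch_set_pa() before a batched lock/unlock
 * command is issued.
 */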
static bool vmballoon_init_batching(struct vmballoon *b)
{
	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
	if (!b->page)
		return false;

	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
	if (!b->batch_page) {
		__free_page(b->page);
		return false;
	}

	return true;
}
/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;
	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}
/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (b->size > target)
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}
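
/*
 * DEBUGFS Interface: with CONFIG_DEBUG_FS enabled the statistics above
 * are exported through a read-only "vmmemctl" debugfs file.
 */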
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		"balloon capabilities: %#4x\n"
		"used capabilities: %#4lx\n",
		VMW_BALLOON_CAPABILITIES, b->capabilities);

	/* format size info */
	seq_printf(f,
		"target: %8d pages\n"
		"current: %8d pages\n",
		b->target, b->size);

	/* format rate info */
	seq_printf(f,
		"rateSleepAlloc: %8d pages/sec\n",
		b->rate_alloc);

	seq_printf(f,
		"start: %8u (%4u failed)\n"
		"guestType: %8u (%4u failed)\n"
		"lock: %8u (%4u failed)\n"
		"unlock: %8u (%4u failed)\n"
		"target: %8u (%4u failed)\n"
		"primNoSleepAlloc: %8u (%4u failed)\n"
		"primCanSleepAlloc: %8u (%4u failed)\n"
		stats->start, stats->start_fail,
		stats->guest_type, stats->guest_type_fail,
		stats->lock, stats->lock_fail,
		stats->unlock, stats->unlock_fail,
		stats->target, stats->target_fail,
		stats->alloc, stats->alloc_fail,
		stats->sleep_alloc, stats->sleep_alloc_fail,
		stats->refused_alloc, stats->refused_free);

	return 0;
}
static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner = THIS_MODULE,
	.open = vmballoon_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */
static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	INIT_LIST_HEAD(&balloon.pages);
	INIT_LIST_HEAD(&balloon.refused_pages);

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	if (!vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES)) {
		pr_err("failed to send start command to the host\n");
		return -EIO;
	}

	if ((balloon.capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		balloon.ops = &vmballoon_batched_ops;
		balloon.batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(&balloon)) {
			pr_err("failed to init batching\n");
			return -EIO;
		}
	} else if ((balloon.capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		balloon.ops = &vmballoon_basic_ops;
		balloon.batch_max_pages = 1;
	}

	if (!vmballoon_send_guest_id(&balloon)) {
		pr_err("failed to send guest ID to the host\n");
		return -EIO;
	}

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);
static void __exit vmballoon_exit(void)
{
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);