/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>
/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */
/*
 * Protocol versions. The low word is the minor version, the high word is
 * the major version.
 *
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
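/*
 * Illustrative note (not part of the protocol headers): with the macros
 * above, protocol 2.0 (DYNMEM_PROTOCOL_VERSION_WIN10 below) is encoded as
 * DYNMEM_MAKE_VERSION(2, 0) == 0x00020000; decoding it back gives
 * DYNMEM_MAJOR_VERSION(0x00020000) == 2 and
 * DYNMEM_MINOR_VERSION(0x00020000) == 0.
 */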
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
enum dm_message_type {
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
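/*
 * A rough sketch of a typical exchange, inferred from the handlers in this
 * file (not a normative list): the guest sends DM_VERSION_REQUEST and
 * DM_CAPABILITIES_REPORT and waits for the corresponding responses; it then
 * posts unsolicited DM_STATUS_REPORT messages about once a second, while the
 * host may send DM_BALLOON_REQUEST, DM_UNBALLOON_REQUEST or
 * DM_MEM_HOT_ADD_REQUEST transactions that the guest answers with the
 * matching *_RESPONSE messages.
 */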
/*
 * Structures defining the dynamic memory management
 * protocol.
 */
	/*
	 * To support guests that may have alignment
	 * limitations on hot-add, the guest can specify
	 * its alignment requirements; a value of n
	 * represents an alignment of 2^n in megabytes.
	 */
	__u64 hot_add_alignment:4;
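	/*
	 * Worked example (illustrative only): this driver reports
	 * hot_add_alignment = 7 in balloon_probe(), i.e. it asks the host
	 * to align hot-add requests to 2^7 = 128 MB.
	 */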
union dm_mem_page_range {
	/*
	 * The PFN number of the first page in the range.
	 * 40 bits is the architectural limit of a PFN
	 * number.
	 */
	/*
	 * The number of pages in the range.
	 */
/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
/*
 * Version response message; sent from the host to the guest.
 * It indicates whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	__u64 max_page_number;
/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies the guest whether the host has accepted the
 * guest's capabilities. If the host has not accepted them, the guest must
 * shut down the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows-specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

	struct dm_header hdr;
	__u64 page_file_size;
	__u32 page_file_writes;
/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

	struct dm_header hdr;
/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction;
 * if TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction;
 * if TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	union dm_mem_page_range range_array[];
/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 */

struct dm_unballoon_response {
	struct dm_header hdr;
/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * memory at arbitrary granularity.
 */

	struct dm_header hdr;
	union dm_mem_page_range range;
/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 */

struct dm_hot_add_response {
	struct dm_header hdr;
/*
 * Types of information sent from the host to the guest.
 */

	INFO_TYPE_MAX_PAGE_CNT = 0,

/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * info_size: size of the information blob.
 * info: information blob.
 */

	struct dm_header hdr;

/*
 * End protocol definitions.
 */
/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	struct list_head gap_list;

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
struct balloon_state {
	struct work_struct wrk;

	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);
/*
 * Driver specific state.
 */

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
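/*
 * Worked numbers (illustrative only, assuming the usual 4 KiB PAGE_SIZE):
 * PAGES_IN_2M = 512 because 2 MiB / 4 KiB = 512, and HA_CHUNK = 32 * 1024
 * pages = 128 MiB, which matches the 2^7 MB hot-add alignment the driver
 * reports to the host.
 */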
struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;

	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regard to memory pressure in the guest.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, num_pages_onlined counter and individual
	 * regions from ha_region_list.
	 */

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */

	/*
	 * The negotiated version agreed by the host.
	 */

static struct hv_dynmem_device dm_device;
static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
	struct memory_notify *mem = (struct memory_notify *)v;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined += mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined -= mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
/* Check if the particular page is backed and can be onlined; if so, online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	struct hv_hotadd_gap *gap;

	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

	/* The page is not backed. */
	if (((unsigned long)pg < cur_start_pgp) ||
	    ((unsigned long)pg >= cur_end_pgp))

	/* Check for gaps. */
	list_for_each_entry(gap, &has->gap_list, list) {
		cur_start_pgp = (unsigned long)pfn_to_page(gap->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(gap->end_pfn);
		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {

	/* This frame is currently backed; online the page. */
	__online_page_set_limits(pg);
	__online_page_increment_counters(pg);
	__online_page_free(pg);
static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
			processed_pfn = total_pfn;

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = !memhp_auto_online;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				 (HA_CHUNK << PAGE_SHIFT));

			pr_warn("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the failure
				 * is not transient. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 */

			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		/*
		 * Wait for the memory block to be onlined when memory onlining
		 * is done outside of the kernel (memhp_auto_online). Since the
		 * hot add has succeeded, it is ok to proceed even if the pages
		 * in the hot added region have not been "onlined" within the
		 */
		if (dm_device.ha_waiting)
			wait_for_completion_timeout(&dm_device.ol_waitevent,
		post_status(&dm_device);
static void hv_online_page(struct page *pg)
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

		/* The page belongs to a different HAS. */
		if (((unsigned long)pg < cur_start_pgp) ||
		    ((unsigned long)pg >= cur_end_pgp))

		hv_page_online_one(has, pg);

	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;

		/*
		 * If the current hot-add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)

			has->end_pfn += new_inc;

	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)

			has->covered_end_pfn += pgs_ol;

			/*
			 * Check if the corresponding memory block is already
			 * online by checking its last previously backed page.
			 * In case it is, we need to bring the rest (which was
			 * not backed previously) online too.
			 */
			if (start_pfn > has->start_pfn &&
			    !PageReserved(pfn_to_page(start_pfn - 1)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)

			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);

		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		res = has->covered_end_pfn - old_covered_state;

	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
	struct hv_hotadd_state *ha_region = NULL;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	/*
	 * Process the page range specified, bringing the pages
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
static void hot_add_req(struct work_struct *dummy)
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;

	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_start = pg_start;
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;
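		/*
		 * Worked example (illustrative only): a request for
		 * pfn_cnt = 40000 pages rounds region_size up to
		 * 2 * HA_CHUNK = 65536 pages, i.e. 256 MB with 4 KiB pages.
		 */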
		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;

	resp.page_count = process_hot_add(pg_start, pfn_cnt,

	dm->num_pages_added += resp.page_count;

	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
	else if (!do_hot_add)

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		if (info_hdr->data_size == sizeof(__u64)) {
			__u64 *max_page_count = (__u64 *)&info_hdr[1];

			pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",

		pr_info("Received Unknown type: %d\n", info_hdr->type);
static unsigned long compute_balloon_floor(void)
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 * max MiB -> min MiB gradient
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
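	/*
	 * Worked example (illustrative only): for a guest with 4096 MiB of
	 * RAM, totalram_pages falls in the 2048..8192 MiB bracket, so the
	 * floor is MB2PAGES(232) + MB2PAGES(4096)/16 = MB2PAGES(232 + 256),
	 * i.e. 488 MiB kept back from ballooning.
	 */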
/*
 * Post our status, as it relates to memory pressure, to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */
static void post_status(struct hv_dynmem_device *dm)
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;

	if (!time_after(now, (last_post_time + HZ)))

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = si_mem_available();
	status.num_committed = vm_memory_committed() +
			       dm->num_pages_ballooned +
			       (dm->num_pages_added > dm->num_pages_onlined ?
				dm->num_pages_added - dm->num_pages_onlined : 0) +
			       compute_balloon_floor();
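	/*
	 * Illustrative arithmetic (hypothetical numbers): with 1000 pages
	 * currently ballooned out, 524288 pages hot-added but only 262144 of
	 * them onlined, and a floor of 65536 pages, the report adds
	 * 1000 + (524288 - 262144) + 65536 = 328680 pages on top of
	 * vm_memory_committed().
	 */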
	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced, don't post the status.
	 */
	if (last_post != last_post_time)

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		dm->num_pages_ballooned--;
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
	if (num_pages < alloc_unit)

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));

			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
static void balloon_up(struct work_struct *dummy)
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		pr_warn("Balloon request will be partially fulfilled. %s\n",
			avail_pages < num_pages ? "Not enough memory." :
			"Balloon floor reached.");

		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
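		/*
		 * Illustrative numbers (hypothetical): with avail_pages =
		 * 600000 and floor = 100000, a request is trimmed to
		 * 500000 pages and then rounded down to 499712, the nearest
		 * multiple of PAGES_IN_2M.
		 */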
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			pr_debug("Ballooned %u out of %u requested pages.\n",
				 num_pages, dm_device.balloon_wrk.num_pages);

			bl_resp->more_pages = 0;
			dm_device.state = DM_INITIALIZED;

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
					       (unsigned long)NULL,
					       VM_PKT_DATA_INBAND, 0);

			post_status(&dm_device);
		} while (ret == -EAGAIN);

			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);
static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);

	pr_debug("Freed %u ballooned pages.\n",
		 prev_pages_ballooned - dm->num_pages_ballooned);

	if (req->more_pages == 1)

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
	struct dm_version_request version_req;

	if (vresp->is_accepted) {
		/*
		 * We are done; wake up the
		 * context waiting for version
		 */
		complete(&dm->host_event);

	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not,
	 * shut down the service since we are not able
	 * to negotiate a suitable version number
	 */
	if (dm->next_version == 0)

	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = dm->next_version;
	dm->version = version_req.version.version;

	/*
	 * Set the next version to try in case the current version fails.
	 * The Win7 protocol ought to be the last one to try.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;

		dm->next_version = 0;
		version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;

	complete(&dm->host_event);
static void balloon_onchannelcallback(void *context)
	struct hv_device *dev = context;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
				(struct dm_version_response *)dm_msg);

		case DM_CAPABILITIES_RESPONSE:
				(struct dm_capabilities_resp_msg *)dm_msg);

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
				(struct dm_unballoon_request *)recv_buffer);

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 */
				dm->host_specified_ha_region = false;
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			schedule_work(&dm_device.ha_wrk.wrk);

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);

			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);

	hv_set_drvdata(dev, &dm_device);

	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
	version_req.is_last_attempt = 0;
	dm_device.version = version_req.version.version;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {

	pr_info("Using Dynamic Memory protocol version %u.%u\n",
		DYNMEM_MAJOR_VERSION(dm_device.version),
		DYNMEM_MINOR_VERSION(dm_device.version));

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as they relate to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {

	dm_device.state = DM_INITIALIZED;

#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	kthread_stop(dm_device.thread);

	vmbus_close(dev->channel);
static int balloon_remove(struct hv_device *dev)
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;
	unsigned long flags;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);

#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);

		list_del(&has->list);

	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,

static int __init init_balloon_drv(void)
	return vmbus_driver_register(&balloon_drv);

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");