Diffstat (limited to 'drivers/hv/hv_balloon.c')
-rw-r--r--  drivers/hv/hv_balloon.c | 1267
1 file changed, 941 insertions, 326 deletions
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 4c605c70ebf9..2b4080e51f97 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1,37 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012, Microsoft Corporation. * * Author: * K. Y. Srinivasan <kys@microsoft.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/cleanup.h> #include <linux/kernel.h> +#include <linux/jiffies.h> #include <linux/mman.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/completion.h> +#include <linux/count_zeros.h> #include <linux/memory_hotplug.h> #include <linux/memory.h> #include <linux/notifier.h> #include <linux/percpu_counter.h> +#include <linux/page_reporting.h> +#include <linux/sizes.h> #include <linux/hyperv.h> +#include <hyperv/hvhdk.h> + +#include <asm/mshyperv.h> + +#define CREATE_TRACE_POINTS +#include "hv_trace_balloon.h" /* * We begin with definitions supporting the Dynamic Memory protocol @@ -40,8 +42,6 @@ * Begin protocol definitions. */ - - /* * Protocol versions. The low word is the minor version, the high word the major * version. @@ -61,15 +61,15 @@ enum { DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3), DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0), + DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0), DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1, DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2, + DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3, - DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8 + DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10 }; - - /* * Message Types */ @@ -98,7 +98,6 @@ enum dm_message_type { DM_VERSION_1_MAX = 12 }; - /* * Structures defining the dynamic memory management * protocol. @@ -112,7 +111,6 @@ union dm_version { __u32 version; } __packed; - union dm_caps { struct { __u64 balloon:1; @@ -145,8 +143,6 @@ union dm_mem_page_range { __u64 page_range; } __packed; - - /* * The header for all dynamic memory messages: * @@ -171,7 +167,6 @@ struct dm_message { __u8 data[]; /* enclosed message */ } __packed; - /* * Specific message types supporting the dynamic memory protocol. */ @@ -247,7 +242,7 @@ struct dm_capabilities_resp_msg { * num_committed: Committed memory in pages. * page_file_size: The accumulated size of all page files * in the system in pages. - * zero_free: The nunber of zero and free pages. + * zero_free: The number of zero and free pages. * page_file_writes: The writes to the page file in pages. * io_diff: An indicator of file cache efficiency or page file activity, * calculated as File Cache Page Fault Count - Page Read Count. @@ -268,7 +263,6 @@ struct dm_status { __u32 io_diff; } __packed; - /* * Message to ask the guest to allocate memory - balloon up message. * This message is sent from the host to the guest. 
The guest may not be @@ -283,14 +277,13 @@ struct dm_balloon { __u32 reservedz; } __packed; - /* * Balloon response message; this message is sent from the guest * to the host in response to the balloon message. * * reservedz: Reserved; must be set to zero. * more_pages: If FALSE, this is the last message of the transaction. - * if TRUE there will atleast one more message from the guest. + * if TRUE there will be at least one more message from the guest. * * range_count: The number of ranges in the range array. * @@ -311,7 +304,7 @@ struct dm_balloon_response { * to the guest to give guest more memory. * * more_pages: If FALSE, this is the last message of the transaction. - * if TRUE there will atleast one more message from the guest. + * if TRUE there will be at least one more message from the guest. * * reservedz: Reserved; must be set to zero. * @@ -339,14 +332,11 @@ struct dm_unballoon_response { struct dm_header hdr; } __packed; - /* * Hot add request message. Message sent from the host to the guest. * * mem_range: Memory range to hot add. * - * On Linux we currently don't support this since we cannot hot add - * arbitrary granularity of memory. */ struct dm_hot_add { @@ -389,7 +379,6 @@ enum dm_info_type { MAX_INFO_TYPE }; - /* * Header for the information message. */ @@ -424,11 +413,11 @@ struct dm_info_msg { * The range start_pfn : end_pfn specifies the range * that the host has asked us to hot add. The range * start_pfn : ha_end_pfn specifies the range that we have - * currently hot added. We hot add in multiples of 128M - * chunks; it is possible that we may not be able to bring - * online all the pages in the region. The range - * covered_start_pfn : covered_end_pfn defines the pages that can - * be brough online. + * currently hot added. We hot add in chunks equal to the + * memory block size; it is possible that we may not be able + * to bring online all the pages in the region. The range + * covered_start_pfn:covered_end_pfn defines the pages that can + * be brought online. */ struct hv_hotadd_state { @@ -438,6 +427,16 @@ struct hv_hotadd_state { unsigned long covered_end_pfn; unsigned long ha_end_pfn; unsigned long end_pfn; + /* + * A list of gaps. + */ + struct list_head gap_list; +}; + +struct hv_hotadd_gap { + struct list_head list; + unsigned long start_pfn; + unsigned long end_pfn; }; struct balloon_state { @@ -451,6 +450,7 @@ struct hot_add_wrk { struct work_struct wrk; }; +static bool allow_hibernation; static bool hot_add = true; static bool do_hot_add; /* @@ -458,15 +458,24 @@ static bool do_hot_add; * the specified number of seconds. */ static uint pressure_report_delay = 45; +extern unsigned int page_reporting_order; +#define HV_MAX_FAILURES 2 -module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); +/* + * The last time we posted a pressure report to host. + */ +static unsigned long last_post_time; + +static int hv_hypercall_multi_failure; + +module_param(hot_add, bool, 0644); MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); -module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); +module_param(pressure_report_delay, uint, 0644); MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); static atomic_t trans_id = ATOMIC_INIT(0); -static int dm_ring_size = (5 * PAGE_SIZE); +static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024); /* * Driver specific state. 
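Aside: a minimal user-space sketch (not part of the diff) of how the protocol version word above packs. Per the comment near the top of the file, the major version sits in the high 16 bits and the minor in the low 16 bits, so DYNMEM_PROTOCOL_VERSION_3, built with DYNMEM_MAKE_VERSION(2, 0), encodes as 0x00020000 and is logged as "2.0" after negotiation. The exact kernel macro bodies are elided from these hunks, so the helper below is illustrative, not a quote.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the documented layout: high word = major, low word = minor. */
static uint32_t dynmem_make_version(uint16_t major, uint16_t minor)
{
	return ((uint32_t)major << 16) | minor;
}

int main(void)
{
	uint32_t v3 = dynmem_make_version(2, 0); /* DYNMEM_PROTOCOL_VERSION_3 */

	/* Prints "0x00020000 -> 2.0", matching the "Using Dynamic Memory
	 * protocol version %u.%u" message the driver logs. */
	printf("0x%08x -> %u.%u\n", v3,
	       (unsigned)(v3 >> 16), (unsigned)(v3 & 0xffff));
	return 0;
}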
@@ -481,11 +490,13 @@ enum hv_dm_state { DM_INIT_ERROR }; +static __u8 recv_buffer[HV_HYP_PAGE_SIZE]; +static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE]; -static __u8 recv_buffer[PAGE_SIZE]; -static __u8 *send_buffer; -#define PAGES_IN_2M 512 -#define HA_CHUNK (32 * 1024) +static unsigned long ha_pages_in_chunk; +#define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT) + +#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE) struct hv_dynmem_device { struct hv_device *dev; @@ -497,6 +508,8 @@ struct hv_dynmem_device { * Number of pages we have currently ballooned out. */ unsigned int num_pages_ballooned; + unsigned int num_pages_onlined; + unsigned int num_pages_added; /* * State to manage the ballooning (up) operation. @@ -518,7 +531,6 @@ struct hv_dynmem_device { * State to synchronize hot-add. */ struct completion ol_waitevent; - bool ha_waiting; /* * This thread handles hot-add * requests from the host as well as notifying @@ -528,6 +540,12 @@ struct hv_dynmem_device { struct task_struct *thread; /* + * Protects ha_region_list, num_pages_onlined counter and individual + * regions from ha_region_list. + */ + spinlock_t ha_lock; + + /* * A list of hot-add regions. */ struct list_head ha_region_list; @@ -538,23 +556,151 @@ struct hv_dynmem_device { * next version to try. */ __u32 next_version; + + /* + * The negotiated version agreed by host. + */ + __u32 version; + + struct page_reporting_dev_info pr_dev_info; + + /* + * Maximum number of pages that can be hot_add-ed + */ + __u64 max_dynamic_page_count; }; static struct hv_dynmem_device dm_device; +static void post_status(struct hv_dynmem_device *dm); + +static void enable_page_reporting(void); + +static void disable_page_reporting(void); + #ifdef CONFIG_MEMORY_HOTPLUG +static inline bool has_pfn_is_backed(struct hv_hotadd_state *has, + unsigned long pfn) +{ + struct hv_hotadd_gap *gap; + + /* The page is not backed. */ + if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn) + return false; -static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size) + /* Check for gaps. */ + list_for_each_entry(gap, &has->gap_list, list) { + if (pfn >= gap->start_pfn && pfn < gap->end_pfn) + return false; + } + + return true; +} + +static unsigned long hv_page_offline_check(unsigned long start_pfn, + unsigned long nr_pages) { - int i; + unsigned long pfn = start_pfn, count = 0; + struct hv_hotadd_state *has; + bool found; - for (i = 0; i < size; i++) { - struct page *pg; - pg = pfn_to_page(start_pfn + i); - __online_page_set_limits(pg); - __online_page_increment_counters(pg); - __online_page_free(pg); + while (pfn < start_pfn + nr_pages) { + /* + * Search for HAS which covers the pfn and when we find one + * count how many consequitive PFNs are covered. + */ + found = false; + list_for_each_entry(has, &dm_device.ha_region_list, list) { + while ((pfn >= has->start_pfn) && + (pfn < has->end_pfn) && + (pfn < start_pfn + nr_pages)) { + found = true; + if (has_pfn_is_backed(has, pfn)) + count++; + pfn++; + } + } + + /* + * This PFN is not in any HAS (e.g. we're offlining a region + * which was present at boot), no need to account for it. Go + * to the next one. 
+ */ + if (!found) + pfn++; } + + return count; +} + +static int hv_memory_notifier(struct notifier_block *nb, unsigned long val, + void *v) +{ + struct memory_notify *mem = (struct memory_notify *)v; + unsigned long pfn_count; + + switch (val) { + case MEM_ONLINE: + case MEM_CANCEL_ONLINE: + complete(&dm_device.ol_waitevent); + break; + + case MEM_OFFLINE: + scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { + pfn_count = hv_page_offline_check(mem->start_pfn, + mem->nr_pages); + if (pfn_count <= dm_device.num_pages_onlined) { + dm_device.num_pages_onlined -= pfn_count; + } else { + /* + * We're offlining more pages than we + * managed to online. This is + * unexpected. In any case don't let + * num_pages_onlined wrap around zero. + */ + WARN_ON_ONCE(1); + dm_device.num_pages_onlined = 0; + } + } + break; + case MEM_GOING_ONLINE: + case MEM_GOING_OFFLINE: + case MEM_CANCEL_OFFLINE: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block hv_memory_nb = { + .notifier_call = hv_memory_notifier, + .priority = 0 +}; + +/* Check if the particular page is backed and can be onlined and online it. */ +static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg) +{ + if (!has_pfn_is_backed(has, page_to_pfn(pg))) { + if (!PageOffline(pg)) + __SetPageOffline(pg); + return; + } else if (!PageOffline(pg)) + return; + + /* This frame is currently backed; online the page. */ + generic_online_page(pg, 0); + + lockdep_assert_held(&dm_device.ha_lock); + dm_device.num_pages_onlined++; +} + +static void hv_bring_pgs_online(struct hv_hotadd_state *has, + unsigned long start_pfn, unsigned long size) +{ + int i; + + pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn); + for (i = 0; i < size; i++) + hv_page_online_one(has, pfn_to_page(start_pfn + i)); } static void hv_mem_hot_add(unsigned long start, unsigned long size, @@ -562,34 +708,29 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, struct hv_hotadd_state *has) { int ret = 0; - int i, nid, t; + int i, nid; unsigned long start_pfn; unsigned long processed_pfn; unsigned long total_pfn = pfn_count; - for (i = 0; i < (size/HA_CHUNK); i++) { - start_pfn = start + (i * HA_CHUNK); - has->ha_end_pfn += HA_CHUNK; + for (i = 0; i < (size/ha_pages_in_chunk); i++) { + start_pfn = start + (i * ha_pages_in_chunk); - if (total_pfn > HA_CHUNK) { - processed_pfn = HA_CHUNK; - total_pfn -= HA_CHUNK; - } else { - processed_pfn = total_pfn; - total_pfn = 0; + scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { + has->ha_end_pfn += ha_pages_in_chunk; + processed_pfn = umin(total_pfn, ha_pages_in_chunk); + total_pfn -= processed_pfn; + has->covered_end_pfn += processed_pfn; } - has->covered_end_pfn += processed_pfn; - - init_completion(&dm_device.ol_waitevent); - dm_device.ha_waiting = true; + reinit_completion(&dm_device.ol_waitevent); nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); ret = add_memory(nid, PFN_PHYS((start_pfn)), - (HA_CHUNK << PAGE_SHIFT)); + HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE); if (ret) { - pr_info("hot_add memory failed error is %d\n", ret); + pr_err("hot_add memory failed error is %d\n", ret); if (ret == -EEXIST) { /* * This error indicates that the error @@ -600,131 +741,118 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, */ do_hot_add = false; } - has->ha_end_pfn -= HA_CHUNK; - has->covered_end_pfn -= processed_pfn; + scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { + has->ha_end_pfn -= ha_pages_in_chunk; + has->covered_end_pfn -= processed_pfn; + } 
break; } /* - * Wait for the memory block to be onlined. + * Wait for memory to get onlined. If the kernel onlined the + * memory when adding it, this will return directly. Otherwise, + * it will wait for user space to online the memory. This helps + * to avoid adding memory faster than it is getting onlined. As + * adding succeeded, it is ok to proceed even if the memory was + * not onlined in time. */ - t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); - if (t == 0) { - pr_info("hot_add memory timedout\n"); - has->ha_end_pfn -= HA_CHUNK; - has->covered_end_pfn -= processed_pfn; - break; - } - + wait_for_completion_timeout(&dm_device.ol_waitevent, secs_to_jiffies(5)); + post_status(&dm_device); } - - return; } -static void hv_online_page(struct page *pg) +static void hv_online_page(struct page *pg, unsigned int order) { - struct list_head *cur; struct hv_hotadd_state *has; - unsigned long cur_start_pgp; - unsigned long cur_end_pgp; + unsigned long pfn = page_to_pfn(pg); - if (dm_device.ha_waiting) { - dm_device.ha_waiting = false; - complete(&dm_device.ol_waitevent); - } + scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { + list_for_each_entry(has, &dm_device.ha_region_list, list) { + /* The page belongs to a different HAS. */ + if (pfn < has->start_pfn || + (pfn + (1UL << order) > has->end_pfn)) + continue; - list_for_each(cur, &dm_device.ha_region_list) { - has = list_entry(cur, struct hv_hotadd_state, list); - cur_start_pgp = (unsigned long) - pfn_to_page(has->covered_start_pfn); - cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn); - - if (((unsigned long)pg >= cur_start_pgp) && - ((unsigned long)pg < cur_end_pgp)) { - /* - * This frame is currently backed; online the - * page. - */ - __online_page_set_limits(pg); - __online_page_increment_counters(pg); - __online_page_free(pg); - has->covered_start_pfn++; + hv_bring_pgs_online(has, pfn, 1UL << order); + return; } } + generic_online_page(pg, order); } -static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) +static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) { - struct list_head *cur; struct hv_hotadd_state *has; - unsigned long residual, new_inc; - - if (list_empty(&dm_device.ha_region_list)) - return false; - - list_for_each(cur, &dm_device.ha_region_list) { - has = list_entry(cur, struct hv_hotadd_state, list); + struct hv_hotadd_gap *gap; + unsigned long residual; + int ret = 0; + guard(spinlock_irqsave)(&dm_device.ha_lock); + list_for_each_entry(has, &dm_device.ha_region_list, list) { /* * If the pfn range we are dealing with is not in the current * "hot add block", move on. */ - if ((start_pfn >= has->end_pfn)) + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) continue; + /* - * If the current hot add-request extends beyond - * our current limit; extend it. + * If the current start pfn is not where the covered_end + * is, create a gap and update covered_end_pfn. */ - if ((start_pfn + pfn_cnt) > has->end_pfn) { - residual = (start_pfn + pfn_cnt - has->end_pfn); - /* - * Extend the region by multiples of HA_CHUNK. 
- */ - new_inc = (residual / HA_CHUNK) * HA_CHUNK; - if (residual % HA_CHUNK) - new_inc += HA_CHUNK; + if (has->covered_end_pfn != start_pfn) { + gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC); + if (!gap) { + ret = -ENOMEM; + break; + } - has->end_pfn += new_inc; + INIT_LIST_HEAD(&gap->list); + gap->start_pfn = has->covered_end_pfn; + gap->end_pfn = start_pfn; + list_add_tail(&gap->list, &has->gap_list); + + has->covered_end_pfn = start_pfn; } /* - * If the current start pfn is not where the covered_end - * is, update it. + * If the current hot add-request extends beyond + * our current limit; extend it. */ - - if (has->covered_end_pfn != start_pfn) { - has->covered_end_pfn = start_pfn; - has->covered_start_pfn = start_pfn; + if ((start_pfn + pfn_cnt) > has->end_pfn) { + /* Extend the region by multiples of ha_pages_in_chunk */ + residual = (start_pfn + pfn_cnt - has->end_pfn); + has->end_pfn += ALIGN(residual, ha_pages_in_chunk); } - return true; + ret = 1; + break; } - return false; + return ret; } static unsigned long handle_pg_range(unsigned long pg_start, - unsigned long pg_count) + unsigned long pg_count) { unsigned long start_pfn = pg_start; unsigned long pfn_cnt = pg_count; unsigned long size; - struct list_head *cur; struct hv_hotadd_state *has; unsigned long pgs_ol = 0; unsigned long old_covered_state; + unsigned long res = 0, flags; - if (list_empty(&dm_device.ha_region_list)) - return 0; - - list_for_each(cur, &dm_device.ha_region_list) { - has = list_entry(cur, struct hv_hotadd_state, list); + pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count, + pg_start); + spin_lock_irqsave(&dm_device.ha_lock, flags); + list_for_each_entry(has, &dm_device.ha_region_list, list) { /* * If the pfn range we are dealing with is not in the current * "hot add block", move on. */ - if ((start_pfn >= has->end_pfn)) + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) continue; old_covered_state = has->covered_end_pfn; @@ -738,39 +866,50 @@ static unsigned long handle_pg_range(unsigned long pg_start, pgs_ol = has->ha_end_pfn - start_pfn; if (pgs_ol > pfn_cnt) pgs_ol = pfn_cnt; - hv_bring_pgs_online(start_pfn, pgs_ol); + has->covered_end_pfn += pgs_ol; - has->covered_start_pfn += pgs_ol; pfn_cnt -= pgs_ol; + /* + * Check if the corresponding memory block is already + * online. It is possible to observe struct pages still + * being uninitialized here so check section instead. + * In case the section is online we need to bring the + * rest of pfns (which were not backed previously) + * online too. + */ + if (start_pfn > has->start_pfn && + online_section_nr(pfn_to_section_nr(start_pfn))) + hv_bring_pgs_online(has, start_pfn, pgs_ol); } - if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) { + if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) { /* * We have some residual hot add range * that needs to be hot added; hot add * it now. Hot add a multiple of - * of HA_CHUNK that fully covers the pages + * ha_pages_in_chunk that fully covers the pages * we have. */ size = (has->end_pfn - has->ha_end_pfn); if (pfn_cnt <= size) { - size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK); - if (pfn_cnt % HA_CHUNK) - size += HA_CHUNK; + size = ALIGN(pfn_cnt, ha_pages_in_chunk); } else { pfn_cnt = size; } + spin_unlock_irqrestore(&dm_device.ha_lock, flags); hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has); + spin_lock_irqsave(&dm_device.ha_lock, flags); } /* * If we managed to online any pages that were given to us, * we declare success. 
*/ - return has->covered_end_pfn - old_covered_state; - + res = has->covered_end_pfn - old_covered_state; + break; } + spin_unlock_irqrestore(&dm_device.ha_lock, flags); - return 0; + return res; } static unsigned long process_hot_add(unsigned long pg_start, @@ -779,13 +918,19 @@ static unsigned long process_hot_add(unsigned long pg_start, unsigned long rg_size) { struct hv_hotadd_state *ha_region = NULL; + int covered; if (pfn_cnt == 0) return 0; - if (!dm_device.host_specified_ha_region) - if (pfn_covered(pg_start, pfn_cnt)) + if (!dm_device.host_specified_ha_region) { + covered = pfn_covered(pg_start, pfn_cnt); + if (covered < 0) + return 0; + + if (covered) goto do_pg_range; + } /* * If the host has specified a hot-add range; deal with it first. @@ -797,13 +942,17 @@ static unsigned long process_hot_add(unsigned long pg_start, return 0; INIT_LIST_HEAD(&ha_region->list); + INIT_LIST_HEAD(&ha_region->gap_list); - list_add_tail(&ha_region->list, &dm_device.ha_region_list); ha_region->start_pfn = rg_start; ha_region->ha_end_pfn = rg_start; ha_region->covered_start_pfn = pg_start; ha_region->covered_end_pfn = pg_start; ha_region->end_pfn = rg_start + rg_size; + + scoped_guard(spinlock_irqsave, &dm_device.ha_lock) { + list_add_tail(&ha_region->list, &dm_device.ha_region_list); + } } do_pg_range: @@ -828,7 +977,6 @@ static void hot_add_req(struct work_struct *dummy) memset(&resp, 0, sizeof(struct dm_hot_add_response)); resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE; resp.hdr.size = sizeof(struct dm_hot_add_response); - resp.hdr.trans_id = atomic_inc_return(&trans_id); #ifdef CONFIG_MEMORY_HOTPLUG pg_start = dm->ha_wrk.ha_page_range.finfo.start_page; @@ -837,10 +985,7 @@ static void hot_add_req(struct work_struct *dummy) rg_start = dm->ha_wrk.ha_region_range.finfo.start_page; rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt; - if ((rg_start == 0) && (!dm->host_specified_ha_region)) { - unsigned long region_size; - unsigned long region_start; - + if (rg_start == 0 && !dm->host_specified_ha_region) { /* * The host has not specified the hot-add region. * Based on the hot-add page range being specified, @@ -848,20 +993,15 @@ static void hot_add_req(struct work_struct *dummy) * that need to be hot-added while ensuring the alignment * and size requirements of Linux as it relates to hot-add. 
*/ - region_start = pg_start; - region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK; - if (pfn_cnt % HA_CHUNK) - region_size += HA_CHUNK; - - region_start = (pg_start / HA_CHUNK) * HA_CHUNK; - - rg_start = region_start; - rg_sz = region_size; + rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk); + rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk); } if (do_hot_add) resp.page_count = process_hot_add(pg_start, pfn_cnt, - rg_start, rg_sz); + rg_start, rg_sz); + + dm->num_pages_added += resp.page_count; #endif /* * The result field of the response structure has the @@ -886,10 +1026,15 @@ static void hot_add_req(struct work_struct *dummy) else resp.result = 0; - if (!do_hot_add || (resp.page_count == 0)) - pr_info("Memory hot add failed\n"); + if (!do_hot_add || resp.page_count == 0) { + if (!allow_hibernation) + pr_err("Memory hot add failed\n"); + else + pr_info("Ignore hot-add request!\n"); + } dm->state = DM_INITIALIZED; + resp.hdr.trans_id = atomic_inc_return(&trans_id); vmbus_sendpacket(dm->dev->channel, &resp, sizeof(struct dm_hot_add_response), (unsigned long)NULL, @@ -904,17 +1049,24 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) switch (info_hdr->type) { case INFO_TYPE_MAX_PAGE_CNT: - pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); - pr_info("Data Size is %d\n", info_hdr->data_size); + if (info_hdr->data_size == sizeof(__u64)) { + __u64 *max_page_count = (__u64 *)&info_hdr[1]; + + pr_info("Max. dynamic memory size: %llu MB\n", + (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT)); + dm->max_dynamic_page_count = *max_page_count; + } + break; default: - pr_info("Received Unknown type: %d\n", info_hdr->type); + pr_warn("Received Unknown type: %d\n", info_hdr->type); } } static unsigned long compute_balloon_floor(void) { unsigned long min_pages; + unsigned long nr_pages = totalram_pages(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient @@ -924,23 +1076,37 @@ static unsigned long compute_balloon_floor(void) * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) - * 8192 552 (1/32) - * 32768 1320 - * 131072 4392 + * 8192 744 (1/16) + * 32768 1512 (1/32) */ - if (totalram_pages < MB2PAGES(128)) - min_pages = MB2PAGES(8) + (totalram_pages >> 1); - else if (totalram_pages < MB2PAGES(512)) - min_pages = MB2PAGES(40) + (totalram_pages >> 2); - else if (totalram_pages < MB2PAGES(2048)) - min_pages = MB2PAGES(104) + (totalram_pages >> 3); + if (nr_pages < MB2PAGES(128)) + min_pages = MB2PAGES(8) + (nr_pages >> 1); + else if (nr_pages < MB2PAGES(512)) + min_pages = MB2PAGES(40) + (nr_pages >> 2); + else if (nr_pages < MB2PAGES(2048)) + min_pages = MB2PAGES(104) + (nr_pages >> 3); + else if (nr_pages < MB2PAGES(8192)) + min_pages = MB2PAGES(232) + (nr_pages >> 4); else - min_pages = MB2PAGES(296) + (totalram_pages >> 5); + min_pages = MB2PAGES(488) + (nr_pages >> 5); #undef MB2PAGES return min_pages; } /* + * Compute total committed memory pages + */ + +static unsigned long get_pages_committed(struct hv_dynmem_device *dm) +{ + return vm_memory_committed() + + dm->num_pages_ballooned + + (dm->num_pages_added > dm->num_pages_onlined ? + dm->num_pages_added - dm->num_pages_onlined : 0) + + compute_balloon_floor(); +} + +/* * Post our status as it relates memory pressure to the * host. Host expects the guests to post this status * periodically at 1 second intervals. 
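To make the piecewise floor above concrete, here is a small user-space sketch (assumes 4 KiB pages, i.e. a PAGE_SHIFT of 12; the band constants mirror the new compute_balloon_floor()). For a 4 GiB guest the 2048..8192 MiB band applies, so the floor is MB2PAGES(232) + (nr_pages >> 4), i.e. 232 MiB + 256 MiB = 488 MiB, which post_status() folds into the committed figure and balloon_up() refuses to balloon below.

#include <stdio.h>

#define MB2PAGES(mb) ((unsigned long)(mb) << (20 - 12)) /* assumes 4 KiB pages */

/* Same bands as the new compute_balloon_floor() in the hunk above. */
static unsigned long balloon_floor_pages(unsigned long nr_pages)
{
	if (nr_pages < MB2PAGES(128))
		return MB2PAGES(8) + (nr_pages >> 1);
	else if (nr_pages < MB2PAGES(512))
		return MB2PAGES(40) + (nr_pages >> 2);
	else if (nr_pages < MB2PAGES(2048))
		return MB2PAGES(104) + (nr_pages >> 3);
	else if (nr_pages < MB2PAGES(8192))
		return MB2PAGES(232) + (nr_pages >> 4);
	return MB2PAGES(488) + (nr_pages >> 5);
}

int main(void)
{
	/* A 4 GiB guest: floor = 232 MiB + 4096/16 MiB = 488 MiB. */
	printf("floor = %lu MiB\n",
	       balloon_floor_pages(MB2PAGES(4096)) >> (20 - 12));
	return 0;
}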
@@ -953,40 +1119,68 @@ static unsigned long compute_balloon_floor(void) static void post_status(struct hv_dynmem_device *dm) { struct dm_status status; - struct sysinfo val; + unsigned long now = jiffies; + unsigned long last_post = last_post_time; + unsigned long num_pages_avail, num_pages_committed; if (pressure_report_delay > 0) { --pressure_report_delay; return; } - si_meminfo(&val); + + if (!time_after(now, (last_post_time + HZ))) + return; + memset(&status, 0, sizeof(struct dm_status)); status.hdr.type = DM_STATUS_REPORT; status.hdr.size = sizeof(struct dm_status); status.hdr.trans_id = atomic_inc_return(&trans_id); /* - * The host expects the guest to report free memory. - * Further, the host expects the pressure information to - * include the ballooned out pages. - * For a given amount of memory that we are managing, we - * need to compute a floor below which we should not balloon. - * Compute this and add it to the pressure report. + * The host expects the guest to report free and committed memory. + * Furthermore, the host expects the pressure information to include + * the ballooned out pages. For a given amount of memory that we are + * managing we need to compute a floor below which we should not + * balloon. Compute this and add it to the pressure report. + * We also need to report all offline pages (num_pages_added - + * num_pages_onlined) as committed to the host, otherwise it can try + * asking us to balloon them out. */ - status.num_avail = val.freeram; - status.num_committed = vm_memory_committed() + - dm->num_pages_ballooned + - compute_balloon_floor(); + num_pages_avail = si_mem_available(); + num_pages_committed = get_pages_committed(dm); + + trace_balloon_status(num_pages_avail, num_pages_committed, + vm_memory_committed(), dm->num_pages_ballooned, + dm->num_pages_added, dm->num_pages_onlined); + + /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */ + status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE; + status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE; + /* + * If our transaction ID is no longer current, just don't + * send the status. This can happen if we were interrupted + * after we picked our transaction ID. + */ + if (status.hdr.trans_id != atomic_read(&trans_id)) + return; + + /* + * If the last post time that we sampled has changed, + * we have raced, don't post the status. 
+ */ + if (last_post != last_post_time) + return; + + last_post_time = jiffies; vmbus_sendpacket(dm->dev->channel, &status, sizeof(struct dm_status), (unsigned long)NULL, VM_PKT_DATA_INBAND, 0); - } static void free_balloon_pages(struct hv_dynmem_device *dm, - union dm_mem_page_range *range_array) + union dm_mem_page_range *range_array) { int num_pages = range_array->finfo.page_cnt; __u64 start_frame = range_array->finfo.start_page; @@ -995,26 +1189,25 @@ static void free_balloon_pages(struct hv_dynmem_device *dm, for (i = 0; i < num_pages; i++) { pg = pfn_to_page(i + start_frame); + __ClearPageOffline(pg); __free_page(pg); dm->num_pages_ballooned--; + mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, -1); + adjust_managed_page_count(pg, 1); } } - - -static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages, - struct dm_balloon_response *bl_resp, int alloc_unit, - bool *alloc_error) +static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, + unsigned int num_pages, + struct dm_balloon_response *bl_resp, + int alloc_unit) { - int i = 0; + unsigned int i, j; struct page *pg; - if (num_pages < alloc_unit) - return 0; - - for (i = 0; (i * alloc_unit) < num_pages; i++) { + for (i = 0; i < num_pages / alloc_unit; i++) { if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) > - PAGE_SIZE) + HV_HYP_PAGE_SIZE) return i * alloc_unit; /* @@ -1025,13 +1218,11 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages, __GFP_NOMEMALLOC | __GFP_NOWARN, get_order(alloc_unit << PAGE_SHIFT)); - if (!pg) { - *alloc_error = true; + if (!pg) return i * alloc_unit; - } - dm->num_pages_ballooned += alloc_unit; + mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, alloc_unit); /* * If we allocatted 2M pages; split them so we @@ -1041,57 +1232,72 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages, if (alloc_unit != 1) split_page(pg, get_order(alloc_unit << PAGE_SHIFT)); + /* mark all pages offline */ + for (j = 0; j < alloc_unit; j++) { + __SetPageOffline(pg + j); + adjust_managed_page_count(pg + j, -1); + } + bl_resp->range_count++; bl_resp->range_array[i].finfo.start_page = page_to_pfn(pg); bl_resp->range_array[i].finfo.page_cnt = alloc_unit; bl_resp->hdr.size += sizeof(union dm_mem_page_range); - } - return num_pages; + return i * alloc_unit; } - - static void balloon_up(struct work_struct *dummy) { - int num_pages = dm_device.balloon_wrk.num_pages; - int num_ballooned = 0; + unsigned int num_pages = dm_device.balloon_wrk.num_pages; + unsigned int num_ballooned = 0; struct dm_balloon_response *bl_resp; int alloc_unit; int ret; - bool alloc_error = false; bool done = false; int i; - + long avail_pages; + unsigned long floor; /* * We will attempt 2M allocations. However, if we fail to - * allocate 2M chunks, we will go back to 4k allocations. + * allocate 2M chunks, we will go back to PAGE_SIZE allocations. */ - alloc_unit = 512; + alloc_unit = PAGES_IN_2M; + + avail_pages = si_mem_available(); + floor = compute_balloon_floor(); + + /* Refuse to balloon below the floor. */ + if (avail_pages < num_pages || avail_pages - num_pages < floor) { + pr_info("Balloon request will be partially fulfilled. %s\n", + avail_pages < num_pages ? "Not enough memory." : + "Balloon floor reached."); + + num_pages = avail_pages > floor ? 
(avail_pages - floor) : 0; + } while (!done) { - bl_resp = (struct dm_balloon_response *)send_buffer; - memset(send_buffer, 0, PAGE_SIZE); + memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE); + bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer; bl_resp->hdr.type = DM_BALLOON_RESPONSE; - bl_resp->hdr.trans_id = atomic_inc_return(&trans_id); bl_resp->hdr.size = sizeof(struct dm_balloon_response); bl_resp->more_pages = 1; - num_pages -= num_ballooned; num_ballooned = alloc_balloon_pages(&dm_device, num_pages, - bl_resp, alloc_unit, - &alloc_error); + bl_resp, alloc_unit); - if ((alloc_error) && (alloc_unit != 1)) { + if (alloc_unit != 1 && num_ballooned == 0) { alloc_unit = 1; continue; } - if ((alloc_error) || (num_ballooned == num_pages)) { + if (num_ballooned == 0 || num_ballooned == num_pages) { + pr_debug("Ballooned %u out of %u requested pages.\n", + num_pages, dm_device.balloon_wrk.num_pages); + bl_resp->more_pages = 0; done = true; dm_device.state = DM_INITIALIZED; @@ -1104,6 +1310,7 @@ static void balloon_up(struct work_struct *dummy) */ do { + bl_resp->hdr.trans_id = atomic_inc_return(&trans_id); ret = vmbus_sendpacket(dm_device.dev->channel, bl_resp, bl_resp->hdr.size, @@ -1112,35 +1319,40 @@ static void balloon_up(struct work_struct *dummy) if (ret == -EAGAIN) msleep(20); - + post_status(&dm_device); } while (ret == -EAGAIN); if (ret) { /* * Free up the memory we allocatted. */ - pr_info("Balloon response failed\n"); + pr_err("Balloon response failed\n"); for (i = 0; i < bl_resp->range_count; i++) free_balloon_pages(&dm_device, - &bl_resp->range_array[i]); + &bl_resp->range_array[i]); done = true; } } - } static void balloon_down(struct hv_dynmem_device *dm, - struct dm_unballoon_request *req) + struct dm_unballoon_request *req) { union dm_mem_page_range *range_array = req->range_array; int range_count = req->range_count; struct dm_unballoon_response resp; int i; + unsigned int prev_pages_ballooned = dm->num_pages_ballooned; - for (i = 0; i < range_count; i++) + for (i = 0; i < range_count; i++) { free_balloon_pages(dm, &range_array[i]); + complete(&dm_device.config_event); + } + + pr_debug("Freed %u ballooned pages.\n", + prev_pages_ballooned - dm->num_pages_ballooned); if (req->more_pages == 1) return; @@ -1163,26 +1375,35 @@ static void balloon_onchannelcallback(void *context); static int dm_thread_func(void *dm_dev) { struct hv_dynmem_device *dm = dm_dev; - int t; while (!kthread_should_stop()) { - t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ); + wait_for_completion_interruptible_timeout(&dm_device.config_event, + secs_to_jiffies(1)); /* * The host expects us to post information on the memory * pressure every second. */ - - if (t == 0) - post_status(dm); - + reinit_completion(&dm_device.config_event); + post_status(dm); + /* + * disable free page reporting if multiple hypercall + * failure flag set. 
It is not done in the page_reporting + * callback context as that causes a deadlock between + * page_reporting_process() and page_reporting_unregister() + */ + if (hv_hypercall_multi_failure >= HV_MAX_FAILURES) { + pr_err("Multiple failures in cold memory discard hypercall, disabling page reporting\n"); + disable_page_reporting(); + /* Reset the flag after disabling reporting */ + hv_hypercall_multi_failure = 0; + } } return 0; } - static void version_resp(struct hv_dynmem_device *dm, - struct dm_version_response *vresp) + struct dm_version_response *vresp) { struct dm_version_request version_req; int ret; @@ -1206,13 +1427,26 @@ static void version_resp(struct hv_dynmem_device *dm, if (dm->next_version == 0) goto version_error; - dm->next_version = 0; memset(&version_req, 0, sizeof(struct dm_version_request)); version_req.hdr.type = DM_VERSION_REQUEST; version_req.hdr.size = sizeof(struct dm_version_request); version_req.hdr.trans_id = atomic_inc_return(&trans_id); - version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7; - version_req.is_last_attempt = 1; + version_req.version.version = dm->next_version; + dm->version = version_req.version.version; + + /* + * Set the next version to try in case current version fails. + * Win7 protocol ought to be the last one to try. + */ + switch (version_req.version.version) { + case DYNMEM_PROTOCOL_VERSION_WIN8: + dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7; + version_req.is_last_attempt = 0; + break; + default: + dm->next_version = 0; + version_req.is_last_attempt = 1; + } ret = vmbus_sendpacket(dm->dev->channel, &version_req, sizeof(struct dm_version_request), @@ -1230,10 +1464,10 @@ version_error: } static void cap_resp(struct hv_dynmem_device *dm, - struct dm_capabilities_resp_msg *cap_resp) + struct dm_capabilities_resp_msg *cap_resp) { if (!cap_resp->is_accepted) { - pr_info("Capabilities not accepted by host\n"); + pr_err("Capabilities not accepted by host\n"); dm->state = DM_INIT_ERROR; } complete(&dm->host_event); @@ -1254,7 +1488,7 @@ static void balloon_onchannelcallback(void *context) memset(recv_buffer, 0, sizeof(recv_buffer)); vmbus_recvpacket(dev->channel, recv_buffer, - PAGE_SIZE, &recvlen, &requestid); + HV_HYP_PAGE_SIZE, &recvlen, &requestid); if (recvlen > 0) { dm_msg = (struct dm_message *)recv_buffer; @@ -1263,7 +1497,7 @@ static void balloon_onchannelcallback(void *context) switch (dm_hdr->type) { case DM_VERSION_RESPONSE: version_resp(dm, - (struct dm_version_response *)dm_msg); + (struct dm_version_response *)dm_msg); break; case DM_CAPABILITIES_RESPONSE: @@ -1272,6 +1506,11 @@ static void balloon_onchannelcallback(void *context) break; case DM_BALLOON_REQUEST: + if (allow_hibernation) { + pr_info("Ignore balloon-up request!\n"); + break; + } + if (dm->state == DM_BALLOON_UP) pr_warn("Currently ballooning\n"); bal_msg = (struct dm_balloon *)recv_buffer; @@ -1281,9 +1520,14 @@ static void balloon_onchannelcallback(void *context) break; case DM_UNBALLOON_REQUEST: + if (allow_hibernation) { + pr_info("Ignore balloon-down request!\n"); + break; + } + dm->state = DM_BALLOON_DOWN; balloon_down(dm, - (struct dm_unballoon_request *)recv_buffer); + (struct dm_unballoon_request *)recv_buffer); break; case DM_MEM_HOT_ADD_REQUEST: @@ -1296,6 +1540,7 @@ static void balloon_onchannelcallback(void *context) * This is a normal hot-add request specifying * hot-add memory. 
*/ + dm->host_specified_ha_region = false; ha_pg_range = &ha_msg->range; dm->ha_wrk.ha_page_range = *ha_pg_range; dm->ha_wrk.ha_region_range.page_range = 0; @@ -1319,58 +1564,175 @@ static void balloon_onchannelcallback(void *context) break; default: - pr_err("Unhandled message: type: %d\n", dm_hdr->type); + pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); + } + } +} + +#define HV_LARGE_REPORTING_ORDER 9 +#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \ + HV_LARGE_REPORTING_ORDER) +static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, + struct scatterlist *sgl, unsigned int nents) +{ + unsigned long flags; + struct hv_memory_hint *hint; + int i, order; + u64 status; + struct scatterlist *sg; + + WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); + WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order)); + local_irq_save(flags); + hint = *this_cpu_ptr(hyperv_pcpu_input_arg); + if (!hint) { + local_irq_restore(flags); + return -ENOSPC; + } + + hint->heat_type = HV_EXTMEM_HEAT_HINT_COLD_DISCARD; + hint->reserved = 0; + for_each_sg(sgl, sg, nents, i) { + union hv_gpa_page_range *range; + + range = &hint->ranges[i]; + range->address_space = 0; + order = get_order(sg->length); + /* + * Hyper-V expects the additional_pages field in the units + * of one of these 3 sizes, 4Kbytes, 2Mbytes or 1Gbytes. + * This is dictated by the values of the fields page.largesize + * and page_size. + * This code however, only uses 4Kbytes and 2Mbytes units + * and not 1Gbytes unit. + */ + /* page reporting for pages 2MB or higher */ + if (order >= HV_LARGE_REPORTING_ORDER) { + range->page.largepage = 1; + range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB; + range->base_large_pfn = page_to_hvpfn( + sg_page(sg)) >> HV_LARGE_REPORTING_ORDER; + range->page.additional_pages = + (sg->length / HV_LARGE_REPORTING_LEN) - 1; + } else { + /* Page reporting for pages below 2MB */ + range->page.basepfn = page_to_hvpfn(sg_page(sg)); + range->page.largepage = false; + range->page.additional_pages = + (sg->length / HV_HYP_PAGE_SIZE) - 1; } } + status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0, + hint, NULL); + local_irq_restore(flags); + if (!hv_result_success(status)) { + pr_err("Cold memory discard hypercall failed with status %llx\n", + status); + if (hv_hypercall_multi_failure > 0) + hv_hypercall_multi_failure++; + + if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) { + pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n"); + pr_err("Defaulting to page_reporting_order %d\n", + pageblock_order); + page_reporting_order = pageblock_order; + hv_hypercall_multi_failure++; + return -EINVAL; + } + + return -EINVAL; + } + + return 0; } -static int balloon_probe(struct hv_device *dev, - const struct hv_vmbus_device_id *dev_id) +static void enable_page_reporting(void) { - int ret, t; - struct dm_version_request version_req; - struct dm_capabilities cap_msg; + int ret; - do_hot_add = hot_add; + if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) { + pr_debug("Cold memory discard hint not supported by Hyper-V\n"); + return; + } + BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); + dm_device.pr_dev_info.report = hv_free_page_report; /* - * First allocate a send buffer. 
+ * We let the page_reporting_order parameter decide the order + * in the page_reporting code */ + dm_device.pr_dev_info.order = 0; + ret = page_reporting_register(&dm_device.pr_dev_info); + if (ret < 0) { + dm_device.pr_dev_info.report = NULL; + pr_err("Failed to enable cold memory discard: %d\n", ret); + } else { + pr_info("Cold memory discard hint enabled with order %d\n", + page_reporting_order); + } +} - send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!send_buffer) - return -ENOMEM; - - ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0, - balloon_onchannelcallback, dev); +static void disable_page_reporting(void) +{ + if (dm_device.pr_dev_info.report) { + page_reporting_unregister(&dm_device.pr_dev_info); + dm_device.pr_dev_info.report = NULL; + } +} - if (ret) - goto probe_error0; +static int ballooning_enabled(void) +{ + /* + * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE), + * since currently it's unclear to us whether an unballoon request can + * make sure all page ranges are guest page size aligned. + */ + if (PAGE_SIZE != HV_HYP_PAGE_SIZE) { + pr_info("Ballooning disabled because page size is not 4096 bytes\n"); + return 0; + } - dm_device.dev = dev; - dm_device.state = DM_INITIALIZING; - dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7; - init_completion(&dm_device.host_event); - init_completion(&dm_device.config_event); - INIT_LIST_HEAD(&dm_device.ha_region_list); - INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up); - INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req); - dm_device.host_specified_ha_region = false; + return 1; +} - dm_device.thread = - kthread_run(dm_thread_func, &dm_device, "hv_balloon"); - if (IS_ERR(dm_device.thread)) { - ret = PTR_ERR(dm_device.thread); - goto probe_error1; +static int hot_add_enabled(void) +{ + /* + * Disable hot add on ARM64, because we currently rely on + * memory_add_physaddr_to_nid() to get a node id of a hot add range, + * however ARM64's memory_add_physaddr_to_nid() always return 0 and + * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for + * add_memory(). + */ + if (IS_ENABLED(CONFIG_ARM64)) { + pr_info("Memory hot add disabled on ARM64\n"); + return 0; } -#ifdef CONFIG_MEMORY_HOTPLUG - set_online_page_callback(&hv_online_page); -#endif + return 1; +} + +static int balloon_connect_vsp(struct hv_device *dev) +{ + struct dm_version_request version_req; + struct dm_capabilities cap_msg; + unsigned long t; + int ret; + + /* + * max_pkt_size should be large enough for one vmbus packet header plus + * our receive buffer size. Hyper-V sends messages up to + * HV_HYP_PAGE_SIZE bytes long on balloon channel. + */ + dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2; + + ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0, + balloon_onchannelcallback, dev); + if (ret) + return ret; - hv_set_drvdata(dev, &dm_device); /* * Initiate the hand shake with the host and negotiate * a version that the host can support. 
We start with the @@ -1381,20 +1743,20 @@ static int balloon_probe(struct hv_device *dev, version_req.hdr.type = DM_VERSION_REQUEST; version_req.hdr.size = sizeof(struct dm_version_request); version_req.hdr.trans_id = atomic_inc_return(&trans_id); - version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8; + version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10; version_req.is_last_attempt = 0; + dm_device.version = version_req.version.version; ret = vmbus_sendpacket(dev->channel, &version_req, - sizeof(struct dm_version_request), - (unsigned long)NULL, - VM_PKT_DATA_INBAND, 0); + sizeof(struct dm_version_request), + (unsigned long)NULL, VM_PKT_DATA_INBAND, 0); if (ret) - goto probe_error2; + goto out; - t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); + t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5)); if (t == 0) { ret = -ETIMEDOUT; - goto probe_error2; + goto out; } /* @@ -1402,9 +1764,14 @@ static int balloon_probe(struct hv_device *dev, * fail the probe function. */ if (dm_device.state == DM_INIT_ERROR) { - ret = -ETIMEDOUT; - goto probe_error2; + ret = -EPROTO; + goto out; } + + pr_info("Using Dynamic Memory protocol version %u.%u\n", + DYNMEM_MAJOR_VERSION(dm_device.version), + DYNMEM_MINOR_VERSION(dm_device.version)); + /* * Now submit our capabilities to the host. */ @@ -1413,14 +1780,22 @@ static int balloon_probe(struct hv_device *dev, cap_msg.hdr.size = sizeof(struct dm_capabilities); cap_msg.hdr.trans_id = atomic_inc_return(&trans_id); - cap_msg.caps.cap_bits.balloon = 1; - cap_msg.caps.cap_bits.hot_add = 1; + /* + * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host + * currently still requires the bits to be set, so we have to add code + * to fail the host's hot-add and balloon up/down requests, if any. + */ + cap_msg.caps.cap_bits.balloon = ballooning_enabled(); + cap_msg.caps.cap_bits.hot_add = hot_add_enabled(); /* - * Specify our alignment requirements as it relates - * memory hot-add. Specify 128MB alignment. + * Specify our alignment requirements for memory hot-add. The value is + * the log base 2 of the number of megabytes in a chunk. For example, + * with 256 MiB chunks, the value is 8. The number of MiB in a chunk + * must be a power of 2. */ - cap_msg.caps.cap_bits.hot_add_alignment = 7; + cap_msg.caps.cap_bits.hot_add_alignment = + ilog2(HA_BYTES_IN_CHUNK / SZ_1M); /* * Currently the host does not use these @@ -1431,16 +1806,15 @@ static int balloon_probe(struct hv_device *dev, cap_msg.max_page_number = -1; ret = vmbus_sendpacket(dev->channel, &cap_msg, - sizeof(struct dm_capabilities), - (unsigned long)NULL, - VM_PKT_DATA_INBAND, 0); + sizeof(struct dm_capabilities), + (unsigned long)NULL, VM_PKT_DATA_INBAND, 0); if (ret) - goto probe_error2; + goto out; - t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); + t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5)); if (t == 0) { ret = -ETIMEDOUT; - goto probe_error2; + goto out; } /* @@ -1448,52 +1822,290 @@ static int balloon_probe(struct hv_device *dev, * fail the probe function. */ if (dm_device.state == DM_INIT_ERROR) { - ret = -ETIMEDOUT; - goto probe_error2; + ret = -EPROTO; + goto out; } - dm_device.state = DM_INITIALIZED; + return 0; +out: + vmbus_close(dev->channel); + return ret; +} + +/* + * DEBUGFS Interface + */ +#ifdef CONFIG_DEBUG_FS + +/** + * hv_balloon_debug_show - shows statistics of balloon operations. + * @f: pointer to the &struct seq_file. + * @offset: ignored. 
+ * + * Provides the statistics that can be accessed in hv-balloon in the debugfs. + * + * Return: zero on success or an error code. + */ +static int hv_balloon_debug_show(struct seq_file *f, void *offset) +{ + struct hv_dynmem_device *dm = f->private; + char *sname; + + seq_printf(f, "%-22s: %u.%u\n", "host_version", + DYNMEM_MAJOR_VERSION(dm->version), + DYNMEM_MINOR_VERSION(dm->version)); + + seq_printf(f, "%-22s:", "capabilities"); + if (ballooning_enabled()) + seq_puts(f, " enabled"); + + if (hot_add_enabled()) + seq_puts(f, " hot_add"); + + seq_puts(f, "\n"); + + seq_printf(f, "%-22s: %u", "state", dm->state); + switch (dm->state) { + case DM_INITIALIZING: + sname = "Initializing"; + break; + case DM_INITIALIZED: + sname = "Initialized"; + break; + case DM_BALLOON_UP: + sname = "Balloon Up"; + break; + case DM_BALLOON_DOWN: + sname = "Balloon Down"; + break; + case DM_HOT_ADD: + sname = "Hot Add"; + break; + case DM_INIT_ERROR: + sname = "Error"; + break; + default: + sname = "Unknown"; + } + seq_printf(f, " (%s)\n", sname); + + /* HV Page Size */ + seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE); + + /* Pages added with hot_add */ + seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added); + + /* pages that are "onlined"/used from pages_added */ + seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined); + + /* pages we have given back to host */ + seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned); + + seq_printf(f, "%-22s: %lu\n", "total_pages_committed", + get_pages_committed(dm)); + + seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count", + dm->max_dynamic_page_count); return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug); + +static void hv_balloon_debugfs_init(struct hv_dynmem_device *b) +{ + debugfs_create_file("hv-balloon", 0444, NULL, b, + &hv_balloon_debug_fops); +} + +static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b) +{ + debugfs_lookup_and_remove("hv-balloon", NULL); +} + +#else + +static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b) +{ +} + +static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +static int balloon_probe(struct hv_device *dev, + const struct hv_vmbus_device_id *dev_id) +{ + int ret; + + allow_hibernation = hv_is_hibernation_supported(); + if (allow_hibernation) + hot_add = false; -probe_error2: #ifdef CONFIG_MEMORY_HOTPLUG - restore_online_page_callback(&hv_online_page); + /* + * Hot-add must operate in chunks that are of size equal to the + * memory block size because that's what the core add_memory() + * interface requires. The Hyper-V interface requires that the memory + * block size be a power of 2, which is guaranteed by the check in + * memory_dev_init(). + */ + ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE; + do_hot_add = hot_add; +#else + /* + * Without MEMORY_HOTPLUG, the guest returns a failure status for all + * hot add requests from Hyper-V, and the chunk size is used only to + * specify alignment to Hyper-V as required by the host/guest protocol. + * Somewhat arbitrarily, use 128 MiB. 
+ */ + ha_pages_in_chunk = SZ_128M / PAGE_SIZE; + do_hot_add = false; +#endif + dm_device.dev = dev; + dm_device.state = DM_INITIALIZING; + dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8; + init_completion(&dm_device.host_event); + init_completion(&dm_device.config_event); + INIT_LIST_HEAD(&dm_device.ha_region_list); + spin_lock_init(&dm_device.ha_lock); + INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up); + INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req); + dm_device.host_specified_ha_region = false; + +#ifdef CONFIG_MEMORY_HOTPLUG + set_online_page_callback(&hv_online_page); + init_completion(&dm_device.ol_waitevent); + register_memory_notifier(&hv_memory_nb); #endif - kthread_stop(dm_device.thread); -probe_error1: + hv_set_drvdata(dev, &dm_device); + + ret = balloon_connect_vsp(dev); + if (ret != 0) + goto connect_error; + + enable_page_reporting(); + dm_device.state = DM_INITIALIZED; + + dm_device.thread = + kthread_run(dm_thread_func, &dm_device, "hv_balloon"); + if (IS_ERR(dm_device.thread)) { + ret = PTR_ERR(dm_device.thread); + goto probe_error; + } + + hv_balloon_debugfs_init(&dm_device); + + return 0; + +probe_error: + dm_device.state = DM_INIT_ERROR; + dm_device.thread = NULL; + disable_page_reporting(); vmbus_close(dev->channel); -probe_error0: - kfree(send_buffer); +connect_error: +#ifdef CONFIG_MEMORY_HOTPLUG + unregister_memory_notifier(&hv_memory_nb); + restore_online_page_callback(&hv_online_page); +#endif return ret; } -static int balloon_remove(struct hv_device *dev) +static void balloon_remove(struct hv_device *dev) { struct hv_dynmem_device *dm = hv_get_drvdata(dev); - struct list_head *cur, *tmp; - struct hv_hotadd_state *has; + struct hv_hotadd_state *has, *tmp; + struct hv_hotadd_gap *gap, *tmp_gap; if (dm->num_pages_ballooned != 0) pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned); + hv_balloon_debugfs_exit(dm); + cancel_work_sync(&dm->balloon_wrk.wrk); cancel_work_sync(&dm->ha_wrk.wrk); - vmbus_close(dev->channel); kthread_stop(dm->thread); - kfree(send_buffer); + + /* + * This is to handle the case when balloon_resume() + * call has failed and some cleanup has been done as + * a part of the error handling. 
+ */ + if (dm_device.state != DM_INIT_ERROR) { + disable_page_reporting(); + vmbus_close(dev->channel); #ifdef CONFIG_MEMORY_HOTPLUG - restore_online_page_callback(&hv_online_page); + unregister_memory_notifier(&hv_memory_nb); + restore_online_page_callback(&hv_online_page); #endif - list_for_each_safe(cur, tmp, &dm->ha_region_list) { - has = list_entry(cur, struct hv_hotadd_state, list); + } + + guard(spinlock_irqsave)(&dm_device.ha_lock); + list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) { + list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) { + list_del(&gap->list); + kfree(gap); + } list_del(&has->list); kfree(has); } +} + +static int balloon_suspend(struct hv_device *hv_dev) +{ + struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev); + + tasklet_disable(&hv_dev->channel->callback_event); + + cancel_work_sync(&dm->balloon_wrk.wrk); + cancel_work_sync(&dm->ha_wrk.wrk); + + if (dm->thread) { + kthread_stop(dm->thread); + dm->thread = NULL; + vmbus_close(hv_dev->channel); + } + + tasklet_enable(&hv_dev->channel->callback_event); + + return 0; +} + +static int balloon_resume(struct hv_device *dev) +{ + int ret; + + dm_device.state = DM_INITIALIZING; + + ret = balloon_connect_vsp(dev); + + if (ret != 0) + goto out; + + dm_device.thread = + kthread_run(dm_thread_func, &dm_device, "hv_balloon"); + if (IS_ERR(dm_device.thread)) { + ret = PTR_ERR(dm_device.thread); + dm_device.thread = NULL; + goto close_channel; + } + dm_device.state = DM_INITIALIZED; return 0; +close_channel: + vmbus_close(dev->channel); +out: + dm_device.state = DM_INIT_ERROR; + disable_page_reporting(); +#ifdef CONFIG_MEMORY_HOTPLUG + unregister_memory_notifier(&hv_memory_nb); + restore_online_page_callback(&hv_online_page); +#endif + return ret; } static const struct hv_vmbus_device_id id_table[] = { @@ -1510,16 +2122,19 @@ static struct hv_driver balloon_drv = { .id_table = id_table, .probe = balloon_probe, .remove = balloon_remove, + .suspend = balloon_suspend, + .resume = balloon_resume, + .driver = { + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, }; static int __init init_balloon_drv(void) { - return vmbus_driver_register(&balloon_drv); } module_init(init_balloon_drv); MODULE_DESCRIPTION("Hyper-V Balloon"); -MODULE_VERSION(HV_DRV_VERSION); MODULE_LICENSE("GPL"); |
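The chunk and alignment arithmetic introduced by this commit is easy to check in isolation. Below is a user-space sketch (illustrative inputs, not values from the commit; assumes 4 KiB pages and a 128 MiB memory block size, which yields an ha_pages_in_chunk of 32768, the same as the old fixed HA_CHUNK). It reproduces the hot_add_alignment value sent to the host (log base 2 of the chunk size in MiB, so 7 for 128 MiB, matching the removed hard-coded comment) and the widening of a host hot-add request to whole chunks as done in hot_add_req().

#include <stdio.h>

#define SZ_1M (1024UL * 1024)

/* Round down/up to a multiple of 'a' (a power of 2), as the kernel's
 * ALIGN_DOWN()/ALIGN() do. */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

static unsigned ilog2_ul(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long block_bytes = 128 * SZ_1M; /* memory_block_size_bytes() */
	unsigned long pages_in_chunk = block_bytes / 4096; /* 32768 */
	unsigned long pg_start = 0x52345, pfn_cnt = 40000; /* made-up request */

	/* Alignment advertised to the host: 128 MiB -> 7, 256 MiB -> 8. */
	printf("hot_add_alignment = %u\n", ilog2_ul(block_bytes / SZ_1M));

	/* The request is widened to whole chunks, as in hot_add_req():
	 * rg_start = 0x50000, rg_sz = 65536. */
	printf("rg_start = 0x%lx, rg_sz = %lu\n",
	       ALIGN_DOWN(pg_start, pages_in_chunk),
	       ALIGN_UP(pfn_cnt, pages_in_chunk));
	return 0;
}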
