path: root/include/linux/bootmem_info.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BOOTMEM_INFO_H
#define __LINUX_BOOTMEM_INFO_H

#include <linux/mm.h>
#include <linux/kmemleak.h>

/*
 * Types for free bootmem stored in page->index. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};
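
/*
 * Illustrative only (this helper is not part of the kernel API): a page
 * handed to get_page_bootmem() below carries one of the magic values above
 * in page->index, so a debugging-style check can look like the sketch
 * here.  Note that page->index of an unrelated page may happen to collide
 * with this range, which is why the values start at an arbitrary offset.
 */
static inline bool example_is_bootmem_info_page(const struct page *page)
{
	unsigned long magic = page->index;

	return magic >= MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE &&
	       magic <= MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE;
}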

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void __init register_page_bootmem_info_node(struct pglist_data *pgdat);

void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type);
void put_page_bootmem(struct page *page);
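
/*
 * The real definitions live in mm/bootmem_info.c.  The sketch below is a
 * simplified illustration (note the example_ prefixes; this is not the
 * upstream code): the type magic goes into page->index, the auxiliary info
 * into the page private field, and the page reference count tracks how
 * many users still need the page.
 */
static inline void example_get_page_bootmem(unsigned long info,
					    struct page *page,
					    unsigned long type)
{
	page->index = type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

static inline void example_put_page_bootmem(struct page *page)
{
	unsigned long type = page->index;

	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	/* Dropping the last reference returns the page to the buddy. */
	if (page_ref_dec_return(page) == 1) {
		page->index = 0;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
		free_reserved_page(page);
	}
}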

/*
 * Any memory allocated via the memblock allocator and not via the
 * buddy allocator is already marked reserved in the memmap. For such
 * pages, this function can be called to free them back to the buddy
 * allocator.
 */
static inline void free_bootmem_page(struct page *page)
{
	unsigned long magic = page->index;

	/*
	 * reserve_bootmem_region() sets the reserved flag on bootmem pages
	 * and leaves them holding one reference; get_page_bootmem() takes
	 * a second one, so a page freed here must have a refcount of 2.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);

	if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
		put_page_bootmem(page);
	else
		VM_BUG_ON_PAGE(1, page);
}
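
/*
 * Illustrative caller (the helper name and range handling are made up for
 * this example): release a page-aligned, boot-time allocated range back to
 * the buddy allocator one page at a time.  Users such as the HugeTLB
 * vmemmap code free no longer needed memmap pages this way.
 */
static inline void example_free_bootmem_range(void *start, void *end)
{
	void *addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		free_bootmem_page(virt_to_page(addr));
}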
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline void put_page_bootmem(struct page *page)
{
}

static inline void get_page_bootmem(unsigned long info, struct page *page,
				    unsigned long type)
{
}

static inline void free_bootmem_page(struct page *page)
{
	/*
	 * Without bootmem info there is no extra reference to drop: tell
	 * kmemleak that the boot-time allocation is gone and hand the page
	 * straight back to the buddy allocator.
	 */
	kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
	free_reserved_page(page);
}
#endif

#endif /* __LINUX_BOOTMEM_INFO_H */