path: root/tools/testing/selftests/vm/hugepage-vmemmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * A test case for using hugetlb memory in a user application via the
 * mmap system call with the MAP_HUGETLB flag.  Before running this
 * program, make sure the administrator has allocated enough default-sized
 * huge pages to cover the 2 MB allocation.
 */
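/*
 * A minimal sketch of populating the pool, assuming a 2 MB default huge
 * page size (so a single page covers this test's mapping):
 *
 *   echo 1 > /proc/sys/vm/nr_hugepages
 *
 * The count actually required depends on the default huge page size
 * reported as "Hugepagesize" in /proc/meminfo.
 */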
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>

#define MAP_LENGTH		(2UL * 1024 * 1024)

#ifndef MAP_HUGETLB
#define MAP_HUGETLB		0x40000	/* arch specific */
#endif

#define PAGE_SIZE		4096
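/*
 * MAP_LENGTH and PAGE_SIZE above assume a 2 MB default huge page size and
 * a 4 KiB base page size; both are hard-coded and may need adjusting on
 * configurations that use different sizes.
 */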

#define PAGE_COMPOUND_HEAD	(1UL << 15)
#define PAGE_COMPOUND_TAIL	(1UL << 16)
#define PAGE_HUGE		(1UL << 17)
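/*
 * The bit positions above mirror KPF_COMPOUND_HEAD (15), KPF_COMPOUND_TAIL
 * (16) and KPF_HUGE (17) reported by /proc/kpageflags (see
 * include/uapi/linux/kernel-page-flags.h).
 */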

#define HEAD_PAGE_FLAGS		(PAGE_COMPOUND_HEAD | PAGE_HUGE)
#define TAIL_PAGE_FLAGS		(PAGE_COMPOUND_TAIL | PAGE_HUGE)

#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		~((1UL << PM_PFRAME_BITS) - 1)
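/*
 * Each /proc/self/pagemap entry is 64 bits wide; bits 0-54 hold the PFN
 * when the page is present (see Documentation/admin-guide/mm/pagemap.rst).
 * Note that PM_PFRAME_MASK as defined above covers the non-PFN bits, hence
 * the inversion at its use site in virt_to_pfn().
 */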

/*
 * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
 * That means the addresses starting with 0x800000... will need to be
 * specified.  Specifying a fixed address is not required on ppc64, i386
 * or x86_64.
 */
#ifdef __ia64__
#define MAP_ADDR		(void *)(0x8000000000000000UL)
#define MAP_FLAGS		(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
#else
#define MAP_ADDR		NULL
#define MAP_FLAGS		(MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
#endif

static void write_bytes(char *addr, size_t length)
{
	unsigned long i;

	for (i = 0; i < length; i++)
		*(addr + i) = (char)i;
}

static unsigned long virt_to_pfn(void *addr)
{
	int fd;
	unsigned long pagemap;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return -1UL;

	/* One 8-byte pagemap entry per virtual page; seek to this page's entry. */
	if (lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET) < 0 ||
	    read(fd, &pagemap, sizeof(pagemap)) != sizeof(pagemap)) {
		close(fd);
		return -1UL;
	}
	close(fd);

	return pagemap & ~PM_PFRAME_MASK;
}

static int check_page_flags(unsigned long pfn)
{
	int fd, i;
	unsigned long pageflags;

	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return -1;

	/* /proc/kpageflags holds one 8-byte flags word per PFN. */
	if (lseek(fd, pfn * sizeof(pageflags), SEEK_SET) < 0 ||
	    read(fd, &pageflags, sizeof(pageflags)) != sizeof(pageflags)) {
		close(fd);
		return -1;
	}

	if ((pageflags & HEAD_PAGE_FLAGS) != HEAD_PAGE_FLAGS) {
		close(fd);
		printf("Head page flags (%lx) are invalid\n", pageflags);
		return -1;
	}

	/*
	 * Pages other than the first page must be tail pages and must not be
	 * head pages; this also verifies that the kernel correctly exposes the
	 * fake head pages as tail pages when hugetlb_free_vmemmap is enabled.
	 */
	for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
		if (read(fd, &pageflags, sizeof(pageflags)) != sizeof(pageflags)) {
			close(fd);
			return -1;
		}
		if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
		    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
			close(fd);
			printf("Tail page flags (%lx) are invalid\n", pageflags);
			return -1;
		}
	}

	close(fd);

	return 0;
}

int main(int argc, char **argv)
{
	void *addr;
	unsigned long pfn;

	addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* Trigger allocation of HugeTLB page. */
	write_bytes(addr, MAP_LENGTH);

	pfn = virt_to_pfn(addr);
	if (pfn == -1UL) {
		munmap(addr, MAP_LENGTH);
		perror("virt_to_pfn");
		exit(1);
	}

	printf("Returned address is %p whose pfn is %lx\n", addr, pfn);

	if (check_page_flags(pfn) < 0) {
		munmap(addr, MAP_LENGTH);
		perror("check_page_flags");
		exit(1);
	}

	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
	if (munmap(addr, MAP_LENGTH)) {
		perror("munmap");
		exit(1);
	}

	return 0;
}