path: root/arch/powerpc/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0

/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>


/*
 * Updates the attributes of a page in three steps:
 *
 * 1. invalidate the page table entry
 * 2. flush the TLB
 * 3. install the new entry with the updated attributes
 *
 * Invalidating the pte during the update means there is a window in which
 * accesses that should succeed will fault instead.
 * For example:
 * - removing write permission from a page while it is being executed
 * - setting a page read-only while it is being read by another CPU
 *
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;
	pte_t pte;

	spin_lock(&init_mm.page_table_lock);

	/* invalidate the PTE so it's safe to modify */
	pte = ptep_get_and_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* modify the PTE bits as desired, then apply */
	switch (action) {
	case SET_MEMORY_RO:
		pte = pte_wrprotect(pte);
		break;
	case SET_MEMORY_RW:
		pte = pte_mkwrite(pte_mkdirty(pte));
		break;
	case SET_MEMORY_NX:
		pte = pte_exprotect(pte);
		break;
	case SET_MEMORY_X:
		pte = pte_mkexec(pte);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	set_pte_at(&init_mm, addr, ptep, pte);

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	/*
	 * apply_to_existing_page_range() only visits leaf PTEs, so huge
	 * vmalloc/module mappings cannot be modified here; reject them.
	 */
	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
			return -EINVAL;
	}
#endif

	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}
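
/*
 * Callers do not normally invoke change_memory_attr() directly; the
 * set_memory_*() helpers declared in arch/powerpc/include/asm/set_memory.h
 * are thin wrappers around it. An illustrative sketch only (the real
 * definitions live in that header, not in this file):
 *
 *	static inline int set_memory_ro(unsigned long addr, int numpages)
 *	{
 *		return change_memory_attr(addr, numpages, SET_MEMORY_RO);
 *	}
 *
 *	static inline int set_memory_x(unsigned long addr, int numpages)
 *	{
 *		return change_memory_attr(addr, numpages, SET_MEMORY_X);
 *	}
 */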

/*
 * Set the attributes of a page:
 *
 * This function is used by PPC32 at the end of init to set final kernel memory
 * protection. That includes changing the mapping of the page it is executing
 * from and of the data pages it is using.
 */
static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	pgprot_t prot = __pgprot((unsigned long)data);

	spin_lock(&init_mm.page_table_lock);

	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long sz = numpages * PAGE_SIZE;

	if (numpages <= 0)
		return 0;

	return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
					    (void *)pgprot_val(prot));
}
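
/*
 * Illustrative caller (a sketch, not code compiled in this file): the PPC32
 * end-of-init protection code in arch/powerpc/mm/pgtable_32.c uses
 * set_memory_attr() roughly like this to strip execute permission from the
 * freed init text:
 *
 *	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
 *				 PFN_DOWN((unsigned long)_sinittext);
 *
 *	set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
 */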