arch/arm64/kvm/hyp/include/nvhe/gfp.h
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HYP_GFP_H
#define __KVM_HYP_GFP_H

#include <linux/list.h>

#include <nvhe/memory.h>
#include <nvhe/spinlock.h>

#define HYP_NO_ORDER	UINT_MAX

struct hyp_pool {
	/*
	 * Spinlock protecting concurrent changes to the memory pool, as well
	 * as to the struct hyp_page of each of the pool's pages, until we have
	 * a proper atomic API at EL2.
	 */
	hyp_spinlock_t lock;
	struct list_head free_area[MAX_ORDER];
	phys_addr_t range_start;
	phys_addr_t range_end;
	unsigned int max_order;
};

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	struct hyp_pool *pool = hyp_page_to_pool(p);

	hyp_spin_lock(&pool->lock);
	p->refcount++;
	hyp_spin_unlock(&pool->lock);
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	struct hyp_pool *pool = hyp_page_to_pool(p);
	int ret;

	hyp_spin_lock(&pool->lock);
	p->refcount--;
	ret = (p->refcount == 0);
	hyp_spin_unlock(&pool->lock);

	return ret;
}

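/*
 * Transition a page from unused (refcount == 0) to used (refcount == 1).
 * Finding the page already refcounted is a bug.
 */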
static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	struct hyp_pool *pool = hyp_page_to_pool(p);

	hyp_spin_lock(&pool->lock);
	if (p->refcount) {
		hyp_spin_unlock(&pool->lock);
		BUG();
	}
	p->refcount = 1;
	hyp_spin_unlock(&pool->lock);
}

/* Allocation */
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
void hyp_get_page(void *addr);
void hyp_put_page(void *addr);
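
/*
 * For context, hyp_get_page() and hyp_put_page() are expected to be thin
 * wrappers around the refcounting helpers above, along these lines (a sketch
 * only: hyp_virt_to_page() is assumed to be provided by <nvhe/memory.h>, and
 * __hyp_free_page() is a hypothetical name for the pool-internal free path,
 * not part of this header):
 *
 *	void hyp_put_page(void *addr)
 *	{
 *		struct hyp_page *p = hyp_virt_to_page(addr);
 *
 *		if (hyp_page_ref_dec_and_test(p))
 *			__hyp_free_page(hyp_page_to_pool(p), p);
 *	}
 */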

/* The first 'reserved_pages' of the range are treated as in use and cannot be freed */
int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages);
#endif /* __KVM_HYP_GFP_H */
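
A minimal usage sketch of the API above, assuming the allocator's convention
that a page returned by hyp_alloc_pages() carries an initial reference which a
matching hyp_put_page() releases; the names hyp_example_setup() and
example_pool, and the choice of zero reserved pages, are purely illustrative:

	#include <linux/errno.h>
	#include <linux/types.h>

	#include <nvhe/gfp.h>

	static struct hyp_pool example_pool;

	static int hyp_example_setup(u64 pfn, unsigned int nr_pages)
	{
		void *page;
		int ret;

		/* Hand the whole [pfn, pfn + nr_pages) range to the pool. */
		ret = hyp_pool_init(&example_pool, pfn, nr_pages, 0);
		if (ret)
			return ret;

		/* Grab a single (order-0) page; NULL means the pool is empty. */
		page = hyp_alloc_pages(&example_pool, 0);
		if (!page)
			return -ENOMEM;

		/* Drop the allocation's reference; the page returns to the free lists. */
		hyp_put_page(page);

		return 0;
	}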