path: root/drivers/gpu/drm/i915/gt/shmem_utils.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"

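/*
 * Create a shmem file of at least @len bytes (the size is rounded up
 * to a whole number of pages) and fill it with a copy of @data.
 */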
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int err;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	err = shmem_write(file, 0, data, len);
	if (err) {
		fput(file);
		return ERR_PTR(err);
	}

	return file;
}

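/*
 * If the object is already shmem-backed, reuse its backing file by
 * taking an extra reference on it. Otherwise, pin a CPU mapping of
 * the object and copy its contents into a new shmem file.
 */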
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
	struct file *file;
	void *ptr;

	if (obj->ops == &i915_gem_shmem_ops) {
		file = obj->base.filp;
		atomic_long_inc(&file->f_count);
		return file;
	}

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	file = shmem_create_from_data("", ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return file;
}

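/* Number of pages, and hence PTEs, needed to map the whole file. */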
static size_t shmem_npte(struct file *file)
{
	return file->f_mapping->host->i_size >> PAGE_SHIFT;
}

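/*
 * Tear down the vmap and drop the page references. Each page is put
 * twice: once for the shmem_read_mapping_page_gfp() lookup just below,
 * and once for the reference that shmem_pin_map() kept to pin the page.
 */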
static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
{
	unsigned long pfn;

	vunmap(ptr);

	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (!WARN_ON(IS_ERR(page))) {
			put_page(page);
			put_page(page);
		}
	}
}

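/*
 * Pin every page of the file and map them into one contiguous kernel
 * virtual range. The mapping is marked unevictable so the pages cannot
 * be reclaimed while they remain pinned.
 */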
void *shmem_pin_map(struct file *file)
{
	const size_t n_pte = shmem_npte(file);
	pte_t *stack[32], **ptes, **mem;
	struct vm_struct *area;
	unsigned long pfn;

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	ptes = mem;
	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			goto err_page;

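		/*
		 * Deliberately keep the lookup reference from above: it is
		 * what pins the page until __shmem_unpin_map().
		 */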
		**ptes++ = mk_pte(page, PAGE_KERNEL);
	}

	if (mem != stack)
		kvfree(mem);

	mapping_set_unevictable(file->f_mapping);
	return area->addr;

err_page:
	if (mem != stack)
		kvfree(mem);

	__shmem_unpin_map(file, area->addr, pfn);
	return NULL;
}

void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	__shmem_unpin_map(file, ptr, shmem_npte(file));
}

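/*
 * Copy @len bytes between @ptr and the file, a page at a time, starting
 * at file offset @off. Each page is looked up, kmap'ed for the copy and
 * then released before moving on to the next.
 */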
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write)
			memcpy(vaddr + offset_in_page(off), ptr, this);
		else
			memcpy(ptr, vaddr + offset_in_page(off), this);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;
	}

	return 0;
}

int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}

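/* Build the selftests into this unit so they can reach the static helpers. */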
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif