summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/i915/i915_scatterlist.h
blob: 6cf8a298849fd9a842983e809b10fbc025f07965 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#include "i915_gem.h"

struct drm_mm_node;
struct ttm_resource;

/*
 * Optimised SGL iterator for GEM objects.
 *
 * struct sgt_iter caches the state needed to walk a scatterlist one
 * PAGE_SIZE step at a time: @sgp is the current entry, @curr and @max
 * bound the byte offset within it, and the union holds either the base
 * pfn (CPU page walk) or the base DMA address (device address walk) of
 * the current entry.
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;	/* base pfn when walking CPU pages */
		dma_addr_t dma;		/* base address when walking DMA */
	};
	unsigned int curr;		/* current byte offset into @sgp */
	unsigned int max;		/* offset + length of @sgp, exclusive */
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	/*
	 * A DMA walk terminates at the first entry with a zero
	 * sg_dma_len(); treat such an entry the same as the end of the
	 * list (s.sgp == NULL ends the iterator macros below).
	 */
	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

/*
 * Advance to the array slot immediately after @sg, following a chain
 * pointer if that slot is one. Does not detect the end of the list;
 * callers wanting a NULL terminator use __sg_next() instead.
 */
static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	sg++;

	return unlikely(sg_is_chain(sg)) ? sg_chain_ptr(sg) : sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Return: NULL if @sg is the last entry of the list, otherwise the
 * entry that follows it in the array, dereferencing any chain pointer
 * sitting in between.
 */
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	return ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp:	Device address (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 * @__step:	step size in bytes; @__dp advances by this amount each pass
 *
 * The loop ends when the iterator runs past the last entry with a
 * non-zero sg_dma_len() (see __sgt_iter()).
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
 * __for_each_daddr_next - iterates over the device addresses with pre-initialized iterator.
 * @__dp:	Device address (output)
 * @__iter:	'struct sgt_iter' (iterator state, external)
 * @__step:	step size in bytes; @__dp advances by this amount each pass
 *
 * Same as __for_each_sgt_daddr() except @__iter is not reset, so the
 * walk resumes from wherever the caller left it.
 */
#define __for_each_daddr_next(__dp, __iter, __step)                  \
	for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;   \
	     (((__iter).curr += (__step)) >= (__iter).max) ?            \
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp:	page pointer (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 *
 * Steps one PAGE_SIZE at a time; the loop condition yields NULL (and
 * terminates) once the iterator's pfn is 0, i.e. past the last entry.
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Walks the list up to the first entry with a zero sg_dma_len(),
 * or'ing each segment length into the result.
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes = 0;

	for (; sg && sg_dma_len(sg); sg = __sg_next(sg)) {
		/* Segments must be page aligned for the or-trick to work. */
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
	}

	return page_sizes;
}

/*
 * i915_sg_segment_size - maximum sg segment size to use for @dev,
 * rounded down to a whole number of pages and capped at UINT_MAX.
 */
static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	size_t limit = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));

	/*
	 * Pages of a Xen PV guest are not contiguous in DMA (machine)
	 * address space.  The DMA API copes with that both in dma_alloc_*
	 * (by asking the hypervisor to make the pages contiguous) and in
	 * dma_map_* (by bounce buffering), but i915 ignores the coherency
	 * aspects of the DMA API and thus cannot tolerate bounce buffering
	 * actually happening.  Force small, single-page segments when
	 * running in PV mode on Xen as a workaround.
	 *
	 * Note this will still break if bounce buffering is required for
	 * other reasons, like confidential computing hypervisors or PCIe
	 * root ports with addressing limitations.
	 */
	if (xen_pv_domain())
		limit = PAGE_SIZE;

	return round_down(limit, PAGE_SIZE);
}

bool i915_sg_trim(struct sg_table *orig_st);

/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt.
	 * Invoked via kref_put() when the last reference is dropped
	 * (see i915_refct_sgt_put()).
	 */
	void (*release)(struct kref *ref);
};

/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure; its @release hook frees this object.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};

/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put, may be NULL (no-op).
 *
 * Drops one reference; the ops->release hook runs when the last
 * reference is dropped.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (!rsgt)
		return;

	kref_put(&rsgt->kref, rsgt->ops->release);
}

/**
 * i915_refct_sgt_get - Get a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to get. Must not be NULL.
 *
 * Return: @rsgt, with its refcount incremented by one.
 */
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
	kref_get(&rsgt->kref);
	return rsgt;
}

/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-list
 * is embedded into another structure.
 *
 * The refcount starts at one; the sg-table itself is marked absent
 * (@table.sgl == NULL) until the caller populates it.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	rsgt->ops = ops;
	rsgt->size = size;
	rsgt->table.sgl = NULL;	/* no backing sg-table yet */
	kref_init(&rsgt->kref);
}

void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment);

#endif