/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H

#include "device_include/svga_reg.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/rbtree_types.h>
#include <linux/types.h>

struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS           = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS  = BIT(1),
	VMW_BO_DOMAIN_VRAM          = BIT(2),
	VMW_BO_DOMAIN_GMR           = BIT(3),
	VMW_BO_DOMAIN_MOB           = BIT(4),
};

struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	size_t size;
	bool pin;
	struct dma_resv *resv;
	struct sg_table *sg;
};

/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: The chosen busy places for the preferred placement.
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];

	atomic_t cpu_writers;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);
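
/*
 * Example (illustrative sketch, not part of the driver): creating an
 * unpinned 4 KiB buffer object that prefers MOB placement and may sit in
 * system memory while busy. "dev_priv" is assumed to be a valid
 * struct vmw_private pointer; error handling is abbreviated.
 *
 *	struct vmw_bo_params params = {
 *		.domain      = VMW_BO_DOMAIN_MOB,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type     = ttm_bo_type_device,
 *		.size        = PAGE_SIZE,
 *		.pin         = false,
 *	};
 *	struct vmw_bo *bo;
 *	int ret = vmw_bo_create(dev_priv, &params, &bo);
 */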

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);
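
/*
 * Example (illustrative sketch): pinning a buffer in VRAM for the duration
 * of a hardware operation, then releasing the pin. "dev_priv" and "buf"
 * are assumed valid; error handling is abbreviated.
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	... submit commands that need the buffer resident in VRAM ...
 *	vmw_bo_unpin(dev_priv, buf, false);
 *
 * vmw_bo_pin_reserved() is assumed to be the variant for callers that
 * already hold the buffer's reservation.
 */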

void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void vmw_bo_unmap(struct vmw_bo *vbo);
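
/*
 * Example (illustrative sketch): writing to a buffer through the cached
 * kernel mapping. vmw_bo_map_and_cache() stores the kmap in @map, so
 * repeated calls are assumed to reuse it; vmw_bo_unmap() tears the
 * mapping down once it is no longer needed. The caller is assumed to
 * hold the buffer's reservation. "data" and "size" are placeholders.
 *
 *	void *ptr = vmw_bo_map_and_cache(vbo);
 *
 *	if (!ptr)
 *		return -ENOMEM;
 *	memcpy(ptr, data, size);
 *	vmw_bo_unmap(vbo);
 */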

void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);
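
/*
 * Example (illustrative sketch): resolving a user-space handle inside an
 * ioctl. "filp" and "arg->handle" stand in for the ioctl's file and
 * argument; the lookup is assumed to take a GEM reference that is
 * dropped with vmw_user_bo_unref() below.
 *
 *	struct vmw_bo *vbo;
 *	int ret = vmw_user_bo_lookup(filp, arg->handle, &vbo);
 *
 *	if (ret)
 *		return ret;
 *	... operate on vbo ...
 *	vmw_user_bo_unref(&vbo);
 */
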
/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_bo
 */
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->tbo.priority = i;
			return;
		}
	}

	/* No resources attached: fall back to the default priority */
	vbo->tbo.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
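
/*
 * Example (illustrative sketch): a resource using this buffer as backing
 * storage would register its eviction priority when it attaches and drop
 * it when it detaches, so @tbo.priority tracks the highest priority of
 * any attached resource. "prio" stands for the resource's priority value.
 *
 *	vmw_bo_prio_add(vbo, prio);
 *	... resource is attached ...
 *	vmw_bo_prio_del(vbo, prio);
 */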

static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}

static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}

static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}

static inline void vmw_user_bo_unref(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		drm_gem_object_put(&tmp_buf->tbo.base);
}

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of(gobj, struct vmw_bo, tbo.base);
}
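
/*
 * Example (illustrative sketch): recovering the vmw_bo from its embedded
 * GEM object, e.g. in a hypothetical drm_gem_object_funcs callback:
 *
 *	static void example_gem_free(struct drm_gem_object *gobj)
 *	{
 *		struct vmw_bo *vbo = to_vmw_bo(gobj);
 *		...
 *	}
 */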

#endif // VMWGFX_BO_H