/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef __VFIO_VFIO_H__
#define __VFIO_VFIO_H__

#include <linux/file.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/vfio.h>

struct iommufd_ctx;
struct iommu_group;
struct vfio_container;

struct vfio_device_file {
	struct vfio_device *device;
	struct vfio_group *group;

	u8 access_granted;
	spinlock_t kvm_ref_lock; /* protect kvm field */
	struct kvm *kvm;
	struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
};
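
/*
 * Locking sketch for the fields above (illustrative only, not a real
 * helper): the kvm pointer is read and written under kvm_ref_lock, e.g.:
 *
 *	spin_lock(&df->kvm_ref_lock);
 *	kvm = df->kvm;
 *	spin_unlock(&df->kvm_ref_lock);
 *
 * whereas the iommufd pointer is only accessed with the owning
 * vfio_device_set::lock held, as noted in the field comment.
 */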

void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
int vfio_df_open(struct vfio_device_file *df);
void vfio_df_close(struct vfio_device_file *df);
struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device);

extern const struct file_operations vfio_device_fops;

enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
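
/*
 * Illustrative sketch only: the group type is chosen by the VFIO core when
 * a device is registered. An IOMMU-backed physical device ends up as
 * VFIO_IOMMU, while a mediated/emulated device with no IOMMU backing uses
 * VFIO_EMULATED_IOMMU, roughly:
 *
 *	vfio_device_set_group(device, VFIO_IOMMU);
 *	vfio_device_set_group(device, VFIO_EMULATED_IOMMU);
 *
 * The real call sites live in the VFIO core registration paths; this is a
 * usage sketch, not additional ABI.
 */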

struct vfio_group {
	struct device 			dev;
	struct cdev			cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. When drivers is 0 the driver is being detached. Once users
	 * reach 0 the iommu_group is invalid.
	 */
	refcount_t			drivers;
	unsigned int			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct list_head		vfio_next;
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head		container_next;
#endif
	enum vfio_group_type		type;
	struct mutex			group_lock;
	struct kvm			*kvm;
	struct file			*opened_file;
	struct blocking_notifier_head	notifier;
	struct iommufd_ctx		*iommufd;
	spinlock_t			kvm_ref_lock;
	unsigned int			cdev_device_open_cnt;
};

int vfio_device_block_group(struct vfio_device *device);
void vfio_device_unblock_group(struct vfio_device *device);
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_df_group_close(struct vfio_device_file *df);
struct vfio_group *vfio_group_from_file(struct file *file);
bool vfio_group_enforced_coherent(struct vfio_group *group);
void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
	       vdev->group->type == VFIO_NO_IOMMU;
}

#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
	char		*name;
	struct module	*owner;
	void		*(*open)(unsigned long arg);
	void		(*release)(void *iommu_data);
	long		(*ioctl)(void *iommu_data, unsigned int cmd,
				 unsigned long arg);
	int		(*attach_group)(void *iommu_data,
					struct iommu_group *group,
					enum vfio_group_type);
	void		(*detach_group)(void *iommu_data,
					struct iommu_group *group);
	int		(*pin_pages)(void *iommu_data,
				     struct iommu_group *group,
				     dma_addr_t user_iova,
				     int npage, int prot,
				     struct page **pages);
	void		(*unpin_pages)(void *iommu_data,
				       dma_addr_t user_iova, int npage);
	void		(*register_device)(void *iommu_data,
					   struct vfio_device *vdev);
	void		(*unregister_device)(void *iommu_data,
					     struct vfio_device *vdev);
	int		(*dma_rw)(void *iommu_data, dma_addr_t user_iova,
				  void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
};

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
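
/*
 * Illustrative registration sketch (all "my_*" names are hypothetical): an
 * IOMMU backend fills in its callbacks and registers the ops table with the
 * VFIO core, typically from module init:
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my-iommu",
 *		.owner		= THIS_MODULE,
 *		.open		= my_iommu_open,
 *		.release	= my_iommu_release,
 *		.ioctl		= my_iommu_ioctl,
 *		.attach_group	= my_iommu_attach_group,
 *		.detach_group	= my_iommu_detach_group,
 *	};
 *
 *	err = vfio_register_iommu_driver(&my_iommu_ops);
 *
 * vfio_iommu_type1 is the in-tree implementation of these ops; the sketch
 * above only shows the shape of the hook-up, not a complete driver.
 */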

struct vfio_container *vfio_container_from_file(struct file *filep);
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);

int __init vfio_container_init(void);
void vfio_container_cleanup(void);
#else
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
	return 0;
}
static inline void vfio_container_cleanup(void)
{
}
#endif

#if IS_ENABLED(CONFIG_IOMMUFD)
int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
void vfio_iommufd_unbind(struct vfio_device *device);
#else
static inline int vfio_iommufd_bind(struct vfio_device *device,
				    struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
#endif

#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif

#ifdef CONFIG_VFIO_NOIOMMU
extern bool vfio_noiommu __read_mostly;
#else
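/*
 * Without CONFIG_VFIO_NOIOMMU there is no global flag; an anonymous enum
 * constant provides a compile-time false so callers can test vfio_noiommu
 * unconditionally without the compiler emitting storage for a variable.
 */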
enum { vfio_noiommu = false };
#endif

#ifdef CONFIG_HAVE_KVM
void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
#else
static inline void _vfio_device_get_kvm_safe(struct vfio_device *device,
					     struct kvm *kvm)
{
}

static inline void vfio_device_put_kvm(struct vfio_device *device)
{
}
#endif

#endif