Diffstat (limited to 'include/linux/virtio_config.h')
-rw-r--r-- | include/linux/virtio_config.h | 468
1 file changed, 359 insertions(+), 109 deletions(-)
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 987b6491b946..69f84ea85d71 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -6,12 +6,34 @@
 #include <linux/bug.h>
 #include <linux/virtio.h>
 #include <linux/virtio_byteorder.h>
+#include <linux/compiler_types.h>
 #include <uapi/linux/virtio_config.h>
 
 struct irq_affinity;
 
+struct virtio_shm_region {
+	u64 addr;
+	u64 len;
+};
+
+typedef void vq_callback_t(struct virtqueue *);
+
 /**
- * virtio_config_ops - operations for configuring a virtio device
+ * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
+ * @name: virtqueue description. Used mainly for debugging, NULL for
+ *	a virtqueue unused by the driver.
+ * @callback: A callback to invoke on a used buffer notification.
+ *	NULL for a virtqueue that does not need a callback.
+ * @ctx: whether to maintain an extra context per virtqueue.
+ */
+struct virtqueue_info {
+	const char *name;
+	vq_callback_t *callback;
+	bool ctx;
+};
+
+/**
+ * struct virtio_config_ops - operations for configuring a virtio device
  * Note: Do not assume that a transport implements all of the operations
  *       getting/setting a value as a simple read/write! Generally speaking,
  *       any of @get/@set, @get_status/@set_status, or @get_features/
@@ -45,19 +67,27 @@ struct irq_affinity;
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
- *	callbacks: array of callbacks, for each virtqueue
- *		include a NULL entry for vqs that do not need a callback
- *	names: array of virtqueue names (mainly for debugging)
- *		include a NULL entry for vqs unused by driver
+ *	vqs_info: array of virtqueue info structures
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
+ * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
+ *	The function guarantees that all memory operations on the
+ *	queue before it are visible to the vring_interrupt() that is
+ *	called after it.
+ *	vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
- *	Returns the first 64 feature bits (all we currently need).
+ *	Returns the first 64 feature bits.
+ * @get_extended_features:
+ *	vdev: the virtio_device
+ *	Returns the first VIRTIO_FEATURES_BITS feature bits (all we currently
+ *	need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
- *	This gives the final feature bits for the device: it can change
+ *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
+ *	Note that despite the name this can be called any number of
+ *	times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
@@ -65,8 +95,20 @@ struct irq_affinity;
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
+ * @get_shm_region: get a shared memory region based on the index.
+ * @disable_vq_and_reset: reset a queue individually (optional).
+ *	vq: the virtqueue
+ *	Returns 0 on success or error status
+ *	disable_vq_and_reset will guarantee that the callbacks are disabled and
+ *	synchronized.
+ *	Except for the callback, the caller should guarantee that the vring is
+ *	not accessed by any functions of virtqueue.
+ * @enable_vq_after_reset: enable a reset queue
+ *	vq: the virtqueue
+ *	Returns 0 on success or error status
+ *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
+ *	set.
 */
-typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
 	void (*get)(struct virtio_device *vdev, unsigned offset,
 		    void *buf, unsigned len);
@@ -76,18 +118,97 @@ struct virtio_config_ops {
 	u8 (*get_status)(struct virtio_device *vdev);
 	void (*set_status)(struct virtio_device *vdev, u8 status);
 	void (*reset)(struct virtio_device *vdev);
-	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
-			struct virtqueue *vqs[], vq_callback_t *callbacks[],
-			const char * const names[], const bool *ctx,
+	int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
+			struct virtqueue *vqs[],
+			struct virtqueue_info vqs_info[],
 			struct irq_affinity *desc);
 	void (*del_vqs)(struct virtio_device *);
+	void (*synchronize_cbs)(struct virtio_device *);
 	u64 (*get_features)(struct virtio_device *vdev);
+	void (*get_extended_features)(struct virtio_device *vdev,
+				      u64 *features);
 	int (*finalize_features)(struct virtio_device *vdev);
 	const char *(*bus_name)(struct virtio_device *vdev);
 	int (*set_vq_affinity)(struct virtqueue *vq,
 			       const struct cpumask *cpu_mask);
 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
-			int index);
+						 int index);
+	bool (*get_shm_region)(struct virtio_device *vdev,
+			       struct virtio_shm_region *region, u8 id);
+	int (*disable_vq_and_reset)(struct virtqueue *vq);
+	int (*enable_vq_after_reset)(struct virtqueue *vq);
+};
+
+/**
+ * struct virtio_map_ops - operations for mapping buffer for a virtio device
+ * Note: For a transport that has its own mapping logic it must
+ * implement all of the operations
+ * @map_page: map a buffer to the device
+ *	map: metadata for performing mapping
+ *	page: the page that will be mapped by the device
+ *	offset: the offset in the page for a buffer
+ *	size: the buffer size
+ *	dir: mapping direction
+ *	attrs: mapping attributes
+ *	Returns the mapped address
+ * @unmap_page: unmap a buffer from the device
+ *	map: device specific mapping map
+ *	map_handle: the mapped address
+ *	size: the buffer size
+ *	dir: mapping direction
+ *	attrs: unmapping attributes
+ * @sync_single_for_cpu: sync a single buffer from device to cpu
+ *	map: metadata for performing mapping
+ *	map_handle: the mapping address to sync
+ *	size: the size of the buffer
+ *	dir: synchronization direction
+ * @sync_single_for_device: sync a single buffer from cpu to device
+ *	map: metadata for performing mapping
+ *	map_handle: the mapping address to sync
+ *	size: the size of the buffer
+ *	dir: synchronization direction
+ * @alloc: alloc a coherent buffer mapping
+ *	map: metadata for performing mapping
+ *	size: the size of the buffer
+ *	map_handle: the mapping address to sync
+ *	gfp: allocation flag (GFP_XXX)
+ *	Returns virtual address of the allocated buffer
+ * @free: free a coherent buffer mapping
+ *	map: metadata for performing mapping
+ *	size: the size of the buffer
+ *	vaddr: virtual address of the buffer
+ *	map_handle: the mapping address that needs to be freed
+ *	attrs: unmapping attributes
+ * @need_sync: if the buffer needs synchronization
+ *	map: metadata for performing mapping
+ *	map_handle: the mapped address
+ *	Returns whether the buffer needs synchronization
+ * @mapping_error: if the mapping address is error
+ *	map: metadata for performing mapping
+ *	map_handle: the mapped address
+ * @max_mapping_size: get the maximum buffer size that can be mapped
+ *	map: metadata for performing mapping
+ *	Returns the maximum buffer size that can be mapped
+ */
+struct virtio_map_ops {
+	dma_addr_t (*map_page)(union virtio_map map, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction dir, unsigned long attrs);
+	void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
+			   size_t size, enum dma_data_direction dir,
+			   unsigned long attrs);
+	void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t map_handle,
+				    size_t size, enum dma_data_direction dir);
+	void (*sync_single_for_device)(union virtio_map map,
+				       dma_addr_t map_handle, size_t size,
+				       enum dma_data_direction dir);
+	void *(*alloc)(union virtio_map map, size_t size,
+		       dma_addr_t *map_handle, gfp_t gfp);
+	void (*free)(union virtio_map map, size_t size, void *vaddr,
+		     dma_addr_t map_handle, unsigned long attrs);
+	bool (*need_sync)(union virtio_map map, dma_addr_t map_handle);
+	int (*mapping_error)(union virtio_map map, dma_addr_t map_handle);
+	size_t (*max_mapping_size)(union virtio_map map);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */
@@ -104,13 +225,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
 static inline bool __virtio_test_bit(const struct virtio_device *vdev,
 				     unsigned int fbit)
 {
-	/* Did you forget to fix assumptions on max features? */
-	if (__builtin_constant_p(fbit))
-		BUILD_BUG_ON(fbit >= 64);
-	else
-		BUG_ON(fbit >= 64);
-
-	return vdev->features & BIT_ULL(fbit);
+	return virtio_features_test_bit(vdev->features_array, fbit);
 }
 
 /**
@@ -121,13 +236,7 @@ static inline bool __virtio_test_bit(const struct virtio_device *vdev,
 static inline void __virtio_set_bit(struct virtio_device *vdev,
 				    unsigned int fbit)
 {
-	/* Did you forget to fix assumptions on max features? */
-	if (__builtin_constant_p(fbit))
-		BUILD_BUG_ON(fbit >= 64);
-	else
-		BUG_ON(fbit >= 64);
-
-	vdev->features |= BIT_ULL(fbit);
+	virtio_features_set_bit(vdev->features_array, fbit);
 }
 
 /**
@@ -138,13 +247,7 @@ static inline void __virtio_set_bit(struct virtio_device *vdev,
 static inline void __virtio_clear_bit(struct virtio_device *vdev,
 				      unsigned int fbit)
 {
-	/* Did you forget to fix assumptions on max features? */
-	if (__builtin_constant_p(fbit))
-		BUILD_BUG_ON(fbit >= 64);
-	else
-		BUG_ON(fbit >= 64);
-
-	vdev->features &= ~BIT_ULL(fbit);
+	virtio_features_clear_bit(vdev->features_array, fbit);
 }
 
 /**
@@ -161,55 +264,77 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
 	return __virtio_test_bit(vdev, fbit);
 }
 
+static inline void virtio_get_features(struct virtio_device *vdev,
+				       u64 *features_out)
+{
+	if (vdev->config->get_extended_features) {
+		vdev->config->get_extended_features(vdev, features_out);
+		return;
+	}
+
+	virtio_features_from_u64(features_out,
+				 vdev->config->get_features(vdev));
+}
+
 /**
- * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
-static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
 {
 	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
-	return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
+}
+
+static inline
+int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+		    struct virtqueue *vqs[],
+		    struct virtqueue_info vqs_info[],
+		    struct irq_affinity *desc)
+{
+	return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
 }
 
 static inline
 struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
 					vq_callback_t *c, const char *n)
 {
-	vq_callback_t *callbacks[] = { c };
-	const char *names[] = { n };
+	struct virtqueue_info vqs_info[] = {
+		{ n, c },
+	};
 	struct virtqueue *vq;
-	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
-					 NULL);
+	int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);
+
 	if (err < 0)
 		return ERR_PTR(err);
 	return vq;
 }
 
+/**
+ * virtio_synchronize_cbs - synchronize with virtqueue callbacks
+ * @dev: the virtio device
+ */
 static inline
-int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-		struct virtqueue *vqs[], vq_callback_t *callbacks[],
-		const char * const names[],
-		struct irq_affinity *desc)
-{
-	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
-}
-
-static inline
-int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
-			struct virtqueue *vqs[], vq_callback_t *callbacks[],
-			const char * const names[], const bool *ctx,
-			struct irq_affinity *desc)
+void virtio_synchronize_cbs(struct virtio_device *dev)
 {
-	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
-				      desc);
+	if (dev->config->synchronize_cbs) {
+		dev->config->synchronize_cbs(dev);
+	} else {
+		/*
+		 * A best effort fallback to synchronize with
+		 * interrupts, preemption and softirq disabled
+		 * regions. See comment above synchronize_rcu().
+		 */
+		synchronize_rcu();
+	}
 }
 
 /**
 * virtio_device_ready - enable vq use in probe function
- * @vdev: the device
+ * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
@@ -220,7 +345,29 @@ void virtio_device_ready(struct virtio_device *dev)
 {
 	unsigned status = dev->config->get_status(dev);
 
-	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+	/*
+	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
+	 * will see the driver specific setup if it sees vq->broken
+	 * as false (even if the notifications come before DRIVER_OK).
+	 */
+	virtio_synchronize_cbs(dev);
+	__virtio_unbreak_device(dev);
+#endif
+	/*
+	 * The transport should ensure the visibility of vq->broken
+	 * before setting DRIVER_OK. See the comments for the transport
+	 * specific set_status() method.
+	 *
+	 * A well behaved device will only notify a virtqueue after
+	 * DRIVER_OK, this means the device should "see" the coherent
+	 * memory write that set vq->broken as false which is done by
+	 * the driver when it sees DRIVER_OK, then the following
+	 * driver's vring_interrupt() will see vq->broken as false so
+	 * we won't lose any notification.
+	 */
 	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
 
@@ -235,9 +382,9 @@ const char *virtio_bus_name(struct virtio_device *vdev)
 /**
 * virtqueue_set_affinity - setting affinity for a virtqueue
 * @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
 *
- * Pay attention the function are best-effort: the affinity hint may not be set
+ * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
* */ @@ -250,6 +397,15 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask) return 0; } +static inline +bool virtio_get_shm_region(struct virtio_device *vdev, + struct virtio_shm_region *region_out, u8 id) +{ + if (!vdev->config->get_shm_region) + return false; + return vdev->config->get_shm_region(vdev, region_out, id); +} + static inline bool virtio_is_little_endian(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || @@ -287,68 +443,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } +#define virtio_to_cpu(vdev, x) \ + _Generic((x), \ + __u8: (x), \ + __virtio16: virtio16_to_cpu((vdev), (x)), \ + __virtio32: virtio32_to_cpu((vdev), (x)), \ + __virtio64: virtio64_to_cpu((vdev), (x)) \ + ) + +#define cpu_to_virtio(vdev, x, m) \ + _Generic((m), \ + __u8: (x), \ + __virtio16: cpu_to_virtio16((vdev), (x)), \ + __virtio32: cpu_to_virtio32((vdev), (x)), \ + __virtio64: cpu_to_virtio64((vdev), (x)) \ + ) + +#define __virtio_native_type(structname, member) \ + typeof(virtio_to_cpu(NULL, ((structname*)0)->member)) + /* Config space accessors. */ #define virtio_cread(vdev, structname, member, ptr) \ do { \ - /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ - (*ptr) = 1; \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - *(ptr) = virtio_cread8(vdev, \ - offsetof(structname, member)); \ - break; \ case 2: \ - *(ptr) = virtio_cread16(vdev, \ - offsetof(structname, member)); \ - break; \ case 4: \ - *(ptr) = virtio_cread32(vdev, \ - offsetof(structname, member)); \ - break; \ - case 8: \ - *(ptr) = virtio_cread64(vdev, \ - offsetof(structname, member)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \ } while(0) /* Config space accessors. */ #define virtio_cwrite(vdev, structname, member, ptr) \ do { \ - /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ - BUG_ON((*ptr) == 1); \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \ \ - switch (sizeof(*ptr)) { \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ + } while(0) + +/* + * Nothing virtio-specific about these, but let's worry about generalizing + * these later. + */ +#define virtio_le_to_cpu(x) \ + _Generic((x), \ + __u8: (u8)(x), \ + __le16: (u16)le16_to_cpu(x), \ + __le32: (u32)le32_to_cpu(x), \ + __le64: (u64)le64_to_cpu(x) \ + ) + +#define virtio_cpu_to_le(x, m) \ + _Generic((m), \ + __u8: (x), \ + __le16: cpu_to_le16(x), \ + __le32: cpu_to_le32(x), \ + __le64: cpu_to_le64(x) \ + ) + +/* LE (e.g. modern) Config space accessors. 
*/ +#define virtio_cread_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \ + \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - virtio_cwrite8(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ case 2: \ - virtio_cwrite16(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ case 4: \ - virtio_cwrite32(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 8: \ - virtio_cwrite64(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ + } while(0) + +#define virtio_cwrite_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ } while(0) + /* Read @count fields, @bytes each. */ static inline void __virtio_cread_many(struct virtio_device *vdev, unsigned int offset, @@ -358,6 +579,7 @@ static inline void __virtio_cread_many(struct virtio_device *vdev, vdev->config->generation(vdev) : 0; int i; + might_sleep(); do { old = gen; @@ -380,6 +602,8 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev, static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) { u8 ret; + + might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); return ret; } @@ -387,52 +611,67 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) static inline void virtio_cwrite8(struct virtio_device *vdev, unsigned int offset, u8 val) { + might_sleep(); vdev->config->set(vdev, offset, &val, sizeof(val)); } static inline u16 virtio_cread16(struct virtio_device *vdev, unsigned int offset) { - u16 ret; + __virtio16 ret; + + might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio16_to_cpu(vdev, (__force __virtio16)ret); + return virtio16_to_cpu(vdev, ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { - val = (__force u16)cpu_to_virtio16(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + __virtio16 v; + + might_sleep(); + v = cpu_to_virtio16(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u32 virtio_cread32(struct virtio_device *vdev, unsigned int offset) { - u32 ret; + __virtio32 ret; + + might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio32_to_cpu(vdev, (__force __virtio32)ret); + return virtio32_to_cpu(vdev, ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { - val = (__force u32)cpu_to_virtio32(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + __virtio32 v; + + might_sleep(); + v = cpu_to_virtio32(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u64 virtio_cread64(struct virtio_device *vdev, unsigned 
int offset) { - u64 ret; + __virtio64 ret; + __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); - return virtio64_to_cpu(vdev, (__force __virtio64)ret); + return virtio64_to_cpu(vdev, ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { - val = (__force u64)cpu_to_virtio64(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + __virtio64 v; + + might_sleep(); + v = cpu_to_virtio64(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } /* Conditional config space accessors. */ @@ -446,4 +685,15 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) +/* Conditional config space accessors. */ +#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \ + ({ \ + int _r = 0; \ + if (!virtio_has_feature(vdev, fbit)) \ + _r = -ENOENT; \ + else \ + virtio_cread_le((vdev), structname, member, ptr); \ + _r; \ + }) + #endif /* _LINUX_VIRTIO_CONFIG_H */ |
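
A minimal usage sketch of the reworked find_vqs() interface above: instead of parallel callbacks[]/names[] arrays, a driver fills an array of struct virtqueue_info and calls virtio_find_vqs(). The driver function and the rx/tx names and callbacks below are hypothetical, made up purely to illustrate the calling convention (assumes #include <linux/virtio_config.h>):

	/* Hypothetical driver code, not part of the patch above. */
	static void my_rx_done(struct virtqueue *vq)
	{
		/* process used rx buffers (illustrative stub) */
	}

	static void my_tx_done(struct virtqueue *vq)
	{
		/* reclaim sent tx buffers (illustrative stub) */
	}

	static int my_setup_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
	{
		struct virtqueue_info vqs_info[] = {
			{ .name = "rx", .callback = my_rx_done },
			{ .name = "tx", .callback = my_tx_done, .ctx = true },
		};

		/* virtio_find_vqs() simply forwards to vdev->config->find_vqs(). */
		return virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
	}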
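Likewise, a sketch of the little-endian config accessor virtio_cread_le() added by this diff. The config struct and its member are invented for illustration; real drivers use their device's uapi config layout:

	struct my_example_config {		/* illustrative, not a real layout */
		__le32 queue_size;
	};

	static u32 my_read_queue_size(struct virtio_device *vdev)
	{
		u32 qsize;

		/* Reads the field from config space and converts LE to CPU order. */
		virtio_cread_le(vdev, struct my_example_config, queue_size, &qsize);
		return qsize;
	}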
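The conditional form added at the end of the diff, virtio_cread_le_feature(), evaluates to -ENOENT when the gating feature bit was not negotiated. The feature bit number and struct below are again hypothetical:

	#define MY_F_MAX_LEN 10			/* illustrative feature bit */

	struct my_example_config2 {		/* illustrative layout */
		__le32 max_len;
	};

	static u32 my_read_max_len(struct virtio_device *vdev)
	{
		u32 max_len;

		if (virtio_cread_le_feature(vdev, MY_F_MAX_LEN,
					    struct my_example_config2, max_len,
					    &max_len))
			max_len = 128;		/* feature absent: fall back to a default */
		return max_len;
	}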
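Finally, a sketch of the shared-memory helper: virtio_get_shm_region() returns false when the transport does not implement get_shm_region or the requested region id does not exist. The id value and error handling are illustrative:

	static int my_map_shm(struct virtio_device *vdev)
	{
		struct virtio_shm_region region;

		if (!virtio_get_shm_region(vdev, &region, 0 /* illustrative id */))
			return -ENODEV;

		/* region.addr and region.len now describe the device-provided window. */
		return 0;
	}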
