Diffstat (limited to 'include/linux/virtio_config.h'):
 include/linux/virtio_config.h | 635 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 584 insertions(+), 51 deletions(-)
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 29b9104232b4..69f84ea85d71 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -1,13 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
+#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>
+struct irq_affinity;
+
+struct virtio_shm_region {
+ u64 addr;
+ u64 len;
+};
+
+typedef void vq_callback_t(struct virtqueue *);
+
/**
- * virtio_config_ops - operations for configuring a virtio device
+ * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
+ * @name: virtqueue description. Used mainly for debugging, NULL for
+ * a virtqueue unused by the driver.
+ * @callback: A callback to invoke on a used buffer notification.
+ * NULL for a virtqueue that does not need a callback.
+ * @ctx: whether to maintain an extra context per virtqueue.
+ */
+struct virtqueue_info {
+ const char *name;
+ vq_callback_t *callback;
+ bool ctx;
+};
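
A minimal usage sketch (the driver name, queue names and callbacks are hypothetical): a driver fills a virtqueue_info array and passes it to virtio_find_vqs(), declared further down in this header.

	static void mydrv_rx_done(struct virtqueue *vq);	/* hypothetical */
	static void mydrv_tx_done(struct virtqueue *vq);	/* hypothetical */

	static int mydrv_init_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
	{
		struct virtqueue_info vqs_info[] = {
			{ .name = "rx", .callback = mydrv_rx_done },
			{ .name = "tx", .callback = mydrv_tx_done },
		};

		return virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
	}
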
+
+/**
+ * struct virtio_config_ops - operations for configuring a virtio device
+ * Note: Do not assume that a transport implements all of the operations
+ * getting/setting a value as a simple read/write! Generally speaking,
+ * any of @get/@set, @get_status/@set_status, or @get_features/
+ * @finalize_features are NOT safe to be called from an atomic
+ * context.
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
@@ -18,6 +49,9 @@
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
+ * @generation: config generation counter (optional)
+ * vdev: the virtio_device
+ * Returns the config generation counter
* @get_status: read the status byte
* vdev: the virtio_device
* Returns the status byte
@@ -33,43 +67,148 @@
* vdev: the virtio_device
* nvqs: the number of virtqueues to find
* vqs: on success, includes new virtqueues
- * callbacks: array of callbacks, for each virtqueue
- * include a NULL entry for vqs that do not need a callback
- * names: array of virtqueue names (mainly for debugging)
- * include a NULL entry for vqs unused by driver
+ * vqs_info: array of virtqueue info structures
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
+ * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
+ * The function guarantees that all memory operations on the
+ * queue before it are visible to the vring_interrupt() that is
+ * called after it.
+ * vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 32 feature bits (all we currently need).
+ * Returns the first 64 feature bits.
+ * @get_extended_features:
+ * vdev: the virtio_device
+ * Returns the first VIRTIO_FEATURES_BITS feature bits (all we currently
+ * need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
- * This gives the final feature bits for the device: it can change
+ * This sends the driver feature bits to the device: it can change
* the dev->feature bits if it wants.
- * @bus_name: return the bus name associated with the device
+ * Note that despite the name this can be called any number of
+ * times.
+ * Returns 0 on success or error status
+ * @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
* the caller can then copy.
- * @set_vq_affinity: set the affinity for a virtqueue.
+ * @set_vq_affinity: set the affinity for a virtqueue (optional).
+ * @get_vq_affinity: get the affinity for a virtqueue (optional).
+ * @get_shm_region: get a shared memory region based on the index.
+ * @disable_vq_and_reset: reset a queue individually (optional).
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * disable_vq_and_reset will guarantee that the callbacks are disabled and
+ * synchronized.
+ * Except for the callback, the caller should guarantee that the vring is
+ * not accessed by any functions of virtqueue.
+ * @enable_vq_after_reset: enable a reset queue
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * If disable_vq_and_reset is set, then enable_vq_after_reset must also be
+ * set.
*/
-typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len);
+ u32 (*generation)(struct virtio_device *vdev);
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
- int (*find_vqs)(struct virtio_device *, unsigned nvqs,
+ int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char *names[]);
+ struct virtqueue_info vqs_info[],
+ struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
- u32 (*get_features)(struct virtio_device *vdev);
- void (*finalize_features)(struct virtio_device *vdev);
+ void (*synchronize_cbs)(struct virtio_device *);
+ u64 (*get_features)(struct virtio_device *vdev);
+ void (*get_extended_features)(struct virtio_device *vdev,
+ u64 *features);
+ int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
- int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+ int (*set_vq_affinity)(struct virtqueue *vq,
+ const struct cpumask *cpu_mask);
+ const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
+ int index);
+ bool (*get_shm_region)(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id);
+ int (*disable_vq_and_reset)(struct virtqueue *vq);
+ int (*enable_vq_after_reset)(struct virtqueue *vq);
+};
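
For illustration, a transport typically provides a static ops table wired to its own implementations; the function names here are hypothetical and only a subset of the optional hooks is shown.

	static const struct virtio_config_ops mytransport_config_ops = {
		.get			= mytransport_get,		/* hypothetical */
		.set			= mytransport_set,
		.generation		= mytransport_generation,	/* optional */
		.get_status		= mytransport_get_status,
		.set_status		= mytransport_set_status,
		.reset			= mytransport_reset,
		.find_vqs		= mytransport_find_vqs,
		.del_vqs		= mytransport_del_vqs,
		.get_features		= mytransport_get_features,
		.finalize_features	= mytransport_finalize_features,
		.bus_name		= mytransport_bus_name,		/* optional */
	};
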
+
+/**
+ * struct virtio_map_ops - operations for mapping buffer for a virtio device
+ * Note: A transport that has its own mapping logic must implement
+ * all of the operations.
+ * @map_page: map a buffer to the device
+ * map: metadata for performing mapping
+ * page: the page that will be mapped by the device
+ * offset: the offset in the page for a buffer
+ * size: the buffer size
+ * dir: mapping direction
+ * attrs: mapping attributes
+ * Returns the mapped address
+ * @unmap_page: unmap a buffer from the device
+ * map: device specific mapping map
+ * map_handle: the mapped address
+ * size: the buffer size
+ * dir: mapping direction
+ * attrs: unmapping attributes
+ * @sync_single_for_cpu: sync a single buffer from device to cpu
+ * map: metadata for performing mapping
+ * map_handle: the mapping address to sync
+ * size: the size of the buffer
+ * dir: synchronization direction
+ * @sync_single_for_device: sync a single buffer from cpu to device
+ * map: metadata for performing mapping
+ * map_handle: the mapping address to sync
+ * size: the size of the buffer
+ * dir: synchronization direction
+ * @alloc: alloc a coherent buffer mapping
+ * map: metadata for performing mapping
+ * size: the size of the buffer
+ * map_handle: on return, the mapping address of the allocated buffer
+ * gfp: allocation flag (GFP_XXX)
+ * Returns virtual address of the allocated buffer
+ * @free: free a coherent buffer mapping
+ * map: metadata for performing mapping
+ * size: the size of the buffer
+ * vaddr: virtual address of the buffer
+ * map_handle: the mapping address that needs to be freed
+ * attrs: unmapping attributes
+ * @need_sync: check whether the buffer needs synchronization
+ * map: metadata for performing mapping
+ * map_handle: the mapped address
+ * Returns whether the buffer needs synchronization
+ * @mapping_error: check whether the mapping address is an error
+ * map: metadata for performing mapping
+ * map_handle: the mapped address
+ * @max_mapping_size: get the maximum buffer size that can be mapped
+ * map: metadata for performing mapping
+ * Returns the maximum buffer size that can be mapped
+ */
+struct virtio_map_ops {
+ dma_addr_t (*map_page)(union virtio_map map, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(union virtio_map map,
+ dma_addr_t map_handle, size_t size,
+ enum dma_data_direction dir);
+ void *(*alloc)(union virtio_map map, size_t size,
+ dma_addr_t *map_handle, gfp_t gfp);
+ void (*free)(union virtio_map map, size_t size, void *vaddr,
+ dma_addr_t map_handle, unsigned long attrs);
+ bool (*need_sync)(union virtio_map map, dma_addr_t map_handle);
+ int (*mapping_error)(union virtio_map map, dma_addr_t map_handle);
+ size_t (*max_mapping_size)(union virtio_map map);
};
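
A hedged sketch of one hook for a transport that maps through the regular DMA API; union virtio_map is treated as opaque here and mytransport_dev() is a hypothetical accessor returning the struct device to map against.

	static dma_addr_t mytransport_map_page(union virtio_map map, struct page *page,
					       unsigned long offset, size_t size,
					       enum dma_data_direction dir,
					       unsigned long attrs)
	{
		/* Delegate to the DMA API on the transport's device. */
		return dma_map_page_attrs(mytransport_dev(map), page, offset,
					  size, dir, attrs);
	}
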
/* If driver didn't advertise the feature, it will never appear. */
@@ -77,6 +216,41 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit);
/**
+ * __virtio_test_bit - helper to test feature bits. For use by transports.
+ * Devices should normally use virtio_has_feature,
+ * which includes more checks.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline bool __virtio_test_bit(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ return virtio_features_test_bit(vdev->features_array, fbit);
+}
+
+/**
+ * __virtio_set_bit - helper to set feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_set_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ virtio_features_set_bit(vdev->features_array, fbit);
+}
+
+/**
+ * __virtio_clear_bit - helper to clear feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_clear_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ virtio_features_clear_bit(vdev->features_array, fbit);
+}
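
A sketch of the intended call site (transport code, not drivers): a finalize_features() implementation dropping a ring feature the transport cannot honour; the mytransport_supports_indirect() helper is hypothetical.

	static int mytransport_finalize_features(struct virtio_device *vdev)
	{
		if (!mytransport_supports_indirect(vdev))	/* hypothetical */
			__virtio_clear_bit(vdev, VIRTIO_RING_F_INDIRECT_DESC);

		return 0;
	}
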
+
+/**
* virtio_has_feature - helper to determine if this device has this feature.
* @vdev: the device
* @fbit: the feature bit
@@ -84,58 +258,119 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
static inline bool virtio_has_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 32);
- else
- BUG_ON(fbit >= 32);
-
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
- return test_bit(fbit, vdev->features);
+ return __virtio_test_bit(vdev, fbit);
}
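
Typical driver-side use is to gate optional code paths on a negotiated bit, e.g. (illustrative, virtio-net names, using the virtio_cread16() helper defined later in this header):

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
		max_queue_pairs = virtio_cread16(vdev,
				offsetof(struct virtio_net_config,
					 max_virtqueue_pairs));
	else
		max_queue_pairs = 1;
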
-/**
- * virtio_config_val - look for a feature and get a virtio config entry.
- * @vdev: the virtio device
- * @fbit: the feature bit
- * @offset: the type to search for.
- * @v: a pointer to the value to fill in.
- *
- * The return value is -ENOENT if the feature doesn't exist. Otherwise
- * the config value is copied into whatever is pointed to by v. */
-#define virtio_config_val(vdev, fbit, offset, v) \
- virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))
+static inline void virtio_get_features(struct virtio_device *vdev,
+ u64 *features_out)
+{
+ if (vdev->config->get_extended_features) {
+ vdev->config->get_extended_features(vdev, features_out);
+ return;
+ }
-#define virtio_config_val_len(vdev, fbit, offset, v, len) \
- virtio_config_buf((vdev), (fbit), (offset), (v), (len))
+ virtio_features_from_u64(features_out,
+ vdev->config->get_features(vdev));
+}
-static inline int virtio_config_buf(struct virtio_device *vdev,
- unsigned int fbit,
- unsigned int offset,
- void *buf, unsigned len)
+/**
+ * virtio_has_dma_quirk - determine whether this device has the DMA quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
- if (!virtio_has_feature(vdev, fbit))
- return -ENOENT;
+ /*
+ * Note the reverse polarity of the quirk feature (compared to most
+ * other features), this is for compatibility with legacy systems.
+ */
+ return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
+}
- vdev->config->get(vdev, offset, buf, len);
- return 0;
+static inline
+int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[],
+ struct irq_affinity *desc)
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
}
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
{
- vq_callback_t *callbacks[] = { c };
- const char *names[] = { n };
+ struct virtqueue_info vqs_info[] = {
+ { n, c },
+ };
struct virtqueue *vq;
- int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);
+ int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);
+
if (err < 0)
return ERR_PTR(err);
return vq;
}
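
For a single-queue device the helper above is enough (sketch; the callback and queue name are illustrative):

	vq = virtio_find_single_vq(vdev, mydrv_done, "requests");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
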
+/**
+ * virtio_synchronize_cbs - synchronize with virtqueue callbacks
+ * @dev: the virtio device
+ */
+static inline
+void virtio_synchronize_cbs(struct virtio_device *dev)
+{
+ if (dev->config->synchronize_cbs) {
+ dev->config->synchronize_cbs(dev);
+ } else {
+ /*
+ * A best effort fallback to synchronize with
+ * interrupts, preemption and softirq disabled
+ * regions. See comment above synchronize_rcu().
+ */
+ synchronize_rcu();
+ }
+}
+
+/**
+ * virtio_device_ready - enable vq use in probe function
+ * @dev: the virtio device
+ *
+ * Driver must call this to use vqs in the probe function.
+ *
+ * Note: vqs are enabled automatically after probe returns.
+ */
+static inline
+void virtio_device_ready(struct virtio_device *dev)
+{
+ unsigned status = dev->config->get_status(dev);
+
+ WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ /*
+ * The virtio_synchronize_cbs() makes sure vring_interrupt()
+ * will see the driver specific setup if it sees vq->broken
+ * as false (even if the notifications come before DRIVER_OK).
+ */
+ virtio_synchronize_cbs(dev);
+ __virtio_unbreak_device(dev);
+#endif
+ /*
+ * The transport should ensure the visibility of vq->broken
+ * before setting DRIVER_OK. See the comments for the transport
+ * specific set_status() method.
+ *
+ * A well behaved device will only notify a virtqueue after
+ * DRIVER_OK, this means the device should "see" the coherent
+ * memory write that set vq->broken as false which is done by
+ * the driver when it sees DRIVER_OK, then the following
+ * driver's vring_interrupt() will see vq->broken as false so
+ * we won't lose any notification.
+ */
+ dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
+}
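
An illustrative probe ordering that follows the rules spelled out above (all mydrv_* helpers are hypothetical): set up the virtqueues, call virtio_device_ready(), and only then start feeding buffers to the device.

	static int mydrv_probe(struct virtio_device *vdev)
	{
		int err;

		err = mydrv_setup_vqs(vdev);		/* hypothetical */
		if (err)
			return err;

		virtio_device_ready(vdev);

		/* Only now is it safe to add buffers and kick the device. */
		mydrv_fill_rx_buffers(vdev);		/* hypothetical */
		return 0;
	}
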
+
static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
@@ -147,20 +382,318 @@ const char *virtio_bus_name(struct virtio_device *vdev)
/**
* virtqueue_set_affinity - setting affinity for a virtqueue
* @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
*
- * Pay attention the function are best-effort: the affinity hint may not be set
+ * Note that this function is best-effort: the affinity hint may not be set
* due to config support, irq type and sharing.
*
*/
static inline
-int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
struct virtio_device *vdev = vq->vdev;
if (vdev->config->set_vq_affinity)
- return vdev->config->set_vq_affinity(vq, cpu);
+ return vdev->config->set_vq_affinity(vq, cpu_mask);
return 0;
}
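
A best-effort sketch spreading queues over online CPUs (the vqs[] array and nvqs count are hypothetical driver state):

	int cpu, i = 0;

	for_each_online_cpu(cpu) {
		if (i == nvqs)
			break;
		virtqueue_set_affinity(vqs[i++], cpumask_of(cpu));
	}
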
+static inline
+bool virtio_get_shm_region(struct virtio_device *vdev,
+ struct virtio_shm_region *region_out, u8 id)
+{
+ if (!vdev->config->get_shm_region)
+ return false;
+ return vdev->config->get_shm_region(vdev, region_out, id);
+}
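
Illustrative lookup of a shared memory region by ID, as a DAX-style driver might do (the region ID macro is hypothetical):

	struct virtio_shm_region shm;

	if (!virtio_get_shm_region(vdev, &shm, MYDRV_SHMCAP_ID_CACHE))
		return -ENXIO;
	/* shm.addr and shm.len now describe the host-provided window. */
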
+
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+ return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+ virtio_legacy_is_little_endian();
+}
+
+/* Memory accessors */
+static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
+}
+
+static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
+{
+ return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
+}
+
+static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
+{
+ return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
+}
+
+static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
+{
+ return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
+}
+
+static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
+{
+ return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
+}
+
+static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
+{
+ return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
+}
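
These accessors convert between guest-CPU order and "virtio endian", which is little-endian for modern (VIRTIO_F_VERSION_1) devices and guest-native for legacy ones. A trivial round trip for illustration:

	__virtio32 wire = cpu_to_virtio32(vdev, 1024);
	u32 host = virtio32_to_cpu(vdev, wire);		/* host == 1024 */
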
+
+#define virtio_to_cpu(vdev, x) \
+ _Generic((x), \
+ __u8: (x), \
+ __virtio16: virtio16_to_cpu((vdev), (x)), \
+ __virtio32: virtio32_to_cpu((vdev), (x)), \
+ __virtio64: virtio64_to_cpu((vdev), (x)) \
+ )
+
+#define cpu_to_virtio(vdev, x, m) \
+ _Generic((m), \
+ __u8: (x), \
+ __virtio16: cpu_to_virtio16((vdev), (x)), \
+ __virtio32: cpu_to_virtio32((vdev), (x)), \
+ __virtio64: cpu_to_virtio64((vdev), (x)) \
+ )
+
+#define __virtio_native_type(structname, member) \
+ typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
+
+/* Config space accessors. */
+#define virtio_cread(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cread_v; \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
+ \
+ switch (sizeof(virtio_cread_v)) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ vdev->config->get((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ sizeof(virtio_cread_v)); \
+ break; \
+ default: \
+ __virtio_cread_many((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ 1, \
+ sizeof(virtio_cread_v)); \
+ break; \
+ } \
+ *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
+ } while(0)
+
+/* Config space accessors. */
+#define virtio_cwrite(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cwrite_v = \
+ cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
+ \
+ vdev->config->set((vdev), offsetof(structname, member), \
+ &virtio_cwrite_v, \
+ sizeof(virtio_cwrite_v)); \
+ } while(0)
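
For illustration, reading and writing typed config members with a well-known layout (struct virtio_blk_config comes from <uapi/linux/virtio_blk.h>; the writeback value is illustrative):

	u64 capacity;
	u8 writeback = 1;

	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
	virtio_cwrite(vdev, struct virtio_blk_config, wce, &writeback);
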
+
+/*
+ * Nothing virtio-specific about these, but let's worry about generalizing
+ * these later.
+ */
+#define virtio_le_to_cpu(x) \
+ _Generic((x), \
+ __u8: (u8)(x), \
+ __le16: (u16)le16_to_cpu(x), \
+ __le32: (u32)le32_to_cpu(x), \
+ __le64: (u64)le64_to_cpu(x) \
+ )
+
+#define virtio_cpu_to_le(x, m) \
+ _Generic((m), \
+ __u8: (x), \
+ __le16: cpu_to_le16(x), \
+ __le32: cpu_to_le32(x), \
+ __le64: cpu_to_le64(x) \
+ )
+
+/* LE (e.g. modern) Config space accessors. */
+#define virtio_cread_le(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cread_v; \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
+ \
+ switch (sizeof(virtio_cread_v)) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ vdev->config->get((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ sizeof(virtio_cread_v)); \
+ break; \
+ default: \
+ __virtio_cread_many((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ 1, \
+ sizeof(virtio_cread_v)); \
+ break; \
+ } \
+ *(ptr) = virtio_le_to_cpu(virtio_cread_v); \
+ } while(0)
+
+#define virtio_cwrite_le(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cwrite_v = \
+ virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
+ \
+ vdev->config->set((vdev), offsetof(structname, member), \
+ &virtio_cwrite_v, \
+ sizeof(virtio_cwrite_v)); \
+ } while(0)
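
The _le variants are for devices whose config fields are declared __le* in the UAPI header, e.g. (illustrative, virtio-balloon layout):

	u32 num_pages;

	virtio_cread_le(vdev, struct virtio_balloon_config, num_pages, &num_pages);
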
+
+
+/* Read @count fields, @bytes each. */
+static inline void __virtio_cread_many(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t count, size_t bytes)
+{
+ u32 old, gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ int i;
+
+ might_sleep();
+ do {
+ old = gen;
+
+ for (i = 0; i < count; i++)
+ vdev->config->get(vdev, offset + bytes * i,
+ buf + i * bytes, bytes);
+
+ gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ } while (gen != old);
+}
+
+static inline void virtio_cread_bytes(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t len)
+{
+ __virtio_cread_many(vdev, offset, buf, len, 1);
+}
+
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+
+ might_sleep();
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite8(struct virtio_device *vdev,
+ unsigned int offset, u8 val)
+{
+ might_sleep();
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u16 virtio_cread16(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ __virtio16 ret;
+
+ might_sleep();
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return virtio16_to_cpu(vdev, ret);
+}
+
+static inline void virtio_cwrite16(struct virtio_device *vdev,
+ unsigned int offset, u16 val)
+{
+ __virtio16 v;
+
+ might_sleep();
+ v = cpu_to_virtio16(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
+}
+
+static inline u32 virtio_cread32(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ __virtio32 ret;
+
+ might_sleep();
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return virtio32_to_cpu(vdev, ret);
+}
+
+static inline void virtio_cwrite32(struct virtio_device *vdev,
+ unsigned int offset, u32 val)
+{
+ __virtio32 v;
+
+ might_sleep();
+ v = cpu_to_virtio32(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
+}
+
+static inline u64 virtio_cread64(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ __virtio64 ret;
+
+ __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
+ return virtio64_to_cpu(vdev, ret);
+}
+
+static inline void virtio_cwrite64(struct virtio_device *vdev,
+ unsigned int offset, u64 val)
+{
+ __virtio64 v;
+
+ might_sleep();
+ v = cpu_to_virtio64(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
+}
+
+/* Conditional config space accessors. */
+#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread((vdev), structname, member, ptr); \
+ _r; \
+ })
+
+/* Conditional config space accessors. */
+#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread_le((vdev), structname, member, ptr); \
+ _r; \
+ })
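
Illustrative use of the conditional accessor: read a config field only when the matching feature bit was negotiated, falling back to a default on -ENOENT (names from virtio-net; the default is illustrative):

	u16 mtu;

	if (virtio_cread_feature(vdev, VIRTIO_NET_F_MTU,
				 struct virtio_net_config, mtu, &mtu))
		mtu = ETH_DATA_LEN;
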
#endif /* _LINUX_VIRTIO_CONFIG_H */