Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/Kconfig                         91
-rw-r--r--  drivers/virtio/Makefile                         7
-rw-r--r--  drivers/virtio/virtio.c                       267
-rw-r--r--  drivers/virtio/virtio_balloon.c               234
-rw-r--r--  drivers/virtio/virtio_debug.c                 117
-rw-r--r--  drivers/virtio/virtio_dma_buf.c                 5
-rw-r--r--  drivers/virtio/virtio_input.c                  14
-rw-r--r--  drivers/virtio/virtio_mem.c                   388
-rw-r--r--  drivers/virtio/virtio_mmio.c                  111
-rw-r--r--  drivers/virtio/virtio_pci_admin_legacy_io.c   244
-rw-r--r--  drivers/virtio/virtio_pci_common.c            335
-rw-r--r--  drivers/virtio/virtio_pci_common.h             76
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c              1
-rw-r--r--  drivers/virtio/virtio_pci_legacy_dev.c          4
-rw-r--r--  drivers/virtio/virtio_pci_modern.c            777
-rw-r--r--  drivers/virtio/virtio_pci_modern_dev.c        139
-rw-r--r--  drivers/virtio/virtio_ring.c                 1237
-rw-r--r--  drivers/virtio/virtio_rtc_arm.c                23
-rw-r--r--  drivers/virtio/virtio_rtc_class.c             262
-rw-r--r--  drivers/virtio/virtio_rtc_driver.c           1407
-rw-r--r--  drivers/virtio/virtio_rtc_internal.h          122
-rw-r--r--  drivers/virtio/virtio_rtc_ptp.c               347
-rw-r--r--  drivers/virtio/virtio_vdpa.c                  214
23 files changed, 5589 insertions, 833 deletions
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 0a53a61231c2..6db5235a7693 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,6 +60,11 @@ config VIRTIO_PCI
If unsure, say M.
+config VIRTIO_PCI_ADMIN_LEGACY
+ bool
+ depends on VIRTIO_PCI && (X86 || COMPILE_TEST)
+ default y
+
config VIRTIO_PCI_LEGACY
bool "Support for legacy virtio draft 0.9.X and older devices"
default y
@@ -117,7 +122,7 @@ config VIRTIO_BALLOON
config VIRTIO_MEM
tristate "Virtio mem driver"
- depends on X86_64 || ARM64
+ depends on X86_64 || ARM64 || RISCV || S390
depends on VIRTIO
depends on MEMORY_HOTPLUG
depends on MEMORY_HOTREMOVE
@@ -127,11 +132,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing memory to be hotplugged and hotunplugged.
- This driver currently only supports x86-64 and arm64. Although it
- should compile on other architectures that implement memory
- hot(un)plug, architecture-specific and/or common
- code changes may be required for virtio-mem, kdump and kexec to work as
- expected.
+ This driver currently supports x86-64, arm64, riscv and s390.
+ Although it should compile on other architectures that implement
+ memory hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to
+ work as expected.
If unsure, say M.
@@ -173,4 +178,78 @@ config VIRTIO_DMA_SHARED_BUFFER
This option adds a flavor of dma buffers that are backed by
virtio resources.
+config VIRTIO_DEBUG
+ bool "Debug facilities"
+ depends on VIRTIO
+ help
+ Enable this to expose debug facilities over debugfs.
+	  This allows debugging features: it shows which features the device
+	  advertises and lets you set a filter for the features used by the
+	  driver.
+
+ If unsure, say N.
+
+config VIRTIO_RTC
+ tristate "Virtio RTC driver"
+ depends on VIRTIO
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver provides current time from a Virtio RTC device. The driver
+ provides the time through one or more clocks. The Virtio RTC PTP
+ clocks and/or the Real Time Clock driver for Virtio RTC must be
+ enabled to expose the clocks to userspace.
+
+ To compile this code as a module, choose M here: the module will be
+ called virtio_rtc.
+
+ If unsure, say M.
+
+if VIRTIO_RTC
+
+comment "WARNING: Consider enabling VIRTIO_RTC_PTP and/or VIRTIO_RTC_CLASS."
+ depends on !VIRTIO_RTC_PTP && !VIRTIO_RTC_CLASS
+
+comment "Enable PTP_1588_CLOCK in order to enable VIRTIO_RTC_PTP."
+ depends on PTP_1588_CLOCK=n
+
+config VIRTIO_RTC_PTP
+ bool "Virtio RTC PTP clocks"
+ default y
+ depends on PTP_1588_CLOCK
+ help
+ This exposes any Virtio RTC clocks as PTP Hardware Clocks (PHCs) to
+ userspace. The PHC sysfs attribute "clock_name" describes the clock
+ type.
+
+ If unsure, say Y.
+
+config VIRTIO_RTC_ARM
+ bool "Virtio RTC cross-timestamping using Arm Generic Timer"
+ default y
+ depends on VIRTIO_RTC_PTP && ARM_ARCH_TIMER
+ help
+ This enables Virtio RTC cross-timestamping using the Arm Generic Timer.
+ It only has an effect if the Virtio RTC device also supports this. The
+ cross-timestamp is available through the PTP clock driver precise
+ cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE2 aka
+ PTP_SYS_OFFSET_PRECISE).
+
+ If unsure, say Y.
+
+comment "Enable RTC_CLASS in order to enable VIRTIO_RTC_CLASS."
+ depends on RTC_CLASS=n
+
+config VIRTIO_RTC_CLASS
+ bool "Real Time Clock driver for Virtio RTC"
+ default y
+ depends on RTC_CLASS
+ help
+ This exposes the Virtio RTC UTC-like clock as a Linux Real Time Clock.
+ It only has an effect if the Virtio RTC device has a UTC-like clock
+ which smears leap seconds to avoid steps. The Real Time Clock is
+ read-only, and may support setting an alarm.
+
+ If unsure, say Y.
+
+endif # VIRTIO_RTC
+
endif # VIRTIO_MENU
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 8e98d24917cc..eefcfe90d6b8 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -7,8 +7,15 @@ obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
+virtio_pci-$(CONFIG_VIRTIO_PCI_ADMIN_LEGACY) += virtio_pci_admin_legacy_io.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o
+obj-$(CONFIG_VIRTIO_DEBUG) += virtio_debug.o
+obj-$(CONFIG_VIRTIO_RTC) += virtio_rtc.o
+virtio_rtc-y := virtio_rtc_driver.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_PTP) += virtio_rtc_ptp.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_ARM) += virtio_rtc_arm.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_CLASS) += virtio_rtc_class.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9a80aedee1b..5bdc6b82b30b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -53,7 +53,7 @@ static ssize_t features_show(struct device *_d,
/* We actually represent this as a bitstring, as it could be
* arbitrary length in future. */
- for (i = 0; i < sizeof(dev->features)*8; i++)
+ for (i = 0; i < VIRTIO_FEATURES_BITS; i++)
len += sysfs_emit_at(buf, len, "%c",
__virtio_test_bit(dev, i) ? '1' : '0');
len += sysfs_emit_at(buf, len, "\n");
@@ -82,7 +82,7 @@ static inline int virtio_id_match(const struct virtio_device *dev,
/* This looks through all the IDs a driver claims to support. If any of them
* match, we return 1 and the kernel will call virtio_dev_probe(). */
-static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
+static int virtio_dev_match(struct device *_dv, const struct device_driver *_dr)
{
unsigned int i;
struct virtio_device *dev = dev_to_virtio(_dv);
@@ -95,9 +95,9 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
return 0;
}
-static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
+static int virtio_uevent(const struct device *_dv, struct kobj_uevent_env *env)
{
- struct virtio_device *dev = dev_to_virtio(_dv);
+ const struct virtio_device *dev = dev_to_virtio(_dv);
return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
dev->id.device, dev->id.vendor);
@@ -127,10 +127,12 @@ static void __virtio_config_changed(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- if (!dev->config_enabled)
+ if (!dev->config_core_enabled || dev->config_driver_disabled)
dev->config_change_pending = true;
- else if (drv && drv->config_changed)
+ else if (drv && drv->config_changed) {
drv->config_changed(dev);
+ dev->config_change_pending = false;
+ }
}
void virtio_config_changed(struct virtio_device *dev)
@@ -143,20 +145,51 @@ void virtio_config_changed(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_config_changed);
-static void virtio_config_disable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_disable - disable config change reporting by drivers
+ * @dev: the device to disable
+ *
+ * This is only allowed to be called by a driver and disabling can't
+ * be nested.
+ */
+void virtio_config_driver_disable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_driver_disabled = true;
spin_unlock_irq(&dev->config_lock);
}
+EXPORT_SYMBOL_GPL(virtio_config_driver_disable);
-static void virtio_config_enable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_enable - enable config change reporting by drivers
+ * @dev: the device to enable
+ *
+ * This is only allowed to be called by a driver and enabling can't
+ * be nested.
+ */
+void virtio_config_driver_enable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = true;
+ dev->config_driver_disabled = false;
+ if (dev->config_change_pending)
+ __virtio_config_changed(dev);
+ spin_unlock_irq(&dev->config_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_config_driver_enable);
+
+static void virtio_config_core_disable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = false;
+ spin_unlock_irq(&dev->config_lock);
+}
+
+static void virtio_config_core_enable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = true;
if (dev->config_change_pending)
__virtio_config_changed(dev);
- dev->config_change_pending = false;
spin_unlock_irq(&dev->config_lock);
}
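
For context, a minimal sketch of how a driver would use the new non-nesting pair added above (my_reconfigure() is a hypothetical driver callback; only virtio_config_driver_disable()/virtio_config_driver_enable() come from this patch). Any config change arriving while disabled is recorded in config_change_pending and replayed on enable:

/* Hedged sketch: quiesce config-change callbacks around a critical section. */
static void my_reconfigure(struct virtio_device *vdev)
{
	virtio_config_driver_disable(vdev);	/* must not be nested */

	/* ... update state that the config_changed handler also touches ... */

	/* A change that arrived in between is replayed from here. */
	virtio_config_driver_enable(vdev);
}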
@@ -239,22 +272,22 @@ static int virtio_dev_probe(struct device *_d)
int err, i;
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- u64 device_features;
- u64 driver_features;
+ u64 device_features[VIRTIO_FEATURES_U64S];
+ u64 driver_features[VIRTIO_FEATURES_U64S];
u64 driver_features_legacy;
/* We have a driver! */
virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
/* Figure out what features the device supports. */
- device_features = dev->config->get_features(dev);
+ virtio_get_features(dev, device_features);
/* Figure out what features the driver supports. */
- driver_features = 0;
+ virtio_features_zero(driver_features);
for (i = 0; i < drv->feature_table_size; i++) {
unsigned int f = drv->feature_table[i];
- BUG_ON(f >= 64);
- driver_features |= (1ULL << f);
+ if (!WARN_ON_ONCE(f >= VIRTIO_FEATURES_BITS))
+ virtio_features_set_bit(driver_features, f);
}
/* Some drivers have a separate feature table for virtio v1.0 */
@@ -262,21 +295,29 @@ static int virtio_dev_probe(struct device *_d)
driver_features_legacy = 0;
for (i = 0; i < drv->feature_table_size_legacy; i++) {
unsigned int f = drv->feature_table_legacy[i];
- BUG_ON(f >= 64);
- driver_features_legacy |= (1ULL << f);
+ if (!WARN_ON_ONCE(f >= 64))
+ driver_features_legacy |= (1ULL << f);
}
} else {
- driver_features_legacy = driver_features;
+ driver_features_legacy = driver_features[0];
+ }
+
+ if (virtio_features_test_bit(device_features, VIRTIO_F_VERSION_1)) {
+ for (i = 0; i < VIRTIO_FEATURES_U64S; ++i)
+ dev->features_array[i] = driver_features[i] &
+ device_features[i];
+ } else {
+ virtio_features_from_u64(dev->features_array,
+ driver_features_legacy &
+ device_features[0]);
}
- if (device_features & (1ULL << VIRTIO_F_VERSION_1))
- dev->features = driver_features & device_features;
- else
- dev->features = driver_features_legacy & device_features;
+ /* When debugging, user may filter some features by hand. */
+ virtio_debug_device_filter_features(dev);
/* Transport features always preserved to pass to finalize_features. */
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
- if (device_features & (1ULL << i))
+ if (virtio_features_test_bit(device_features, i))
__virtio_set_bit(dev, i);
err = dev->config->finalize_features(dev);
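
The negotiation above now operates on a fixed-width feature bitmap rather than a single u64. A minimal sketch of the assumed helper semantics (VIRTIO_FEATURES_U64S = 2 is an illustrative value; the exact kernel definitions may differ):

/* Hedged sketch of the extended feature-bitmap helpers used above. */
#define VIRTIO_FEATURES_U64S	2	/* assumption for illustration */
#define VIRTIO_FEATURES_BITS	(VIRTIO_FEATURES_U64S * 64)

static inline void virtio_features_zero(u64 *fs)
{
	memset(fs, 0, VIRTIO_FEATURES_U64S * sizeof(*fs));
}

static inline void virtio_features_set_bit(u64 *fs, unsigned int bit)
{
	fs[bit / 64] |= 1ULL << (bit % 64);
}

static inline bool virtio_features_test_bit(const u64 *fs, unsigned int bit)
{
	return !!(fs[bit / 64] & (1ULL << (bit % 64)));
}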
@@ -284,14 +325,15 @@ static int virtio_dev_probe(struct device *_d)
goto err;
if (drv->validate) {
- u64 features = dev->features;
+ u64 features[VIRTIO_FEATURES_U64S];
+ virtio_features_copy(features, dev->features_array);
err = drv->validate(dev);
if (err)
goto err;
/* Did validation change any features? Then write them again. */
- if (features != dev->features) {
+ if (!virtio_features_equal(features, dev->features_array)) {
err = dev->config->finalize_features(dev);
if (err)
goto err;
@@ -313,9 +355,10 @@ static int virtio_dev_probe(struct device *_d)
if (drv->scan)
drv->scan(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
+
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -327,7 +370,7 @@ static void virtio_dev_remove(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- virtio_config_disable(dev);
+ virtio_config_core_disable(dev);
drv->remove(dev);
@@ -340,23 +383,79 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
-static struct bus_type virtio_bus = {
+/*
+ * virtio_irq_get_affinity - get IRQ affinity mask for device
+ * @_d: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @_d and @irq_vec.
+ */
+static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
+ unsigned int irq_vec)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+
+ if (!dev->config->get_vq_affinity)
+ return NULL;
+
+ return dev->config->get_vq_affinity(dev, irq_vec);
+}
+
+static void virtio_dev_shutdown(struct device *_d)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ /*
+ * Stop accesses to or from the device.
+ * We only need to do it if there's a driver - no accesses otherwise.
+ */
+ if (!drv)
+ return;
+
+ /* If the driver has its own shutdown method, use that. */
+ if (drv->shutdown) {
+ drv->shutdown(dev);
+ return;
+ }
+
+ /*
+ * Some devices get wedged if you kick them after they are
+ * reset. Mark all vqs as broken to make sure we don't.
+ */
+ virtio_break_device(dev);
+ /*
+ * Guarantee that any callback will see vq->broken as true.
+ */
+ virtio_synchronize_cbs(dev);
+ /*
+ * As IOMMUs are reset on shutdown, this will block device access to memory.
+ * Some devices get wedged if this happens, so reset to make sure it does not.
+ */
+ dev->config->reset(dev);
+}
+
+static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
.dev_groups = virtio_dev_groups,
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
+ .irq_get_affinity = virtio_irq_get_affinity,
+ .shutdown = virtio_dev_shutdown,
};
-int register_virtio_driver(struct virtio_driver *driver)
+int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
{
/* Catch this early. */
BUG_ON(driver->feature_table_size && !driver->feature_table);
driver->driver.bus = &virtio_bus;
+ driver->driver.owner = owner;
+
return driver_register(&driver->driver);
}
-EXPORT_SYMBOL_GPL(register_virtio_driver);
+EXPORT_SYMBOL_GPL(__register_virtio_driver);
void unregister_virtio_driver(struct virtio_driver *driver)
{
@@ -413,7 +512,7 @@ out:
* On error, the caller must call put_device on &@dev->dev (and not kfree),
* as another code path may have obtained a reference to @dev.
*
- * Returns: 0 on suceess, -error on failure
+ * Returns: 0 on success, -error on failure
*/
int register_virtio_device(struct virtio_device *dev)
{
@@ -437,7 +536,8 @@ int register_virtio_device(struct virtio_device *dev)
goto out_ida_remove;
spin_lock_init(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_driver_disabled = false;
+ dev->config_core_enabled = false;
dev->config_change_pending = false;
INIT_LIST_HEAD(&dev->vqs);
@@ -450,6 +550,8 @@ int register_virtio_device(struct virtio_device *dev)
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ virtio_debug_device_init(dev);
+
/*
* device_add() causes the bus infrastructure to look for a matching
* driver.
@@ -481,27 +583,12 @@ void unregister_virtio_device(struct virtio_device *dev)
int index = dev->index; /* save for after device release */
device_unregister(&dev->dev);
+ virtio_debug_device_exit(dev);
ida_free(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
-#ifdef CONFIG_PM_SLEEP
-int virtio_device_freeze(struct virtio_device *dev)
-{
- struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
-
- virtio_config_disable(dev);
-
- dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
-
- if (drv && drv->freeze)
- return drv->freeze(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(virtio_device_freeze);
-
-int virtio_device_restore(struct virtio_device *dev)
+static int virtio_device_restore_priv(struct virtio_device *dev, bool restore)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
int ret;
@@ -532,8 +619,14 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
- if (drv->restore) {
- ret = drv->restore(dev);
+ if (restore) {
+ if (drv->restore) {
+ ret = drv->restore(dev);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = drv->reset_done(dev);
if (ret)
goto err;
}
@@ -542,7 +635,7 @@ int virtio_device_restore(struct virtio_device *dev)
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
virtio_device_ready(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
@@ -550,22 +643,88 @@ err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
}
+
+#ifdef CONFIG_PM_SLEEP
+int virtio_device_freeze(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_freeze);
+
+int virtio_device_restore(struct virtio_device *dev)
+{
+ return virtio_device_restore_priv(dev, true);
+}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif
+int virtio_device_reset_prepare(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ if (!drv || !drv->reset_prepare)
+ return -EOPNOTSUPP;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ ret = drv->reset_prepare(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_prepare);
+
+int virtio_device_reset_done(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ if (!drv || !drv->reset_done)
+ return -EOPNOTSUPP;
+
+ return virtio_device_restore_priv(dev, false);
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_done);
+
static int virtio_init(void)
{
+ BUILD_BUG_ON(offsetof(struct virtio_device, features) !=
+ offsetof(struct virtio_device, features_array[0]));
+
if (bus_register(&virtio_bus) != 0)
panic("virtio bus registration failed");
+ virtio_debug_init();
return 0;
}
static void __exit virtio_exit(void)
{
+ virtio_debug_exit();
bus_unregister(&virtio_bus);
ida_destroy(&virtio_index_ida);
}
core_initcall(virtio_init);
module_exit(virtio_exit);
+MODULE_DESCRIPTION("Virtio core interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 3f78a3a1eb75..74fe59f5a78c 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -33,7 +33,7 @@
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
@@ -111,7 +111,7 @@ struct virtio_balloon {
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
/* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
- struct shrinker shrinker;
+ struct shrinker *shrinker;
/* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
struct notifier_block oom_nb;
@@ -119,8 +119,16 @@ struct virtio_balloon {
/* Free page reporting device */
struct virtqueue *reporting_vq;
struct page_reporting_dev_info pr_dev_info;
+
+ /* State for keeping the wakeup_source active while adjusting the balloon */
+ spinlock_t wakeup_lock;
+ bool processing_wakeup_event;
+ u32 wakeup_signal_mask;
};
+#define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0)
+#define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1)
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -135,6 +143,36 @@ static u32 page_to_balloon_pfn(struct page *page)
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
+static void start_wakeup_event(struct virtio_balloon *vb, u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vb->wakeup_lock, flags);
+ vb->wakeup_signal_mask |= mask;
+ if (!vb->processing_wakeup_event) {
+ vb->processing_wakeup_event = true;
+ pm_stay_awake(&vb->vdev->dev);
+ }
+ spin_unlock_irqrestore(&vb->wakeup_lock, flags);
+}
+
+static void process_wakeup_event(struct virtio_balloon *vb, u32 mask)
+{
+ spin_lock_irq(&vb->wakeup_lock);
+ vb->wakeup_signal_mask &= ~mask;
+ spin_unlock_irq(&vb->wakeup_lock);
+}
+
+static void finish_wakeup_event(struct virtio_balloon *vb)
+{
+ spin_lock_irq(&vb->wakeup_lock);
+ if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) {
+ vb->processing_wakeup_event = false;
+ pm_relax(&vb->vdev->dev);
+ }
+ spin_unlock_irq(&vb->wakeup_lock);
+}
+
static void balloon_ack(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
@@ -167,7 +205,7 @@ static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_i
unsigned int unused, err;
/* We should always be able to add these buffers to an empty queue. */
- err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);
+ err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT);
/*
* In the extremely unlikely case that something has occurred and we
@@ -213,7 +251,7 @@ static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
for (num_pfns = 0; num_pfns < num;
num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_page_alloc();
+ page = balloon_page_alloc();
if (!page) {
dev_info_ratelimited(&vb->vdev->dev,
@@ -311,34 +349,67 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
-static unsigned int update_balloon_stats(struct virtio_balloon *vb)
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/* Return the number of entries filled by vm events */
+static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
- struct sysinfo i;
unsigned int idx = 0;
- long available;
- unsigned long caches;
+ unsigned int zid;
+ unsigned long stall = 0;
all_vm_events(events);
- si_meminfo(&i);
-
- available = si_mem_available();
- caches = global_node_page_state(NR_FILE_PAGES);
-
-#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
- pages_to_bytes(events[PSWPIN]));
+ pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
- pages_to_bytes(events[PSWPOUT]));
+ pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);
+
+ /* sum all the stall events */
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
+ pages_to_bytes(events[PGSCAN_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
+ pages_to_bytes(events[PGSCAN_DIRECT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_DIRECT]));
+
#ifdef CONFIG_HUGETLB_PAGE
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
events[HTLB_BUDDY_PGALLOC]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
events[HTLB_BUDDY_PGALLOC_FAIL]);
-#endif
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */
+
+ return idx;
+}
+#else /* CONFIG_VM_EVENT_COUNTERS */
+static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
+{
+ return 0;
+}
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
+{
+ struct sysinfo i;
+ unsigned int idx;
+ long available;
+ unsigned long caches;
+
+ idx = update_balloon_vm_stats(vb);
+
+ si_meminfo(&i);
+ available = si_mem_available();
+ caches = global_node_page_state(NR_FILE_PAGES);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
@@ -365,8 +436,10 @@ static void stats_request(struct virtqueue *vq)
struct virtio_balloon *vb = vq->vdev->priv;
spin_lock(&vb->stop_update_lock);
- if (!vb->stop_update)
+ if (!vb->stop_update) {
+ start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
+ }
spin_unlock(&vb->stop_update_lock);
}
@@ -395,7 +468,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
&num_pages);
- target = num_pages;
+ /*
+	 * Align up to the guest page size to avoid endlessly inflating and
+	 * deflating the balloon.
+ */
+ target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
return target - vb->num_pages;
}
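
A worked example of why the alignment matters, as a comment (the numbers are illustrative, not from the patch):

/*
 * Hedged illustration: on a 64 KiB-page guest,
 * VIRTIO_BALLOON_PAGES_PER_PAGE = PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT = 16.
 * The balloon can only grow or shrink in whole guest pages, i.e. multiples
 * of 16 balloon pages, so a host target of num_pages = 100 is unreachable;
 * ALIGN(100, 16) == 112 turns it into a reachable target instead of letting
 * the driver requeue inflate/deflate work around 100 forever.
 */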
@@ -411,8 +488,7 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
page = balloon_page_pop(&vb->free_page_list);
if (!page)
break;
- free_pages((unsigned long)page_address(page),
- VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+ __free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}
vb->num_free_page_blocks -= num_returned;
spin_unlock_irq(&vb->free_page_list_lock);
@@ -433,6 +509,12 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
queue_work(vb->balloon_wq, &vb->report_free_page_work);
}
+static void start_update_balloon_size(struct virtio_balloon *vb)
+{
+ start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
+ queue_work(system_freezable_wq, &vb->update_balloon_size_work);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -440,8 +522,7 @@ static void virtballoon_changed(struct virtio_device *vdev)
spin_lock_irqsave(&vb->stop_update_lock, flags);
if (!vb->stop_update) {
- queue_work(system_freezable_wq,
- &vb->update_balloon_size_work);
+ start_update_balloon_size(vb);
virtio_balloon_queue_free_page_work(vb);
}
spin_unlock_irqrestore(&vb->stop_update_lock, flags);
@@ -462,7 +543,10 @@ static void update_balloon_stats_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_stats_work);
+
+ process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
stats_handle_request(vb);
+ finish_wakeup_event(vb);
}
static void update_balloon_size_func(struct work_struct *work)
@@ -472,26 +556,29 @@ static void update_balloon_size_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_size_work);
- diff = towards_target(vb);
- if (!diff)
- return;
+ process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
- if (diff > 0)
- diff -= fill_balloon(vb, diff);
- else
- diff += leak_balloon(vb, -diff);
- update_balloon_size(vb);
+ diff = towards_target(vb);
+
+ if (diff) {
+ if (diff > 0)
+ diff -= fill_balloon(vb, diff);
+ else
+ diff += leak_balloon(vb, -diff);
+ update_balloon_size(vb);
+ }
if (diff)
queue_work(system_freezable_wq, work);
+ else
+ finish_wakeup_event(vb);
}
static int init_vqs(struct virtio_balloon *vb)
{
+ struct virtqueue_info vqs_info[VIRTIO_BALLOON_VQ_MAX] = {};
struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
- vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
- const char *names[VIRTIO_BALLOON_VQ_MAX];
int err;
/*
@@ -499,33 +586,26 @@ static int init_vqs(struct virtio_balloon *vb)
* will be NULL if the related feature is not enabled, which will
* cause no allocation for the corresponding virtqueue in find_vqs.
*/
- callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
- names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
- callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
- names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
- callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
- names[VIRTIO_BALLOON_VQ_STATS] = NULL;
- callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
- names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
- names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;
+ vqs_info[VIRTIO_BALLOON_VQ_INFLATE].callback = balloon_ack;
+ vqs_info[VIRTIO_BALLOON_VQ_INFLATE].name = "inflate";
+ vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].callback = balloon_ack;
+ vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].name = "deflate";
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
- names[VIRTIO_BALLOON_VQ_STATS] = "stats";
- callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
+ vqs_info[VIRTIO_BALLOON_VQ_STATS].name = "stats";
+ vqs_info[VIRTIO_BALLOON_VQ_STATS].callback = stats_request;
}
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
- names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
- callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
- }
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ vqs_info[VIRTIO_BALLOON_VQ_FREE_PAGE].name = "free_page_vq";
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
- names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
- callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
+ vqs_info[VIRTIO_BALLOON_VQ_REPORTING].name = "reporting_vq";
+ vqs_info[VIRTIO_BALLOON_VQ_REPORTING].callback = balloon_ack;
}
err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
- callbacks, names, NULL);
+ vqs_info, NULL);
if (err)
return err;
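
This conversion (and the matching virtio_input one below) uses the new virtqueue_info-based virtio_find_vqs() signature. A sketch of the structure as assumed here; the field order matches the positional initializers in the virtio_input hunk:

/* Hedged sketch of struct virtqueue_info; the exact definition may differ. */
struct virtqueue_info {
	const char	*name;		/* NULL: vq is not allocated */
	vq_callback_t	*callback;	/* NULL: no callback */
	bool		ctx;
};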
@@ -638,8 +718,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
if (vq->num_free > 1) {
err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
if (unlikely(err)) {
- free_pages((unsigned long)p,
- VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+ __free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
return err;
}
virtqueue_kick(vq);
@@ -652,7 +731,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
* The vq has no available entry to add this page block, so
* just free it.
*/
- free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+ __free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}
return 0;
@@ -741,7 +820,7 @@ static void report_free_page_func(struct work_struct *work)
* 2) update the host about the old page removed from vb->pages list;
*
 * This function performs the balloon page migration task.
- * Called through balloon_mapping->a_ops->migratepage
+ * Called through movable_operations->migrate_page
*/
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
struct page *newpage, struct page *page, enum migrate_mode mode)
@@ -785,18 +864,16 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
tell_host(vb, vb->inflate_vq);
/* balloon's page migration 2nd step -- deflate "page" */
- spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
- balloon_page_delete(page);
- spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb, vb->pfns, page);
tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
+ balloon_page_finalize(page);
put_page(page); /* balloon reference */
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
#endif /* CONFIG_BALLOON_COMPACTION */
@@ -816,8 +893,7 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- struct virtio_balloon *vb = container_of(shrinker,
- struct virtio_balloon, shrinker);
+ struct virtio_balloon *vb = shrinker->private_data;
return shrink_free_pages(vb, sc->nr_to_scan);
}
@@ -825,8 +901,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc)
{
- struct virtio_balloon *vb = container_of(shrinker,
- struct virtio_balloon, shrinker);
+ struct virtio_balloon *vb = shrinker->private_data;
return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}
@@ -847,16 +922,22 @@ static int virtio_balloon_oom_notify(struct notifier_block *nb,
static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
- unregister_shrinker(&vb->shrinker);
+ shrinker_free(vb->shrinker);
}
static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
- vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
- vb->shrinker.count_objects = virtio_balloon_shrinker_count;
- vb->shrinker.seeks = DEFAULT_SEEKS;
+ vb->shrinker = shrinker_alloc(0, "virtio-balloon");
+ if (!vb->shrinker)
+ return -ENOMEM;
+
+ vb->shrinker->scan_objects = virtio_balloon_shrinker_scan;
+ vb->shrinker->count_objects = virtio_balloon_shrinker_count;
+ vb->shrinker->private_data = vb;
- return register_shrinker(&vb->shrinker, "virtio-balloon");
+ shrinker_register(vb->shrinker);
+
+ return 0;
}
static int virtballoon_probe(struct virtio_device *vdev)
@@ -902,7 +983,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
vb->balloon_wq = alloc_workqueue("balloon-wq",
- WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
+ WQ_FREEZABLE | WQ_CPU_INTENSIVE | WQ_PERCPU,
+ 0);
if (!vb->balloon_wq) {
err = -ENOMEM;
goto out_del_vqs;
@@ -984,6 +1066,17 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_unregister_oom;
}
+ spin_lock_init(&vb->wakeup_lock);
+
+ /*
+ * The virtio balloon itself can't wake up the device, but it is
+ * responsible for processing wakeup events passed up from the transport
+ * layer. Wakeup sources don't support nesting/chaining calls, so we use
+ * our own wakeup source to ensure wakeup events are properly handled
+ * without trampling on the transport layer's wakeup source.
+ */
+ device_set_wakeup_capable(&vb->vdev->dev, true);
+
virtio_device_ready(vdev);
if (towards_target(vb))
@@ -1110,7 +1203,6 @@ static struct virtio_driver virtio_balloon_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtballoon_validate,
.probe = virtballoon_probe,
diff --git a/drivers/virtio/virtio_debug.c b/drivers/virtio/virtio_debug.c
new file mode 100644
index 000000000000..ccf1955a1183
--- /dev/null
+++ b/drivers/virtio/virtio_debug.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/debugfs.h>
+
+static struct dentry *virtio_debugfs_dir;
+
+static int virtio_debug_device_features_show(struct seq_file *s, void *data)
+{
+ u64 device_features[VIRTIO_FEATURES_U64S];
+ struct virtio_device *dev = s->private;
+ unsigned int i;
+
+ virtio_get_features(dev, device_features);
+ for (i = 0; i < VIRTIO_FEATURES_BITS; i++) {
+ if (virtio_features_test_bit(device_features, i))
+ seq_printf(s, "%u\n", i);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(virtio_debug_device_features);
+
+static int virtio_debug_filter_features_show(struct seq_file *s, void *data)
+{
+ struct virtio_device *dev = s->private;
+ unsigned int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_BITS; i++) {
+ if (virtio_features_test_bit(dev->debugfs_filter_features, i))
+ seq_printf(s, "%u\n", i);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(virtio_debug_filter_features);
+
+static int virtio_debug_filter_features_clear(void *data, u64 val)
+{
+ struct virtio_device *dev = data;
+
+ if (val == 1)
+ virtio_features_zero(dev->debugfs_filter_features);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_features_clear_fops, NULL,
+ virtio_debug_filter_features_clear, "%llu\n");
+
+static int virtio_debug_filter_feature_add(void *data, u64 val)
+{
+ struct virtio_device *dev = data;
+
+ if (val >= VIRTIO_FEATURES_BITS)
+ return -EINVAL;
+
+ virtio_features_set_bit(dev->debugfs_filter_features, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_feature_add_fops, NULL,
+ virtio_debug_filter_feature_add, "%llu\n");
+
+static int virtio_debug_filter_feature_del(void *data, u64 val)
+{
+ struct virtio_device *dev = data;
+
+ if (val >= VIRTIO_FEATURES_BITS)
+ return -EINVAL;
+
+ virtio_features_clear_bit(dev->debugfs_filter_features, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_feature_del_fops, NULL,
+ virtio_debug_filter_feature_del, "%llu\n");
+
+void virtio_debug_device_init(struct virtio_device *dev)
+{
+ dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->dev),
+ virtio_debugfs_dir);
+ debugfs_create_file("device_features", 0400, dev->debugfs_dir, dev,
+ &virtio_debug_device_features_fops);
+ debugfs_create_file("filter_features", 0400, dev->debugfs_dir, dev,
+ &virtio_debug_filter_features_fops);
+ debugfs_create_file("filter_features_clear", 0200, dev->debugfs_dir, dev,
+ &virtio_debug_filter_features_clear_fops);
+ debugfs_create_file("filter_feature_add", 0200, dev->debugfs_dir, dev,
+ &virtio_debug_filter_feature_add_fops);
+ debugfs_create_file("filter_feature_del", 0200, dev->debugfs_dir, dev,
+ &virtio_debug_filter_feature_del_fops);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_device_init);
+
+void virtio_debug_device_filter_features(struct virtio_device *dev)
+{
+ virtio_features_andnot(dev->features_array, dev->features_array,
+ dev->debugfs_filter_features);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_device_filter_features);
+
+void virtio_debug_device_exit(struct virtio_device *dev)
+{
+ debugfs_remove_recursive(dev->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_device_exit);
+
+void virtio_debug_init(void)
+{
+ virtio_debugfs_dir = debugfs_create_dir("virtio", NULL);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_init);
+
+void virtio_debug_exit(void)
+{
+ debugfs_remove_recursive(virtio_debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_exit);
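
A hedged userspace usage sketch for the files created above (the device name virtio0 and the debugfs mount point are assumptions; the filter is only consulted by virtio_debug_device_filter_features() at probe time, so the device must be re-bound afterwards):

/* Mask feature bit 34 for virtio0 via the new debugfs interface. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/virtio/virtio0/filter_feature_add",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* DEFINE_DEBUGFS_ATTRIBUTE parses the written string as a u64. */
	if (write(fd, "34\n", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}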
diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c
index 2521a75009c3..95c10632f84a 100644
--- a/drivers/virtio/virtio_dma_buf.c
+++ b/drivers/virtio/virtio_dma_buf.c
@@ -36,6 +36,8 @@ EXPORT_SYMBOL(virtio_dma_buf_export);
/**
* virtio_dma_buf_attach - mandatory attach callback for virtio dma-bufs
+ * @dma_buf: [in] buffer to attach
+ * @attach: [in] attachment structure
*/
int virtio_dma_buf_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
@@ -85,5 +87,6 @@ int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf,
}
EXPORT_SYMBOL(virtio_dma_buf_get_uuid);
+MODULE_DESCRIPTION("dma-bufs for virtio exported objects");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 3aa46703872d..d0728285b6ce 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -185,13 +185,14 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
static int virtinput_init_vqs(struct virtio_input *vi)
{
+ struct virtqueue_info vqs_info[] = {
+ { "events", virtinput_recv_events },
+ { "status", virtinput_recv_status },
+ };
struct virtqueue *vqs[2];
- vq_callback_t *cbs[] = { virtinput_recv_events,
- virtinput_recv_status };
- static const char * const names[] = { "events", "status" };
int err;
- err = virtio_find_vqs(vi->vdev, 2, vqs, cbs, names, NULL);
+ err = virtio_find_vqs(vi->vdev, 2, vqs, vqs_info, NULL);
if (err)
return err;
vi->evt = vqs[0];
@@ -359,11 +360,15 @@ static int virtinput_freeze(struct virtio_device *vdev)
{
struct virtio_input *vi = vdev->priv;
unsigned long flags;
+ void *buf;
spin_lock_irqsave(&vi->lock, flags);
vi->ready = false;
spin_unlock_irqrestore(&vi->lock, flags);
+ virtio_reset_device(vdev);
+ while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+ kfree(buf);
vdev->config->del_vqs(vdev);
return 0;
}
@@ -394,7 +399,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_input_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 0c2892ec6817..1688ecd69a04 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -21,6 +21,8 @@
#include <linux/bitmap.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
+#include <linux/vmalloc.h>
+#include <linux/suspend.h>
#include <acpi/acpi_numa.h>
@@ -38,11 +40,6 @@ module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
"Big Block size in bytes. Default is 0 (auto-detection).");
-static bool bbm_safe_unplug = true;
-module_param(bbm_safe_unplug, bool, 0444);
-MODULE_PARM_DESC(bbm_safe_unplug,
- "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
-
/*
* virtio-mem currently supports the following modes of operation:
*
@@ -136,6 +133,8 @@ struct virtio_mem {
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
+ /* Usable region size in bytes. */
+ uint64_t usable_region_size;
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
@@ -173,6 +172,13 @@ struct virtio_mem {
/* The number of subblocks per Linux memory block. */
uint32_t sbs_per_mb;
+ /*
+ * Some of the Linux memory blocks tracked as "partially
+ * plugged" are completely unplugged and can be offlined
+ * and removed -- which previously failed.
+ */
+ bool have_unplugged_mb;
+
/* Summary of all memory block states. */
unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
@@ -250,6 +256,9 @@ struct virtio_mem {
/* Memory notifier (online/offline events). */
struct notifier_block memory_notifier;
+ /* Notifier to block hibernation image storing/reloading. */
+ struct notifier_block pm_notifier;
+
#ifdef CONFIG_PROC_VMCORE
/* vmcore callback for /proc/vmcore handling in kdump mode */
struct vmcore_cb vmcore_cb;
@@ -746,11 +755,15 @@ static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
* immediately instead of waiting.
*/
virtio_mem_retry(vm);
- } else {
- dev_dbg(&vm->vdev->dev,
- "offlining and removing memory failed: %d\n", rc);
+ return 0;
}
- return rc;
+ dev_dbg(&vm->vdev->dev, "offlining and removing memory failed: %d\n", rc);
+ /*
+ * We don't really expect this to fail, because we fake-offlined all
+ * memory already. But it could fail in corner cases.
+ */
+ WARN_ON_ONCE(rc != -ENOMEM && rc != -EBUSY);
+ return rc == -ENOMEM ? -ENOMEM : -EBUSY;
}
/*
@@ -767,6 +780,34 @@ static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
}
/*
+ * Try (offlining and) removing memory from Linux in case all subblocks are
+ * unplugged. Can be called on online and offline memory blocks.
+ *
+ * May modify the state of memory blocks in virtio-mem.
+ */
+static int virtio_mem_sbm_try_remove_unplugged_mb(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ int rc;
+
+ /*
+ * Once all subblocks of a memory block were unplugged, offline and
+ * remove it.
+ */
+ if (!virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
+ return 0;
+
+ /* offline_and_remove_memory() works for online and offline memory. */
+ mutex_unlock(&vm->hotplug_mutex);
+ rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
+ mutex_lock(&vm->hotplug_mutex);
+ if (!rc)
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
+ return rc;
+}
+
+/*
 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
* all Linux memory blocks covered by the big block.
*/
@@ -1077,6 +1118,25 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
return rc;
}
+static int virtio_mem_pm_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct virtio_mem *vm = container_of(nb, struct virtio_mem,
+ pm_notifier);
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ /*
+ * When restarting the VM, all memory is unplugged. Don't
+ * allow to hibernate and restore from an image.
+ */
+ dev_err(&vm->vdev->dev, "hibernation is not supported.\n");
+ return NOTIFY_BAD;
+ default:
+ return NOTIFY_OK;
+ }
+}
+
/*
* Set a range of pages PG_offline. Remember pages that were never onlined
* (via generic_online_page()) using PageDirty().
@@ -1088,12 +1148,16 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
- __SetPageOffline(page);
- if (!onlined) {
+ if (!onlined)
+ /*
+ * Pages that have not been onlined yet were initialized
+ * to PageOffline(). Remember that we have to route them
+ * through generic_online_page().
+ */
SetPageDirty(page);
- /* FIXME: remove after cleanups */
- ClearPageReserved(page);
- }
+ else
+ __SetPageOffline(page);
+ VM_WARN_ON_ONCE(!PageOffline(page));
}
page_offline_end();
}
@@ -1108,9 +1172,11 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
- __ClearPageOffline(page);
if (!onlined)
+ /* generic_online_page() will clear PageOffline(). */
ClearPageDirty(page);
+ else
+ __ClearPageOffline(page);
}
}
@@ -1120,13 +1186,13 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- unsigned long order = MAX_ORDER - 1;
+ unsigned long order = MAX_PAGE_ORDER;
unsigned long i;
/*
* We might get called for ranges that don't cover properly aligned
- * MAX_ORDER - 1 pages; however, we can only online properly aligned
- * pages with an order of MAX_ORDER - 1 at maximum.
+ * MAX_PAGE_ORDER pages; however, we can only online properly aligned
+ * pages with an order of MAX_PAGE_ORDER at maximum.
*/
while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
order--;
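
A quick worked example of the alignment loop, as a comment (the values are illustrative):

/*
 * Hedged illustration: pfn = 0x500, nr_pages = 0x300, so
 * pfn | nr_pages = 0x700, whose lowest set bit is 0x100. The loop
 * lowers 'order' from MAX_PAGE_ORDER until 1 << order == 0x100,
 * i.e. order = 8: the largest order that aligns both the start and
 * the length of the range.
 */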
@@ -1155,7 +1221,8 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
* Try to allocate a range, marking pages fake-offline, effectively
* fake-offlining them.
*/
-static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
+static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
+ unsigned long nr_pages)
{
const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
@@ -1168,7 +1235,15 @@ static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
* some guarantees.
*/
for (retry_count = 0; retry_count < 5; retry_count++) {
- rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
+ /*
+ * If the config changed, stop immediately and go back to the
+ * main loop: avoid trying to keep unplugging if the device
+ * might have decided to not remove any more memory.
+ */
+ if (atomic_read(&vm->config_changed))
+ return -EAGAIN;
+
+ rc = alloc_contig_range(pfn, pfn + nr_pages, ACR_FLAGS_NONE,
GFP_KERNEL);
if (rc == -ENOMEM)
/* whoops, out of memory */
@@ -1196,12 +1271,6 @@ static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
struct page *page;
unsigned long i;
- /*
- * Drop our reference to the pages so the memory can get offlined
- * and add the unplugged pages to the managed page counters (so
- * offlining code can correctly subtract them again).
- */
- adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
/* Drop our reference to the pages so the memory can get offlined. */
for (i = 0; i < nr_pages; i++) {
page = pfn_to_page(pfn + i);
@@ -1220,10 +1289,9 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
unsigned long i;
/*
- * Get the reference we dropped when going offline and subtract the
- * unplugged pages from the managed page counters.
+ * Get the reference again that we dropped via page_ref_dec_and_test()
+ * when going offline.
*/
- adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
for (i = 0; i < nr_pages; i++)
page_ref_inc(pfn_to_page(pfn + i));
}
@@ -1237,9 +1305,9 @@ static void virtio_mem_online_page(struct virtio_mem *vm,
bool do_online;
/*
- * We can get called with any order up to MAX_ORDER - 1. If our
- * subblock size is smaller than that and we have a mixture of plugged
- * and unplugged subblocks within such a page, we have to process in
+ * We can get called with any order up to MAX_PAGE_ORDER. If our subblock
+ * size is smaller than that and we have a mixture of plugged and
+ * unplugged subblocks within such a page, we have to process in
* smaller granularity. In that case we'll adjust the order exactly once
* within the loop.
*/
@@ -1917,7 +1985,7 @@ static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size);
- rc = virtio_mem_fake_offline(start_pfn, nr_pages);
+ rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages);
if (rc)
return rc;
@@ -1989,20 +2057,10 @@ static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
}
unplugged:
- /*
- * Once all subblocks of a memory block were unplugged, offline and
- * remove it. This will usually not fail, as no memory is in use
- * anymore - however some other notifiers might NACK the request.
- */
- if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
- mutex_unlock(&vm->hotplug_mutex);
- rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
- mutex_lock(&vm->hotplug_mutex);
- if (!rc)
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_UNUSED);
- }
-
+ rc = virtio_mem_sbm_try_remove_unplugged_mb(vm, mb_id);
+ if (rc)
+ vm->sbm.have_unplugged_mb = 1;
+ /* Ignore errors, this is not critical. We'll retry later. */
return 0;
}
@@ -2111,38 +2169,32 @@ static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
VIRTIO_MEM_BBM_BB_ADDED))
return -EINVAL;
- if (bbm_safe_unplug) {
- /*
- * Start by fake-offlining all memory. Once we marked the device
- * block as fake-offline, all newly onlined memory will
- * automatically be kept fake-offline. Protect from concurrent
- * onlining/offlining until we have a consistent state.
- */
- mutex_lock(&vm->hotplug_mutex);
- virtio_mem_bbm_set_bb_state(vm, bb_id,
- VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
+ /*
+ * Start by fake-offlining all memory. Once we marked the device
+ * block as fake-offline, all newly onlined memory will
+ * automatically be kept fake-offline. Protect from concurrent
+ * onlining/offlining until we have a consistent state.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+ virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- page = pfn_to_online_page(pfn);
- if (!page)
- continue;
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
- rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
- if (rc) {
- end_pfn = pfn;
- goto rollback_safe_unplug;
- }
+ rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
+ if (rc) {
+ end_pfn = pfn;
+ goto rollback;
}
- mutex_unlock(&vm->hotplug_mutex);
}
+ mutex_unlock(&vm->hotplug_mutex);
rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
if (rc) {
- if (bbm_safe_unplug) {
- mutex_lock(&vm->hotplug_mutex);
- goto rollback_safe_unplug;
- }
- return rc;
+ mutex_lock(&vm->hotplug_mutex);
+ goto rollback;
}
rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
@@ -2154,7 +2206,7 @@ static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
VIRTIO_MEM_BBM_BB_UNUSED);
return rc;
-rollback_safe_unplug:
+rollback:
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
page = pfn_to_online_page(pfn);
if (!page)
@@ -2260,12 +2312,13 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
/*
* Try to unplug all blocks that couldn't be unplugged before, for example,
- * because the hypervisor was busy.
+ * because the hypervisor was busy. Further, offline and remove any memory
+ * blocks where we previously failed.
*/
-static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
+static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
{
unsigned long id;
- int rc;
+ int rc = 0;
if (!vm->in_sbm) {
virtio_mem_bbm_for_each_bb(vm, id,
@@ -2287,6 +2340,27 @@ static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
VIRTIO_MEM_SBM_MB_UNUSED);
}
+ if (!vm->sbm.have_unplugged_mb)
+ return 0;
+
+ /*
+ * Let's retry (offlining and) removing completely unplugged Linux
+ * memory blocks.
+ */
+ vm->sbm.have_unplugged_mb = false;
+
+ mutex_lock(&vm->hotplug_mutex);
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL)
+ rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL)
+ rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
+ rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
+ mutex_unlock(&vm->hotplug_mutex);
+
+ if (rc)
+ vm->sbm.have_unplugged_mb = true;
+ /* Ignore errors, this is not critical. We'll retry later. */
return 0;
}
@@ -2296,7 +2370,7 @@ static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
- uint64_t new_plugged_size, usable_region_size, end_addr;
+ uint64_t new_plugged_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
@@ -2306,8 +2380,8 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
/* calculate the last usable memory block id */
virtio_cread_le(vm->vdev, struct virtio_mem_config,
- usable_region_size, &usable_region_size);
- end_addr = min(vm->addr + usable_region_size - 1,
+ usable_region_size, &vm->usable_region_size);
+ end_addr = min(vm->addr + vm->usable_region_size - 1,
pluggable_range.end);
if (vm->in_sbm) {
@@ -2368,9 +2442,9 @@ retry:
virtio_mem_refresh_config(vm);
}
- /* Unplug any leftovers from previous runs */
+ /* Cleanup any leftovers from previous runs */
if (!rc)
- rc = virtio_mem_unplug_pending_mb(vm);
+ rc = virtio_mem_cleanup_pending_mb(vm);
if (!rc && vm->requested_size != vm->plugged_size) {
if (vm->requested_size > vm->plugged_size) {
@@ -2382,6 +2456,13 @@ retry:
}
}
+ /*
+ * Keep retrying to offline and remove completely unplugged Linux
+ * memory blocks.
+ */
+ if (!rc && vm->in_sbm && vm->sbm.have_unplugged_mb)
+ rc = -EBUSY;
+
switch (rc) {
case 0:
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
@@ -2559,11 +2640,20 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
goto out_unreg_group;
- rc = register_virtio_mem_device(vm);
+ /* Block hibernation as early as possible. */
+ vm->pm_notifier.priority = INT_MAX;
+ vm->pm_notifier.notifier_call = virtio_mem_pm_notifier_cb;
+ rc = register_pm_notifier(&vm->pm_notifier);
if (rc)
goto out_unreg_mem;
+ rc = register_virtio_mem_device(vm);
+ if (rc)
+ goto out_unreg_pm;
+ virtio_device_ready(vm->vdev);
return 0;
+out_unreg_pm:
+ unregister_pm_notifier(&vm->pm_notifier);
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
out_unreg_group:
@@ -2638,13 +2728,103 @@ static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
mutex_unlock(&vm->hotplug_mutex);
return is_ram;
}
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+static int virtio_mem_vmcore_add_device_ram(struct virtio_mem *vm,
+ struct list_head *list, uint64_t start, uint64_t end)
+{
+ int rc;
+
+ rc = vmcore_alloc_add_range(list, start, end - start);
+ if (rc)
+ dev_err(&vm->vdev->dev,
+ "Error adding device RAM range: %d\n", rc);
+ return rc;
+}
+
+static int virtio_mem_vmcore_get_device_ram(struct vmcore_cb *cb,
+ struct list_head *list)
+{
+ struct virtio_mem *vm = container_of(cb, struct virtio_mem,
+ vmcore_cb);
+ const uint64_t device_start = vm->addr;
+ const uint64_t device_end = vm->addr + vm->usable_region_size;
+ uint64_t chunk_size, cur_start, cur_end, plugged_range_start = 0;
+ LIST_HEAD(tmp_list);
+ int rc;
+
+ if (!vm->plugged_size)
+ return 0;
+
+ /* Process memory sections, unless the device block size is bigger. */
+ chunk_size = max_t(uint64_t, PFN_PHYS(PAGES_PER_SECTION),
+ vm->device_block_size);
+
+ mutex_lock(&vm->hotplug_mutex);
+
+ /*
+ * We process larger chunks and indicate the complete chunk if any
+ * block in there is plugged. This reduces the number of pfn_is_ram()
+ * callbacks and mimic what is effectively being done when the old
+	 * callbacks and mimics what is effectively being done when the old
+ */
+ for (cur_start = device_start; cur_start < device_end; cur_start = cur_end) {
+ cur_end = ALIGN_DOWN(cur_start + chunk_size, chunk_size);
+ cur_end = min_t(uint64_t, cur_end, device_end);
+
+ rc = virtio_mem_send_state_request(vm, cur_start,
+ cur_end - cur_start);
+
+ if (rc < 0) {
+ dev_err(&vm->vdev->dev,
+ "Error querying block states: %d\n", rc);
+ goto out;
+ } else if (rc != VIRTIO_MEM_STATE_UNPLUGGED) {
+ /* Merge ranges with plugged memory. */
+ if (!plugged_range_start)
+ plugged_range_start = cur_start;
+ continue;
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start) {
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+ if (rc)
+ goto out;
+ plugged_range_start = 0;
+ }
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start)
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+out:
+ mutex_unlock(&vm->hotplug_mutex);
+ if (rc < 0) {
+ vmcore_free_ranges(&tmp_list);
+ return rc;
+ }
+ list_splice_tail(&tmp_list, list);
+ return 0;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
#endif /* CONFIG_PROC_VMCORE */
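The loop above coalesces consecutive plugged chunks so the vmcore ELF header stays compact. Distilled to its essentials, assuming a chunk-aligned start; emit() and chunk_is_plugged() are hypothetical stand-ins for vmcore_alloc_add_range() and the device state query, and, as in the driver, a zero range_start doubles as the "no open range" sentinel:

/* Hypothetical stand-ins for the real helpers used above. */
static bool chunk_is_plugged(u64 start, u64 end);
static int emit(u64 start, u64 end);

static int report_plugged_ranges(u64 start, u64 end, u64 chunk_size)
{
	u64 cur, range_start = 0;
	int rc;

	for (cur = start; cur < end; cur += chunk_size) {
		if (chunk_is_plugged(cur, min(cur + chunk_size, end))) {
			/* Extend or open the current plugged range. */
			if (!range_start)
				range_start = cur;
			continue;
		}
		/* Hit an unplugged chunk: flush any open range. */
		if (range_start) {
			rc = emit(range_start, cur);
			if (rc)
				return rc;
			range_start = 0;
		}
	}
	/* Flush a range that runs to the end of the device. */
	return range_start ? emit(range_start, end) : 0;
}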
static int virtio_mem_init_kdump(struct virtio_mem *vm)
{
+ /* We must be prepared to receive a callback immediately. */
+ virtio_device_ready(vm->vdev);
#ifdef CONFIG_PROC_VMCORE
dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+ vm->vmcore_cb.get_device_ram = virtio_mem_vmcore_get_device_ram;
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
register_vmcore_cb(&vm->vmcore_cb);
return 0;
#else /* CONFIG_PROC_VMCORE */
@@ -2673,6 +2853,8 @@ static int virtio_mem_init(struct virtio_mem *vm)
virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, usable_region_size,
+ &vm->usable_region_size);
/* Determine the nid for the device based on the lowest address. */
if (vm->nid == NUMA_NO_NODE)
@@ -2768,8 +2950,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
- hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vm->retry_timer.function = virtio_mem_timer_expired;
+ hrtimer_setup(&vm->retry_timer, virtio_mem_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
@@ -2783,8 +2965,6 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
- virtio_device_ready(vdev);
-
/* trigger a config update to start processing the requested_size */
if (!vm->in_kdump) {
atomic_set(&vm->config_changed, 1);
@@ -2841,6 +3021,7 @@ static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
/* unregister callbacks */
unregister_virtio_mem_device(vm);
+ unregister_pm_notifier(&vm->pm_notifier);
unregister_memory_notifier(&vm->memory_notifier);
/*
@@ -2904,17 +3085,40 @@ static void virtio_mem_config_changed(struct virtio_device *vdev)
#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
+ struct virtio_mem *vm = vdev->priv;
+
/*
- * When restarting the VM, all memory is usually unplugged. Don't
- * allow to suspend/hibernate.
+ * We block hibernation using the PM notifier completely. The workqueue
+ * is already frozen by the PM core at this point, so we simply
+ * reset the device and cleanup the queues.
*/
- dev_err(&vdev->dev, "save/restore not supported.\n");
- return -EPERM;
+ if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE &&
+ vm->plugged_size &&
+ !virtio_has_feature(vm->vdev, VIRTIO_MEM_F_PERSISTENT_SUSPEND)) {
+ dev_err(&vm->vdev->dev,
+ "suspending with plugged memory is not supported\n");
+ return -EPERM;
+ }
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+ vm->vq = NULL;
+ return 0;
}
static int virtio_mem_restore(struct virtio_device *vdev)
{
- return -EPERM;
+ struct virtio_mem *vm = vdev->priv;
+ int ret;
+
+ ret = virtio_mem_init_vq(vm);
+ if (ret)
+ return ret;
+ virtio_device_ready(vdev);
+
+ /* Let's check if anything changed. */
+ virtio_mem_config_changed(vdev);
+ return 0;
}
#endif
@@ -2923,6 +3127,7 @@ static unsigned int virtio_mem_features[] = {
VIRTIO_MEM_F_ACPI_PXM,
#endif
VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
+ VIRTIO_MEM_F_PERSISTENT_SUSPEND,
};
static const struct virtio_device_id virtio_mem_id_table[] = {
@@ -2934,7 +3139,6 @@ static struct virtio_driver virtio_mem_driver = {
.feature_table = virtio_mem_features,
.feature_table_size = ARRAY_SIZE(virtio_mem_features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = virtio_mem_id_table,
.probe = virtio_mem_probe,
.remove = virtio_mem_remove,
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 3ff746e3f24a..b152a1eca05a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -61,10 +61,10 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
@@ -87,22 +87,8 @@ struct virtio_mmio_device {
void __iomem *base;
unsigned long version;
-
- /* a list of queues so we can dispatch IRQs */
- spinlock_t lock;
- struct list_head virtqueues;
-};
-
-struct virtio_mmio_vq_info {
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
};
-
-
/* Configuration interface */
static u64 vm_get_features(struct virtio_device *vdev)
@@ -285,13 +271,22 @@ static bool vm_notify(struct virtqueue *vq)
return true;
}
+static bool vm_notify_with_data(struct virtqueue *vq)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+ u32 data = vring_notification_data(vq);
+
+ writel(data, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+
+ return true;
+}
+
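vm_notify_with_data() relies on vring_notification_data() to pack queue identification into the written value, sparing the device a queue-select lookup. A hedged illustration of the 32-bit layout defined by the VIRTIO 1.2 specification (hypothetical helper, not the core's implementation):

/*
 * VIRTIO_F_NOTIFICATION_DATA layout: bits 0..15 carry the vq index,
 * bits 16..30 the next available (split ring) or next descriptor
 * (packed ring) offset, and bit 31 the wrap counter for packed rings.
 */
static inline u32 example_notification_data(u16 vq_index, u16 next_off,
					    bool wrap_counter)
{
	return (u32)vq_index |
	       ((u32)(next_off & 0x7fff) << 16) |
	       ((u32)wrap_counter << 31);
}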
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
struct virtio_mmio_device *vm_dev = opaque;
- struct virtio_mmio_vq_info *info;
+ struct virtqueue *vq;
unsigned long status;
- unsigned long flags;
irqreturn_t ret = IRQ_NONE;
/* Read and acknowledge interrupts */
@@ -304,10 +299,8 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
}
if (likely(status & VIRTIO_MMIO_INT_VRING)) {
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_for_each_entry(info, &vm_dev->virtqueues, node)
- ret |= vring_interrupt(irq, info->vq);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
+ virtio_device_for_each_vq(&vm_dev->vdev, vq)
+ ret |= vring_interrupt(irq, vq);
}
return ret;
@@ -318,14 +311,8 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
static void vm_del_vq(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
- struct virtio_mmio_vq_info *info = vq->priv;
- unsigned long flags;
unsigned int index = vq->index;
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
-
/* Select and deactivate the queue */
writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
if (vm_dev->version == 1) {
@@ -336,8 +323,6 @@ static void vm_del_vq(struct virtqueue *vq)
}
vring_del_virtqueue(vq);
-
- kfree(info);
}
static void vm_del_vqs(struct virtio_device *vdev)
@@ -363,12 +348,16 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
const char *name, bool ctx)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- struct virtio_mmio_vq_info *info;
+ bool (*notify)(struct virtqueue *vq);
struct virtqueue *vq;
- unsigned long flags;
unsigned int num;
int err;
+ if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
+ notify = vm_notify_with_data;
+ else
+ notify = vm_notify;
+
if (!name)
return NULL;
@@ -382,13 +371,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_available;
}
- /* Allocate and fill out our active queue description */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto error_kmalloc;
- }
-
num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
if (num == 0) {
err = -ENOENT;
@@ -397,7 +379,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
/* Create the vring */
vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
- true, true, ctx, vm_notify, callback, name);
+ true, true, ctx, notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
@@ -446,13 +428,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
}
- vq->priv = info;
- info->vq = vq;
-
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_add(&info->node, &vm_dev->virtqueues);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
-
return vq;
error_bad_pfn:
@@ -464,17 +439,13 @@ error_new_virtqueue:
writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
}
- kfree(info);
-error_kmalloc:
error_available:
return ERR_PTR(err);
}
static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
@@ -493,13 +464,15 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
enable_irq_wake(irq);
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false);
+ vqs[i] = vm_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
return PTR_ERR(vqs[i]);
@@ -590,9 +563,8 @@ static void virtio_mmio_release_dev(struct device *_d)
struct virtio_device *vdev =
container_of(_d, struct virtio_device, dev);
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- struct platform_device *pdev = vm_dev->pdev;
- devm_kfree(&pdev->dev, vm_dev);
+ kfree(vm_dev);
}
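The switch from devm_kzalloc() to plain kzalloc() matters because the embedded struct virtio_device is refcounted: its memory must stay valid until the last reference drops through the release callback, which can happen after the platform device is unbound and its devres is torn down. A generic sketch of the pattern, with hypothetical names:

struct example_transport {
	struct virtio_device vdev;	/* embedded, refcounted */
};

static void example_release(struct device *d)
{
	struct virtio_device *vdev =
		container_of(d, struct virtio_device, dev);

	/* Free the container only when the last reference is gone. */
	kfree(container_of(vdev, struct example_transport, vdev));
}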
/* Platform device */
@@ -603,7 +575,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
unsigned long magic;
int rc;
- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
+ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
return -ENOMEM;
@@ -611,18 +583,19 @@ static int virtio_mmio_probe(struct platform_device *pdev)
vm_dev->vdev.dev.release = virtio_mmio_release_dev;
vm_dev->vdev.config = &virtio_mmio_config_ops;
vm_dev->pdev = pdev;
- INIT_LIST_HEAD(&vm_dev->virtqueues);
- spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(vm_dev->base))
- return PTR_ERR(vm_dev->base);
+ if (IS_ERR(vm_dev->base)) {
+ rc = PTR_ERR(vm_dev->base);
+ goto free_vm_dev;
+ }
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
/* Check device version */
@@ -630,7 +603,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- return -ENXIO;
+ rc = -ENXIO;
+ goto free_vm_dev;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -639,7 +613,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -669,14 +644,16 @@ static int virtio_mmio_probe(struct platform_device *pdev)
put_device(&vm_dev->vdev.dev);
return rc;
+
+free_vm_dev:
+ kfree(vm_dev);
+ return rc;
}
-static int virtio_mmio_remove(struct platform_device *pdev)
+static void virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
unregister_virtio_device(&vm_dev->vdev);
-
- return 0;
}
diff --git a/drivers/virtio/virtio_pci_admin_legacy_io.c b/drivers/virtio/virtio_pci_admin_legacy_io.c
new file mode 100644
index 000000000000..819cfbbc67c3
--- /dev/null
+++ b/drivers/virtio/virtio_pci_admin_legacy_io.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/virtio_pci_admin.h>
+#include "virtio_pci_common.h"
+
+/*
+ * virtio_pci_admin_has_legacy_io - Checks whether the legacy IO
+ * commands are supported
+ * @pdev: VF pci_dev
+ *
+ * Returns true if all legacy IO admin commands are supported.
+ */
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_pci_device *vp_dev;
+
+ if (!virtio_dev)
+ return false;
+
+ if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ vp_dev = to_vp_device(virtio_dev);
+
+ if ((vp_dev->admin_vq.supported_cmds & VIRTIO_LEGACY_ADMIN_CMD_BITMAP) ==
+ VIRTIO_LEGACY_ADMIN_CMD_BITMAP)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_legacy_io);
+
+static int virtio_pci_admin_legacy_io_write(struct pci_dev *pdev, u16 opcode,
+ u8 offset, u8 size, u8 *buf)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_legacy_wr_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data) + size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->offset = offset;
+ memcpy(data->registers, buf, size);
+ sg_init_one(&data_sg, data, sizeof(*data) + size);
+ cmd.opcode = cpu_to_le16(opcode);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_common_io_write - Write legacy common configuration
+ * of a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_write(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_write);
+
+/*
+ * virtio_pci_admin_legacy_device_io_write - Write legacy device configuration
+ * of a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_write(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_write);
+
+static int virtio_pci_admin_legacy_io_read(struct pci_dev *pdev, u16 opcode,
+ u8 offset, u8 size, u8 *buf)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_legacy_rd_data *data;
+ struct scatterlist data_sg, result_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->offset = offset;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ sg_init_one(&result_sg, buf, size);
+ cmd.opcode = cpu_to_le16(opcode);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_device_io_read - Read legacy device configuration of
+ * a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_read(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_read);
+
+/*
+ * virtio_pci_admin_legacy_common_io_read - Read legacy common configuration of
+ * a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_read(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_read);
+
+/*
+ * virtio_pci_admin_legacy_io_notify_info - Read the queue notification
+ * information for legacy interface
+ * @pdev: VF pci_dev
+ * @req_bar_flags: requested bar flags
+ * @bar: on output the BAR number of the owner or member device
+ * @bar_offset: on output the offset within bar
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+ u8 req_bar_flags, u8 *bar,
+ u64 *bar_offset)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_notify_info_result *result;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret) {
+ struct virtio_admin_cmd_notify_info_data *entry;
+ int i;
+
+ ret = -ENOENT;
+ for (i = 0; i < VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO; i++) {
+ entry = &result->entries[i];
+ if (entry->flags == VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END)
+ break;
+ if (entry->flags != req_bar_flags)
+ continue;
+ *bar = entry->bar;
+ *bar_offset = le64_to_cpu(entry->offset);
+ ret = 0;
+ break;
+ }
+ }
+
+ kfree(result);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_io_notify_info);
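These exports are meant for VF drivers that tunnel legacy IO through the PF's admin queue. A hedged caller sketch; the example_* names are hypothetical, and offset 0 of the legacy common config is the 32-bit device features register (VIRTIO_PCI_HOST_FEATURES):

static int example_read_legacy_features(struct pci_dev *vf_pdev,
					u32 *features)
{
	u8 buf[4];
	int ret;

	if (!virtio_pci_admin_has_legacy_io(vf_pdev))
		return -EOPNOTSUPP;

	/* Read 4 bytes at offset 0 of the legacy common config. */
	ret = virtio_pci_admin_legacy_common_io_read(vf_pdev, 0,
						     sizeof(buf), buf);
	if (ret)
		return ret;

	*features = buf[0] | buf[1] << 8 | buf[2] << 16 | (u32)buf[3] << 24;
	return 0;
}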
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index a6c86f916dbd..d6d79af44569 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -24,6 +24,16 @@ MODULE_PARM_DESC(force_legacy,
"Force legacy mode for transitional virtio 1 devices");
#endif
+bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ return index == vp_dev->admin_vq.vq_index;
+}
+
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
@@ -46,12 +56,26 @@ bool vp_notify(struct virtqueue *vq)
return true;
}
+/* Notify all slow path virtqueues on an interrupt. */
+static void vp_vring_slow_path_interrupt(int irq,
+ struct virtio_pci_device *vp_dev)
+{
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
+ vring_interrupt(irq, info->vq);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
+
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
virtio_config_changed(&vp_dev->vdev);
+ vp_vring_slow_path_interrupt(irq, vp_dev);
return IRQ_HANDLED;
}
@@ -125,6 +149,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
+ if (!per_vq_vectors)
+ desc = NULL;
+
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
@@ -171,11 +198,17 @@ error:
return err;
}
+static bool vp_is_slow_path_vector(u16 msix_vec)
+{
+ return msix_vec == VP_MSIX_CONFIG_VECTOR;
+}
+
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
- u16 msix_vec)
+ u16 msix_vec,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
@@ -194,13 +227,16 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
info->vq = vq;
if (callback) {
spin_lock_irqsave(&vp_dev->lock, flags);
- list_add(&info->node, &vp_dev->virtqueues);
+ if (!vp_is_slow_path_vector(msix_vec))
+ list_add(&info->node, &vp_dev->virtqueues);
+ else
+ list_add(&info->node, &vp_dev->slow_virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
} else {
INIT_LIST_HEAD(&info->node);
}
- vp_dev->vqs[index] = info;
+ *p_info = info;
return vq;
out_info:
@@ -208,10 +244,9 @@ out_info:
return vq;
}
-static void vp_del_vq(struct virtqueue *vq)
+static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
/*
@@ -232,21 +267,25 @@ static void vp_del_vq(struct virtqueue *vq)
void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info;
struct virtqueue *vq, *n;
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- if (vp_dev->per_vq_vectors) {
- int v = vp_dev->vqs[vq->index]->msix_vector;
+ info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info :
+ vp_dev->vqs[vq->index];
- if (v != VIRTIO_MSI_NO_VECTOR) {
+ if (vp_dev->per_vq_vectors) {
+ int v = info->msix_vector;
+ if (v != VIRTIO_MSI_NO_VECTOR &&
+ !vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
- irq_set_affinity_hint(irq, NULL);
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, vq);
}
}
- vp_del_vq(vq);
+ vp_del_vq(vq, info);
}
vp_dev->per_vq_vectors = false;
@@ -281,73 +320,133 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
-static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], bool per_vq_vectors,
- const bool *ctx,
- struct irq_affinity *desc)
+enum vp_vq_vector_policy {
+ VP_VQ_VECTOR_POLICY_EACH,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW,
+ VP_VQ_VECTOR_POLICY_SHARED,
+};
+
+static struct virtqueue *
+vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
+ vq_callback_t *callback, const char *name, bool ctx,
+ bool slow_path, int *allocated_vectors,
+ enum vp_vq_vector_policy vector_policy,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtqueue *vq;
u16 msix_vec;
+ int err;
+
+ if (!callback)
+ msix_vec = VIRTIO_MSI_NO_VECTOR;
+ else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
+ (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
+ !slow_path))
+ msix_vec = (*allocated_vectors)++;
+ else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
+ slow_path)
+ msix_vec = VP_MSIX_CONFIG_VECTOR;
+ else
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
+ p_info);
+ if (IS_ERR(vq))
+ return vq;
+
+ if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
+ msix_vec == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(msix_vec))
+ return vq;
+
+ /* allocate per-vq irq if available and necessary */
+ snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
+ "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec], vq);
+ if (err) {
+ vp_del_vq(vq, *p_info);
+ return ERR_PTR(err);
+ }
+
+ return vq;
+}
+
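The branch chain above encodes how each policy maps a virtqueue to an MSI-X vector. A distilled, side-effect-free restatement of just that decision (hypothetical helper, same inputs as the code above):

static u16 example_pick_msix_vec(bool has_callback, bool slow_path,
				 enum vp_vq_vector_policy policy,
				 int *allocated_vectors)
{
	if (!has_callback)
		return VIRTIO_MSI_NO_VECTOR;	/* polled, no interrupt */
	if (policy == VP_VQ_VECTOR_POLICY_EACH ||
	    (policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW && !slow_path))
		return (*allocated_vectors)++;	/* dedicated vector */
	if (slow_path)
		return VP_MSIX_CONFIG_VECTOR;	/* share with config irq */
	return VP_MSIX_VQ_VECTOR;		/* one vector for all vqs */
}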
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[],
+ enum vp_vq_vector_policy vector_policy,
+ struct irq_affinity *desc)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
+ struct virtqueue_info *vqi;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
+ struct virtqueue *vq;
+ bool per_vq_vectors;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto error_find;
+ }
+
+ per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;
+
if (per_vq_vectors) {
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
- for (i = 0; i < nvqs; ++i)
- if (names[i] && callbacks[i])
+ for (i = 0; i < nvqs; ++i) {
+ vqi = &vqs_info[i];
+ if (vqi->name && vqi->callback)
++nvectors;
+ }
+ if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+ ++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2;
}
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
- per_vq_vectors ? desc : NULL);
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
if (err)
goto error_find;
vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ vqi = &vqs_info[i];
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
-
- if (!callbacks[i])
- msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vp_dev->per_vq_vectors)
- msix_vec = allocated_vectors++;
- else
- msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false,
- msix_vec);
+ vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx, false,
+ &allocated_vectors, vector_policy,
+ &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
}
+ }
- if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
- continue;
-
- /* allocate per-vq irq if available and necessary */
- snprintf(vp_dev->msix_names[msix_vec],
- sizeof *vp_dev->msix_names,
- "%s-%s",
- dev_name(&vp_dev->vdev.dev), names[i]);
- err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
- vring_interrupt, 0,
- vp_dev->msix_names[msix_vec],
- vqs[i]);
- if (err)
- goto error_find;
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
+ avq->name, false, true, &allocated_vectors,
+ vector_policy, &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto error_find;
}
+
return 0;
error_find:
@@ -356,16 +455,25 @@ error_find:
}
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx)
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
int i, err, queue_idx = 0;
+ struct virtqueue *vq;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto out_del_vqs;
+ }
+
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
@@ -374,19 +482,32 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
vp_dev->intx_enabled = 1;
vp_dev->per_vq_vectors = false;
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false,
- VIRTIO_MSI_NO_VECTOR);
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx,
+ VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto out_del_vqs;
}
}
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
+ false, VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto out_del_vqs;
+ }
+
return 0;
out_del_vqs:
vp_del_vqs(vdev);
@@ -395,25 +516,33 @@ out_del_vqs:
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_EACH, desc);
+ if (!err)
+ return 0;
+ /* Fallback: MSI-X with one shared vector for config and
+ * slow path queues, one vector per queue for the rest.
+ */
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED, desc);
if (!err)
return 0;
/* Is there an interrupt? If not give up. */
if (!(to_vp_device(vdev)->pci_dev->irq))
return err;
/* Finally fall back to regular interrupts. */
- return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info);
}
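The fallback ladder above is driven by the virtqueue_info array that drivers now pass instead of parallel callback/name/ctx arrays. A minimal driver-side sketch, assuming the virtio_find_vqs() wrapper and hypothetical rx/tx callbacks:

static void example_rx_done(struct virtqueue *vq);
static void example_tx_done(struct virtqueue *vq);

static int example_find_vqs(struct virtio_device *vdev,
			    struct virtqueue *vqs[2])
{
	struct virtqueue_info vqs_info[] = {
		{ .name = "rx", .callback = example_rx_done },
		{ .name = "tx", .callback = example_tx_done },
	};

	/* Tries per-vq MSI-X first, then the shared fallbacks above. */
	return virtio_find_vqs(vdev, ARRAY_SIZE(vqs_info), vqs, vqs_info,
			       NULL);
}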
const char *vp_bus_name(struct virtio_device *vdev)
@@ -443,10 +572,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
mask = vp_dev->msix_affinity_masks[info->msix_vector];
irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
if (!cpu_mask)
- irq_set_affinity_hint(irq, NULL);
+ irq_update_affinity_hint(irq, NULL);
else {
cpumask_copy(mask, cpu_mask);
- irq_set_affinity_hint(irq, mask);
+ irq_set_affinity_and_hint(irq, mask);
}
}
return 0;
@@ -457,7 +586,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!vp_dev->per_vq_vectors ||
- vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
+ vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
return NULL;
return pci_irq_get_affinity(vp_dev->pci_dev,
@@ -492,8 +622,40 @@ static int virtio_pci_restore(struct device *dev)
return virtio_device_restore(&vp_dev->vdev);
}
+static bool vp_supports_pm_no_reset(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u16 pmcsr;
+
+ if (!pci_dev->pm_cap)
+ return false;
+
+ pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+		dev_err(dev, "Unable to query pmcsr\n");
+ return false;
+ }
+
+ return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
+}
+
+static int virtio_pci_suspend(struct device *dev)
+{
+ return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
+}
+
+static int virtio_pci_resume(struct device *dev)
+{
+ return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
+}
+
static const struct dev_pm_ops virtio_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
+ .suspend = virtio_pci_suspend,
+ .resume = virtio_pci_resume,
+ .freeze = virtio_pci_freeze,
+ .thaw = virtio_pci_restore,
+ .poweroff = virtio_pci_freeze,
+ .restore = virtio_pci_restore,
};
#endif
@@ -533,6 +695,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->pci_dev = pci_dev;
INIT_LIST_HEAD(&vp_dev->virtqueues);
+ INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
spin_lock_init(&vp_dev->lock);
/* enable the device */
@@ -557,8 +720,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;
-
rc = register_virtio_device(&vp_dev->vdev);
reg_dev = vp_dev;
if (rc)
@@ -633,6 +794,46 @@ static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
return num_vfs;
}
+static void virtio_pci_reset_prepare(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret = 0;
+
+ ret = virtio_device_reset_prepare(&vp_dev->vdev);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+			dev_warn(&pci_dev->dev, "Reset prepare failure: %d\n",
+ ret);
+ return;
+ }
+
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void virtio_pci_reset_done(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ if (pci_is_enabled(pci_dev))
+ return;
+
+ ret = pci_enable_device(pci_dev);
+ if (!ret) {
+ pci_set_master(pci_dev);
+ ret = virtio_device_reset_done(&vp_dev->vdev);
+ }
+
+ if (ret && ret != -EOPNOTSUPP)
+		dev_warn(&pci_dev->dev, "Reset done failure: %d\n", ret);
+}
+
+static const struct pci_error_handlers virtio_pci_err_handler = {
+ .reset_prepare = virtio_pci_reset_prepare,
+ .reset_done = virtio_pci_reset_done,
+};
+
static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
@@ -642,8 +843,20 @@ static struct pci_driver virtio_pci_driver = {
.driver.pm = &virtio_pci_pm_ops,
#endif
.sriov_configure = virtio_pci_sriov_configure,
+ .err_handler = &virtio_pci_err_handler,
};
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
+{
+ struct virtio_pci_device *pf_vp_dev;
+
+ pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
+ if (IS_ERR(pf_vp_dev))
+ return NULL;
+
+ return &pf_vp_dev->vdev;
+}
+
module_pci_driver(virtio_pci_driver);
MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 23112d84218f..8cd01de27baf 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -29,37 +29,60 @@
#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
- /* the list node for the virtqueues list */
+ /* the list node for the virtqueues or slow_virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
unsigned int msix_vector;
};
+struct virtio_pci_admin_vq {
+ /* Virtqueue info associated with this admin queue. */
+ struct virtio_pci_vq_info *info;
+ /* Protects virtqueue access. */
+ spinlock_t lock;
+ u64 supported_cmds;
+ u64 supported_caps;
+ u8 max_dev_parts_objects;
+ struct ida dev_parts_ida;
+ /* Name of the admin queue: avq.$vq_index. */
+ char name[10];
+ u16 vq_index;
+};
+
/* Our device structure */
struct virtio_pci_device {
struct virtio_device vdev;
struct pci_dev *pci_dev;
- struct virtio_pci_legacy_device ldev;
- struct virtio_pci_modern_device mdev;
-
+ union {
+ struct virtio_pci_legacy_device ldev;
+ struct virtio_pci_modern_device mdev;
+ };
bool is_legacy;
/* Where to read and clear interrupt */
u8 __iomem *isr;
- /* a list of queues so we can dispatch IRQs */
+ /* Lists of queues and potentially slow path queues
+ * so we can dispatch IRQs.
+ */
spinlock_t lock;
struct list_head virtqueues;
+ struct list_head slow_virtqueues;
- /* array of all queues for house-keeping */
+ /* Array of all virtqueues reported in the
+ * PCI common config num_queues field
+ */
struct virtio_pci_vq_info **vqs;
+ struct virtio_pci_admin_vq admin_vq;
+
/* MSI-X support */
int msix_enabled;
int intx_enabled;
@@ -85,6 +108,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
+ int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};
/* Constants for MSI-X */
@@ -109,8 +133,7 @@ bool vp_notify(struct virtqueue *vq);
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);
@@ -138,4 +161,41 @@ static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
+
+#define VIRTIO_LEGACY_ADMIN_CMD_BITMAP \
+ (BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))
+
+#define VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP \
+ (BIT_ULL(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_GET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_SET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_MODE_SET))
+
+/* Unlike modern drivers which support hardware virtio devices, legacy drivers
+ * assume software-based devices: e.g. they don't use proper memory barriers
+ * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
+ * or less by chance. For now, only support legacy IO on X86.
+ */
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+#define VIRTIO_ADMIN_CMD_BITMAP (VIRTIO_LEGACY_ADMIN_CMD_BITMAP | \
+ VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)
+#else
+#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP
+#endif
+
+bool vp_is_avq(struct virtio_device *vdev, unsigned int index);
+void vp_modern_avq_done(struct virtqueue *vq);
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd);
+
#endif
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 2257f1b3d8ae..d9cbb02b35a1 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -223,6 +223,7 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->is_legacy = true;
return 0;
}
diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c
index 677d1f68bc9b..bbbf89c22880 100644
--- a/drivers/virtio/virtio_pci_legacy_dev.c
+++ b/drivers/virtio/virtio_pci_legacy_dev.c
@@ -140,9 +140,9 @@ EXPORT_SYMBOL_GPL(vp_legacy_set_status);
* vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue
* @ldev: the legacy virtio-pci device
* @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
*
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
*/
u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
u16 index, u16 vector)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 9e496e288cfa..dd0e65f71d41 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -15,15 +15,353 @@
*/
#include <linux/delay.h>
+#include <linux/virtio_pci_admin.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
-static u64 vp_get_features(struct virtio_device *vdev)
+#define VIRTIO_AVQ_SGS_MAX 4
+
+static void vp_get_features(struct virtio_device *vdev, u64 *features)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ vp_modern_get_extended_features(&vp_dev->mdev, features);
+}
+
+static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ *num = 0;
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return 0;
+
+ *num = vp_modern_avq_num(&vp_dev->mdev);
+ if (!(*num))
+ return -EINVAL;
+ *index = vp_modern_avq_index(&vp_dev->mdev);
+ return 0;
+}
+
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ unsigned int status_size = sizeof(struct virtio_admin_cmd_status);
+ struct virtio_admin_cmd *cmd;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((cmd = virtqueue_get_buf(vq, &len))) {
+ /* If the number of bytes written by the device is less
+ * than the size of struct virtio_admin_cmd_status, the
+ * remaining status bytes will remain zero-initialized,
+ * since the buffer was zeroed during allocation.
+ * In this case, set the size of command_specific_result
+ * to 0.
+ */
+ if (len < status_size)
+ cmd->result_sg_size = 0;
+ else
+ cmd->result_sg_size = len - status_size;
+ complete(&cmd->completion);
+ }
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+}
+
+static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
+ u16 opcode,
+ struct scatterlist **sgs,
+ unsigned int out_num,
+ unsigned int in_num,
+ struct virtio_admin_cmd *cmd)
+{
+ struct virtqueue *vq;
+ unsigned long flags;
+ int ret;
+
+ vq = admin_vq->info->vq;
+ if (!vq)
+ return -EIO;
+
+ if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
+ opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
+ !((1ULL << opcode) & admin_vq->supported_cmds))
+ return -EOPNOTSUPP;
+
+ init_completion(&cmd->completion);
+
+again:
+ if (virtqueue_is_broken(vq))
+ return -EIO;
+
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ cpu_relax();
+ goto again;
+ }
+ goto unlock_err;
+ }
+ if (!virtqueue_kick(vq))
+ goto unlock_err;
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+
+ wait_for_completion(&cmd->completion);
+
+ return cmd->ret;
+
+unlock_err:
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ return -EIO;
+}
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd)
+{
+ struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_admin_cmd_status *va_status;
+ unsigned int out_num = 0, in_num = 0;
+ struct virtio_admin_cmd_hdr *va_hdr;
+ u16 status;
+ int ret;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return -EOPNOTSUPP;
+
+ va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
+ if (!va_status)
+ return -ENOMEM;
+
+ va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
+ if (!va_hdr) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ va_hdr->opcode = cmd->opcode;
+ va_hdr->group_type = cmd->group_type;
+ va_hdr->group_member_id = cmd->group_member_id;
+
+ /* Add header */
+ sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
+ sgs[out_num] = &hdr;
+ out_num++;
+
+ if (cmd->data_sg) {
+ sgs[out_num] = cmd->data_sg;
+ out_num++;
+ }
+
+ /* Add return status */
+ sg_init_one(&stat, va_status, sizeof(*va_status));
+ sgs[out_num + in_num] = &stat;
+ in_num++;
+
+ if (cmd->result_sg) {
+ sgs[out_num + in_num] = cmd->result_sg;
+ in_num++;
+ }
+
+ ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
+ le16_to_cpu(cmd->opcode),
+ sgs, out_num, in_num, cmd);
+ if (ret) {
+ dev_err(&vdev->dev,
+			"Failed to execute command on admin vq: %d.\n", ret);
+ goto err_cmd_exec;
+ }
+
+ status = le16_to_cpu(va_status->status);
+ if (status != VIRTIO_ADMIN_STATUS_OK) {
+ dev_err(&vdev->dev,
+ "admin command error: status(%#x) qualifier(%#x)\n",
+ status, le16_to_cpu(va_status->status_qualifier));
+ ret = -status;
+ }
+
+err_cmd_exec:
+ kfree(va_hdr);
+err_alloc:
+ kfree(va_status);
+ return ret;
+}
+
+static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ struct scatterlist data_sg;
+ __le64 *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ sg_init_one(&result_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.result_sg = &result_sg;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ *data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = NULL;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
+end:
+ kfree(data);
+}
+
+static void
+virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd_cap_get_data *get_data;
+ struct virtio_admin_cmd_cap_set_data *set_data;
+ struct virtio_dev_parts_cap *result;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ struct scatterlist data_sg;
+ u8 resource_objects_limit;
+ u16 set_data_size;
+ int ret;
+
+ get_data = kzalloc(sizeof(*get_data), GFP_KERNEL);
+ if (!get_data)
+ return;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ goto end;
+
+ get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
+ sg_init_one(&data_sg, get_data, sizeof(*get_data));
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto err_get;
+
+ set_data_size = sizeof(*set_data) + sizeof(*result);
+ set_data = kzalloc(set_data_size, GFP_KERNEL);
+ if (!set_data)
+ goto err_get;
+
+ set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
+
+ /* Set the limit to the minimum value between the GET and SET values
+ * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP
+ * is a globally unique value per PF, there is no possibility of
+ * overlap between GET and SET operations.
+ */
+ resource_objects_limit = min(result->get_parts_resource_objects_limit,
+ result->set_parts_resource_objects_limit);
+ result->get_parts_resource_objects_limit = resource_objects_limit;
+ result->set_parts_resource_objects_limit = resource_objects_limit;
+ memcpy(set_data->cap_specific_data, result, sizeof(*result));
+ sg_init_one(&data_sg, set_data, set_data_size);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = NULL;
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET);
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto err_set;
+
+	/* Initialize the IDA used to manage the dev parts objects */
+ ida_init(&vp_dev->admin_vq.dev_parts_ida);
+ vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit;
+
+err_set:
+ kfree(set_data);
+err_get:
+ kfree(result);
+end:
+ kfree(get_data);
+}
+
+static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd_query_cap_id_result *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ sg_init_one(&result_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.result_sg = &result_sg;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ /* Max number of caps fits into a single u64 */
+ BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64));
+
+ vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]);
+
+ if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP)))
+ goto end;
+
+ virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev);
+end:
+ kfree(data);
+}
+
+static void vp_modern_avq_activate(struct virtio_device *vdev)
+{
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ virtio_pci_admin_cmd_list_init(vdev);
+ virtio_pci_admin_cmd_cap_init(vdev);
+}
+
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_admin_cmd *cmd;
+ struct virtqueue *vq;
- return vp_modern_get_features(&vp_dev->mdev);
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ vq = vp_dev->admin_vq.info->vq;
+ if (!vq)
+ return;
+
+ while ((cmd = virtqueue_detach_unused_buf(vq))) {
+ cmd->ret = -EIO;
+ complete(&cmd->completion);
+ }
}
static void vp_transport_features(struct virtio_device *vdev, u64 features)
@@ -37,6 +375,45 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if (features & BIT_ULL(VIRTIO_F_RING_RESET))
__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
+
+ if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
+ __virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
+}
+
+static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
+ u32 offset, const char *fname)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!__virtio_test_bit(vdev, fbit))
+ return 0;
+
+ if (likely(vp_dev->mdev.common_len >= offset))
+ return 0;
+
+ dev_err(&vdev->dev,
+ "virtio: common cfg size(%zu) does not match the feature %s\n",
+ vp_dev->mdev.common_len, fname);
+
+ return -EINVAL;
+}
+
+#define vp_check_common_size_one_feature(vdev, fbit, field) \
+ __vp_check_common_size_one_feature(vdev, fbit, \
+ offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)
+
+static int vp_check_common_size(struct virtio_device *vdev)
+{
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
+ return -EINVAL;
+
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
+ return -EINVAL;
+
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
+ return -EINVAL;
+
+ return 0;
}
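offsetofend() lets each check above read as "the common config must extend at least past this field". A tiny hypothetical illustration:

/* offsetofend(TYPE, MEMBER) == offsetof(TYPE, MEMBER) + sizeof(MEMBER) */
struct example_cfg {
	__le16 a;	/* bytes 0..1 */
	__le16 b;	/* bytes 2..3 */
};
/*
 * offsetofend(struct example_cfg, b) == 4: a feature that depends on
 * "b" requires the device to expose at least 4 bytes of common config.
 */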
/* virtio config->finalize_features() implementation */
@@ -57,7 +434,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- vp_modern_set_features(&vp_dev->mdev, vdev->features);
+ if (vp_check_common_size(vdev))
+ return -EINVAL;
+
+ vp_modern_set_extended_features(&vp_dev->mdev, vdev->features_array);
return 0;
}
@@ -159,6 +539,8 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
/* We should never be setting status to 0. */
BUG_ON(status == 0);
vp_modern_set_status(&vp_dev->mdev, status);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ vp_modern_avq_activate(vdev);
}
static void vp_reset(struct virtio_device *vdev)
@@ -175,6 +557,9 @@ static void vp_reset(struct virtio_device *vdev)
*/
while (vp_modern_get_status(mdev))
msleep(1);
+
+ vp_modern_avq_cleanup(vdev);
+
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -288,6 +673,15 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
return vp_modern_config_vector(&vp_dev->mdev, vector);
}
+static bool vp_notify_with_data(struct virtqueue *vq)
+{
+ u32 data = vring_notification_data(vq);
+
+ iowrite32(data, (void __iomem *)vq->priv);
+
+ return true;
+}
+
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
unsigned int index,
@@ -298,30 +692,33 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ bool (*notify)(struct virtqueue *vq);
struct virtqueue *vq;
+ bool is_avq;
u16 num;
int err;
- if (index >= vp_modern_get_num_queues(mdev))
+ if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
+ notify = vp_notify_with_data;
+ else
+ notify = vp_notify;
+
+ is_avq = vp_is_avq(&vp_dev->vdev, index);
+ if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
- /* Check if queue is either not available or already active. */
num = vp_modern_get_queue_size(mdev, index);
+ /* Check if queue is either not available or already active. */
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
- if (!is_power_of_2(num)) {
- dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
- return ERR_PTR(-EINVAL);
- }
-
info->msix_vector = msix_vec;
/* create the vring */
vq = vring_create_virtqueue(index, num,
SMP_CACHE_BYTES, &vp_dev->vdev,
true, true, ctx,
- vp_notify, callback, name);
+ notify, callback, name);
if (!vq)
return ERR_PTR(-ENOMEM);
@@ -346,13 +743,12 @@ err:
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
- int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
+ int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);
if (rc)
return rc;
@@ -481,6 +877,353 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
+/*
+ * virtio_pci_admin_has_dev_parts - Checks whether the device parts
+ * functionality is supported
+ * @pdev: VF pci_dev
+ *
+ * Returns true if the device parts functionality is supported.
+ */
+bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_pci_device *vp_dev;
+
+ if (!virtio_dev)
+ return false;
+
+ if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ vp_dev = to_vp_device(virtio_dev);
+
+ if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) ==
+ VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP))
+ return false;
+
+ return vp_dev->admin_vq.max_dev_parts_objects;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts);
+
+/*
+ * virtio_pci_admin_mode_set - Sets the mode of a member device
+ * @pdev: VF pci_dev
+ * @flags: device mode's flags
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_dev_mode_set_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->flags = flags;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set);
+
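
Stopping the member device before snapshotting its parts is the intended use; a sketch, assuming the VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED flag from the uapi header (writing 0 resumes normal operation):

/* Sketch: quiesce/resume a VF around a device-parts snapshot.
 * VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED is assumed from
 * <uapi/linux/virtio_pci.h>.
 */
static int example_quiesce(struct pci_dev *vf_pdev, bool stop)
{
	return virtio_pci_admin_mode_set(vf_pdev,
			stop ? VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED : 0);
}
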
+/*
+ * virtio_pci_admin_obj_create - Creates an object for a given type and operation,
+ * subject to the device's limit on how many such objects can exist.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @operation_type: Operation type
+ * @obj_id: Output unique object id
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type,
+ u32 *obj_id)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data);
+ struct virtio_admin_cmd_resource_obj_create_data *obj_create_data;
+ struct virtio_resource_obj_dev_parts obj_dev_parts = {};
+ struct virtio_pci_admin_vq *avq;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ void *data;
+ int id = -1;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
+ return -EOPNOTSUPP;
+
+ if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET &&
+ operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET)
+ return -EINVAL;
+
+ avq = &to_vp_device(virtio_dev)->admin_vq;
+ if (!avq->max_dev_parts_objects)
+ return -EOPNOTSUPP;
+
+ id = ida_alloc_range(&avq->dev_parts_ida, 0,
+ avq->max_dev_parts_objects - 1, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *obj_id = id;
+ data_size += sizeof(obj_dev_parts);
+ data = kzalloc(data_size, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ obj_create_data = data;
+ obj_create_data->hdr.type = cpu_to_le16(obj_type);
+ obj_create_data->hdr.id = cpu_to_le32(*obj_id);
+ obj_dev_parts.type = operation_type;
+ memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts,
+ sizeof(obj_dev_parts));
+ sg_init_one(&data_sg, data, data_size);
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+end:
+ if (ret)
+ ida_free(&avq->dev_parts_ida, id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create);
+
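
The object id returned here names the transfer session for the later metadata/parts commands; a sketch of the create/destroy bracket (constants as used in this file, body elided):

/* Sketch: bracket a device-parts read with object create/destroy. */
static int example_with_parts_obj(struct pci_dev *vf_pdev)
{
	u32 obj_id;
	int ret;

	ret = virtio_pci_admin_obj_create(vf_pdev,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET,
					  &obj_id);
	if (ret)
		return ret;

	/* ... metadata/parts get commands against obj_id ... */

	return virtio_pci_admin_obj_destroy(vf_pdev,
					    VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					    obj_id);
}
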
+/*
+ * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_resource_obj_cmd_hdr *data;
+ struct virtio_pci_device *vp_dev;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->type = cpu_to_le16(obj_type);
+ data->id = cpu_to_le32(id);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret) {
+ vp_dev = to_vp_device(virtio_dev);
+ ida_free(&vp_dev->admin_vq.dev_parts_ida, id);
+ }
+
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy);
+
+/*
+ * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts
+ * identified by the attributes below.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ * @metadata_type: Metadata type
+ * @out: Upon success holds the device parts size for the requested metadata type
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type,
+ u32 id, u8 metadata_type, u32 *out)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_dev_parts_metadata_result *result;
+ struct virtio_admin_cmd_dev_parts_metadata_data *data;
+ struct scatterlist data_sg, result_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE)
+ return -EOPNOTSUPP;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ data->hdr.type = cpu_to_le16(obj_type);
+ data->hdr.id = cpu_to_le32(id);
+ data->type = metadata_type;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret)
+ *out = le32_to_cpu(result->parts_size.size);
+
+ kfree(result);
+end:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get);
+
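
The size query exists so a caller can allocate the receive buffer before issuing the actual parts get; a sketch:

/* Sketch: size the buffer that ..._dev_parts_get() will fill. */
static int example_parts_size(struct pci_dev *vf_pdev, u32 obj_id, u32 *size)
{
	return virtio_pci_admin_dev_parts_metadata_get(vf_pdev,
			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
			VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE,
			size);
}
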
+/*
+ * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the attributes below.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ * @get_type: Get type
+ * @res_sg: Upon success holds the output result data
+ * @res_size: Upon success holds the output result size
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id,
+ u8 get_type, struct scatterlist *res_sg,
+ u32 *res_size)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_dev_parts_get_data *data;
+ struct scatterlist data_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL)
+ return -EOPNOTSUPP;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->hdr.type = cpu_to_le16(obj_type);
+ data->hdr.id = cpu_to_le32(id);
+ data->type = get_type;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = res_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret)
+ *res_size = cmd.result_sg_size;
+
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get);
+
+/*
+ * virtio_pci_admin_dev_parts_set - Sets the device parts described by the given data.
+ * @pdev: VF pci_dev
+ * @data_sg: The device parts data, its layout follows struct virtio_admin_cmd_dev_parts_set_data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = data_sg;
+ return vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set);
+
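
Get and set are the two halves of a save/restore pair: the scatterlist filled on the source is what the destination feeds back in (layout per struct virtio_admin_cmd_dev_parts_set_data). A sketch, with scatterlist setup elided:

/* Sketch: device-parts save on the source, restore on the target. */
static int example_save(struct pci_dev *vf, u32 obj_id,
			struct scatterlist *sg, u32 *size)
{
	return virtio_pci_admin_dev_parts_get(vf,
			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
			VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL, sg, size);
}

static int example_restore(struct pci_dev *vf, struct scatterlist *sg)
{
	return virtio_pci_admin_dev_parts_set(vf, sg);
}
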
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -491,7 +1234,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.find_vqs = vp_modern_find_vqs,
.del_vqs = vp_del_vqs,
.synchronize_cbs = vp_synchronize_vectors,
- .get_features = vp_get_features,
+ .get_extended_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
.set_vq_affinity = vp_set_vq_affinity,
@@ -511,7 +1254,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.find_vqs = vp_modern_find_vqs,
.del_vqs = vp_del_vqs,
.synchronize_cbs = vp_synchronize_vectors,
- .get_features = vp_get_features,
+ .get_extended_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
.set_vq_affinity = vp_set_vq_affinity,
@@ -542,9 +1285,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->avq_index = vp_avq_index;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
+ spin_lock_init(&vp_dev->admin_vq.lock);
return 0;
}
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 869cb46bef96..413a8c353463 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -203,6 +203,14 @@ static inline void check_offsets(void)
offsetof(struct virtio_pci_common_cfg, queue_used_lo));
BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
offsetof(struct virtio_pci_common_cfg, queue_used_hi));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NDATA !=
+ offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
+ offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_IDX !=
+ offsetof(struct virtio_pci_modern_common_cfg, admin_queue_index));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_NUM !=
+ offsetof(struct virtio_pci_modern_common_cfg, admin_queue_num));
}
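
The new BUILD_BUG_ONs pin the added register offsets to the structure layout. For orientation, a sketch of the layout those checks enforce (the authoritative definition lives in the virtio_pci_modern header):

/* Sketch of the extended common config the checks above describe. */
struct example_modern_common_cfg {
	struct virtio_pci_common_cfg cfg;
	__le16 queue_notify_data;	/* VIRTIO_PCI_COMMON_Q_NDATA */
	__le16 queue_reset;		/* VIRTIO_PCI_COMMON_Q_RESET */
	__le16 admin_queue_index;	/* VIRTIO_PCI_COMMON_ADM_Q_IDX */
	__le16 admin_queue_num;		/* VIRTIO_PCI_COMMON_ADM_Q_NUM */
};
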
/*
@@ -218,21 +226,29 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
int err, common, isr, notify, device;
u32 notify_length;
u32 notify_offset;
+ int devid;
check_offsets();
- /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
- if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
- return -ENODEV;
-
- if (pci_dev->device < 0x1040) {
- /* Transitional devices: use the PCI subsystem device id as
- * virtio device id, same as legacy driver always did.
- */
- mdev->id.device = pci_dev->subsystem_device;
+ if (mdev->device_id_check) {
+ devid = mdev->device_id_check(pci_dev);
+ if (devid < 0)
+ return devid;
+ mdev->id.device = devid;
} else {
- /* Modern devices: simply use PCI device id, but start from 0x1040. */
- mdev->id.device = pci_dev->device - 0x1040;
+ /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
+ return -ENODEV;
+
+ if (pci_dev->device < 0x1040) {
+ /* Transitional devices: use the PCI subsystem device id as
+ * virtio device id, same as legacy driver always did.
+ */
+ mdev->id.device = pci_dev->subsystem_device;
+ } else {
+ /* Modern devices: simply use PCI device id, but start from 0x1040. */
+ mdev->id.device = pci_dev->device - 0x1040;
+ }
}
mdev->id.vendor = pci_dev->subsystem_vendor;
@@ -260,7 +276,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
return -EINVAL;
}
- err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pci_dev->dev,
+ mdev->dma_mask ? : DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pci_dev->dev,
DMA_BIT_MASK(32));
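
The new device_id_check hook lets a transport user claim PCI IDs outside the standard 0x1000..0x107f window and report its own virtio device ID; a hypothetical callback (the 0x10ff ID is made up):

/* Hypothetical hook, installed as mdev->device_id_check before
 * vp_modern_probe(); returns a virtio device ID or -errno.
 */
static int example_device_id_check(struct pci_dev *pdev)
{
	if (pdev->device != 0x10ff)	/* made-up vendor-specific ID */
		return -ENODEV;

	return VIRTIO_ID_NET;
}
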
@@ -281,9 +298,10 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
err = -EINVAL;
mdev->common = vp_modern_map_capability(mdev, common,
- sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
- NULL, NULL);
+ sizeof(struct virtio_pci_common_cfg), 4, 0,
+ offsetofend(struct virtio_pci_modern_common_cfg,
+ admin_queue_num),
+ &mdev->common_len, NULL);
if (!mdev->common)
goto err_map_common;
mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
@@ -370,63 +388,74 @@ void vp_modern_remove(struct virtio_pci_modern_device *mdev)
EXPORT_SYMBOL_GPL(vp_modern_remove);
/*
- * vp_modern_get_features - get features from device
+ * vp_modern_get_extended_features - get features from device
* @mdev: the modern virtio-pci device
+ * @features: the features array to be filled
*
- * Returns the features read from the device
+ * Fill the specified features array with the features read from the device
*/
-u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
+void vp_modern_get_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features)
{
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+ int i;
- u64 features;
+ virtio_features_zero(features);
+ for (i = 0; i < VIRTIO_FEATURES_BITS / 32; i++) {
+ u64 cur;
- vp_iowrite32(0, &cfg->device_feature_select);
- features = vp_ioread32(&cfg->device_feature);
- vp_iowrite32(1, &cfg->device_feature_select);
- features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
-
- return features;
+ vp_iowrite32(i, &cfg->device_feature_select);
+ cur = vp_ioread32(&cfg->device_feature);
+ features[i >> 1] |= cur << (32 * (i & 1));
+ }
}
-EXPORT_SYMBOL_GPL(vp_modern_get_features);
+EXPORT_SYMBOL_GPL(vp_modern_get_extended_features);
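
Each 32-bit feature window i lands in 64-bit word i >> 1, shifted up by 32 when i is odd. A worked sketch for a 128-bit feature space:

/* Sketch: fold four 32-bit windows into two u64 words, mirroring
 * the loop above. win[0] fills bits 0..31 of features[0], win[1]
 * bits 32..63; win[2] and win[3] do the same for features[1].
 */
static void example_fold_features(u64 features[2], const u32 win[4])
{
	int i;

	features[0] = features[1] = 0;
	for (i = 0; i < 4; i++)
		features[i >> 1] |= (u64)win[i] << (32 * (i & 1));
}
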
/*
* vp_modern_get_driver_features - get driver features from device
* @mdev: the modern virtio-pci device
+ * @features: the features array to be filled
*
- * Returns the driver features read from the device
+ * Fill the specified features array with the driver features read from the
+ * device
*/
-u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
+void
+vp_modern_get_driver_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features)
{
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+ int i;
- u64 features;
-
- vp_iowrite32(0, &cfg->guest_feature_select);
- features = vp_ioread32(&cfg->guest_feature);
- vp_iowrite32(1, &cfg->guest_feature_select);
- features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);
+ virtio_features_zero(features);
+ for (i = 0; i < VIRTIO_FEATURES_BITS / 32; i++) {
+ u64 cur;
- return features;
+ vp_iowrite32(i, &cfg->guest_feature_select);
+ cur = vp_ioread32(&cfg->guest_feature);
+ features[i >> 1] |= cur << (32 * (i & 1));
+ }
}
-EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);
+EXPORT_SYMBOL_GPL(vp_modern_get_driver_extended_features);
/*
- * vp_modern_set_features - set features to device
+ * vp_modern_set_extended_features - set features to device
* @mdev: the modern virtio-pci device
* @features: the features set to device
*/
-void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
- u64 features)
+void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev,
+ const u64 *features)
{
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_BITS / 32; i++) {
+ u32 cur = features[i >> 1] >> (32 * (i & 1));
- vp_iowrite32(0, &cfg->guest_feature_select);
- vp_iowrite32((u32)features, &cfg->guest_feature);
- vp_iowrite32(1, &cfg->guest_feature_select);
- vp_iowrite32(features >> 32, &cfg->guest_feature);
+ vp_iowrite32(i, &cfg->guest_feature_select);
+ vp_iowrite32(cur, &cfg->guest_feature);
+ }
}
-EXPORT_SYMBOL_GPL(vp_modern_set_features);
+EXPORT_SYMBOL_GPL(vp_modern_set_extended_features);
/*
 * vp_modern_generation - get the device generation
@@ -517,9 +546,9 @@ EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
*
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
*/
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
u16 index, u16 vector)
@@ -705,6 +734,24 @@ void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+ return vp_ioread16(&cfg->admin_queue_num);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_num);
+
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+ return vp_ioread16(&cfg->admin_queue_index);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_index);
+
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 723c4e29e1d3..ddab68959671 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -69,12 +69,20 @@
struct vring_desc_state_split {
void *data; /* Data for callback. */
- struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
+
+	/* Indirect desc table and extra table, if any. The two are
+	 * allocated together to avoid extra pressure on the memory allocator.
+ */
+ struct vring_desc *indir_desc;
};
struct vring_desc_state_packed {
void *data; /* Data for callback. */
- struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
+
+	/* Indirect desc table and extra table, if any. The two are
+	 * allocated together to avoid extra pressure on the memory allocator.
+ */
+ struct vring_packed_desc *indir_desc;
u16 num; /* Descriptor list length. */
u16 last; /* The last desc state in a list. */
};
@@ -158,7 +166,7 @@ struct vring_virtqueue {
bool packed_ring;
/* Is DMA API used? */
- bool use_dma_api;
+ bool use_map_api;
/* Can we use weak barriers? */
bool weak_barriers;
@@ -202,6 +210,8 @@ struct vring_virtqueue {
/* DMA, allocation, and size information */
bool we_own_ring;
+ union virtio_map map;
+
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -212,14 +222,6 @@ struct vring_virtqueue {
#endif
};
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);
@@ -227,10 +229,10 @@ static void vring_free(struct virtqueue *_vq);
* Helpers.
*/
-#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
-static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
- unsigned int total_sg)
+static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
+ unsigned int total_sg)
{
/*
* If the host supports indirect descriptor tables, and we have multiple
@@ -265,7 +267,7 @@ static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
* unconditionally on data path.
*/
-static bool vring_use_dma_api(struct virtio_device *vdev)
+static bool vring_use_map_api(const struct virtio_device *vdev)
{
if (!virtio_has_dma_quirk(vdev))
return true;
@@ -285,29 +287,42 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
}
-size_t virtio_max_dma_size(struct virtio_device *vdev)
+static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring,
+ const struct vring_desc_extra *extra)
+{
+ return vring->use_map_api && (extra->addr != DMA_MAPPING_ERROR);
+}
+
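
DMA_MAPPING_ERROR in extra->addr doubles as the premapped marker: the add paths below store it for buffers the caller mapped itself, so this helper tells the core to leave such entries alone. A restating sketch:

/* Sketch: premapped entries (addr == DMA_MAPPING_ERROR) are never
 * unmapped by the core; whoever created the mapping tears it down.
 */
static bool example_need_unmap(const struct vring_desc_extra *extra,
			       bool use_map_api)
{
	return use_map_api && extra->addr != DMA_MAPPING_ERROR;
}
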
+size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
- if (vring_use_dma_api(vdev))
- max_segment_size = dma_max_mapping_size(vdev->dev.parent);
+ if (vring_use_map_api(vdev)) {
+ if (vdev->map) {
+ max_segment_size =
+ vdev->map->max_mapping_size(vdev->vmap);
+		} else {
+			max_segment_size =
+				dma_max_mapping_size(vdev->dev.parent);
+		}
+	}
return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *map_handle, gfp_t flag,
+ union virtio_map map)
{
- if (vring_use_dma_api(vdev)) {
- return dma_alloc_coherent(vdev->dev.parent, size,
- dma_handle, flag);
+ if (vring_use_map_api(vdev)) {
+ return virtqueue_map_alloc_coherent(vdev, map, size,
+ map_handle, flag);
} else {
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
if (queue) {
phys_addr_t phys_addr = virt_to_phys(queue);
- *dma_handle = (dma_addr_t)phys_addr;
+ *map_handle = (dma_addr_t)phys_addr;
/*
 * Sanity check: make sure we didn't truncate
@@ -320,7 +335,7 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
* warning and abort if we end up with an
* unrepresentable address.
*/
- if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+ if (WARN_ON_ONCE(*map_handle != phys_addr)) {
free_pages_exact(queue, PAGE_ALIGN(size));
return NULL;
}
@@ -330,10 +345,12 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
- void *queue, dma_addr_t dma_handle)
+ void *queue, dma_addr_t map_handle,
+ union virtio_map map)
{
- if (vring_use_dma_api(vdev))
- dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
+ if (vring_use_map_api(vdev))
+ virtqueue_map_free_coherent(vdev, map, size,
+ queue, map_handle);
else
free_pages_exact(queue, PAGE_ALIGN(size));
}
@@ -341,26 +358,49 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
/*
* The DMA ops on various arches are rather gnarly right now, and
* making all of the arch DMA ops work on the vring device itself
- * is a mess. For now, we use the parent device for DMA ops.
+ * is a mess.
*/
-static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+ return vq->map.dma_dev;
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
{
- return vq->vq.vdev->dev.parent;
+ struct virtio_device *vdev = vq->vq.vdev;
+
+ if (!vq->use_map_api)
+ return 0;
+
+ if (vdev->map)
+ return vdev->map->mapping_error(vq->map, addr);
+ else
+ return dma_mapping_error(vring_dma_dev(vq), addr);
}
/* Map one sg entry. */
-static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
- struct scatterlist *sg,
- enum dma_data_direction direction)
+static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
+ enum dma_data_direction direction, dma_addr_t *addr,
+ u32 *len, bool premapped)
{
- if (!vq->use_dma_api) {
+ if (premapped) {
+ *addr = sg_dma_address(sg);
+ *len = sg_dma_len(sg);
+ return 0;
+ }
+
+ *len = sg->length;
+
+ if (!vq->use_map_api) {
/*
* If DMA is not used, KMSAN doesn't know that the scatterlist
* is initialized by the hardware. Explicitly check/unpoison it
* depending on the direction.
*/
- kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
- return (dma_addr_t)sg_phys(sg);
+ kmsan_handle_dma(sg_phys(sg), sg->length, direction);
+ *addr = (dma_addr_t)sg_phys(sg);
+ return 0;
}
/*
@@ -368,29 +408,25 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
* the way it expects (we don't guarantee that the scatterlist
* will exist for the lifetime of the mapping).
*/
- return dma_map_page(vring_dma_dev(vq),
- sg_page(sg), sg->offset, sg->length,
- direction);
+ *addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
+ sg->offset, sg->length,
+ direction, 0);
+
+ if (vring_mapping_error(vq, *addr))
+ return -ENOMEM;
+
+ return 0;
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
return (dma_addr_t)virt_to_phys(cpu_addr);
- return dma_map_single(vring_dma_dev(vq),
- cpu_addr, size, direction);
-}
-
-static int vring_mapping_error(const struct vring_virtqueue *vq,
- dma_addr_t addr)
-{
- if (!vq->use_dma_api)
- return 0;
-
- return dma_mapping_error(vring_dma_dev(vq), addr);
+ return virtqueue_map_single_attrs(&vq->vq, cpu_addr,
+ size, direction, 0);
}
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
@@ -416,58 +452,37 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
* Split ring specific functions - *_split().
*/
-static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
- struct vring_desc *desc)
-{
- u16 flags;
-
- if (!vq->use_dma_api)
- return;
-
- flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
-
- dma_unmap_page(vring_dma_dev(vq),
- virtio64_to_cpu(vq->vq.vdev, desc->addr),
- virtio32_to_cpu(vq->vq.vdev, desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
-}
-
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
- unsigned int i)
+ struct vring_desc_extra *extra)
{
- struct vring_desc_extra *extra = vq->split.desc_extra;
u16 flags;
- if (!vq->use_dma_api)
- goto out;
-
- flags = extra[i].flags;
+ flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
- dma_unmap_single(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- dma_unmap_page(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
+ if (!vq->use_map_api)
+ goto out;
+ } else if (!vring_need_unmap_buffer(vq, extra))
+ goto out;
+
+ virtqueue_unmap_page_attrs(&vq->vq,
+ extra->addr,
+ extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ 0);
out:
- return extra[i].next;
+ return extra->next;
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
unsigned int total_sg,
gfp_t gfp)
{
+ struct vring_desc_extra *extra;
struct vring_desc *desc;
- unsigned int i;
+ unsigned int i, size;
/*
* We require lowmem mappings for the descriptors because
@@ -476,40 +491,41 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
*/
gfp &= ~__GFP_HIGHMEM;
- desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
+ size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;
+
+ desc = kmalloc(size, gfp);
if (!desc)
return NULL;
+ extra = (struct vring_desc_extra *)&desc[total_sg];
+
for (i = 0; i < total_sg; i++)
- desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
+ extra[i].next = i + 1;
+
return desc;
}
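
The indirect table and its extra array now come from one allocation, laid out back to back; a sketch of the arithmetic:

/* Sketch: combined allocation returned by alloc_indirect_split():
 *
 *   desc[0..total_sg-1] | extra[0..total_sg-1]
 *
 * The extra table is recovered later as
 *   (struct vring_desc_extra *)&desc[total_sg].
 */
static size_t example_indirect_alloc_size(unsigned int total_sg)
{
	return total_sg * (sizeof(struct vring_desc) +
			   sizeof(struct vring_desc_extra));
}
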
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
struct vring_desc *desc,
+ struct vring_desc_extra *extra,
unsigned int i,
dma_addr_t addr,
unsigned int len,
- u16 flags,
- bool indirect)
+ u16 flags, bool premapped)
{
- struct vring_virtqueue *vring = to_vvq(vq);
- struct vring_desc_extra *extra = vring->split.desc_extra;
u16 next;
desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
desc[i].len = cpu_to_virtio32(vq->vdev, len);
- if (!indirect) {
- next = extra[i].next;
- desc[i].next = cpu_to_virtio16(vq->vdev, next);
+ extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
+ extra[i].len = len;
+ extra[i].flags = flags;
- extra[i].addr = addr;
- extra[i].len = len;
- extra[i].flags = flags;
- } else
- next = virtio16_to_cpu(vq->vdev, desc[i].next);
+ next = extra[i].next;
+
+ desc[i].next = cpu_to_virtio16(vq->vdev, next);
return next;
}
@@ -521,9 +537,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
unsigned int in_sgs,
void *data,
void *ctx,
+ bool premapped,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ struct vring_desc_extra *extra;
struct scatterlist *sg;
struct vring_desc *desc;
unsigned int i, n, avail, descs_used, prev, err_idx;
@@ -559,9 +577,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Set up rest to use this indirect table. */
i = 0;
descs_used = 1;
+ extra = (struct vring_desc_extra *)&desc[total_sg];
} else {
indirect = false;
desc = vq->split.vring.desc;
+ extra = vq->split.desc_extra;
i = head;
descs_used = total_sg;
}
@@ -582,39 +602,42 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+ u32 len;
+
+ if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
goto unmap_release;
prev = i;
/* Note that we trust indirect descriptor
 * table since it uses stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
+ i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
VRING_DESC_F_NEXT,
- indirect);
+ premapped);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+ u32 len;
+
+ if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
goto unmap_release;
prev = i;
/* Note that we trust indirect descriptor
 * table since it uses stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, i, addr,
- sg->length,
+ i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
- indirect);
+ premapped);
}
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
- if (!indirect && vq->use_dma_api)
+ if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
@@ -627,10 +650,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
goto unmap_release;
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
+ vq->split.desc_extra,
head, addr,
total_sg * sizeof(struct vring_desc),
- VRING_DESC_F_INDIRECT,
- false);
+ VRING_DESC_F_INDIRECT, false);
}
/* We're using some buffers from the free list. */
@@ -683,11 +706,8 @@ unmap_release:
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- if (indirect) {
- vring_unmap_one_split_indirect(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
- } else
- i = vring_unmap_one_split(vq, i);
+
+ i = vring_unmap_one_split(vq, &extra[i]);
}
if (indirect)
@@ -731,22 +751,25 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
void **ctx)
{
+ struct vring_desc_extra *extra;
unsigned int i, j;
__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
vq->split.desc_state[head].data = NULL;
+ extra = vq->split.desc_extra;
+
/* Put back on free list: unmap first-level descriptors and find end */
i = head;
while (vq->split.vring.desc[i].flags & nextflag) {
- vring_unmap_one_split(vq, i);
+ vring_unmap_one_split(vq, &extra[i]);
i = vq->split.desc_extra[i].next;
vq->vq.num_free++;
}
- vring_unmap_one_split(vq, i);
+ vring_unmap_one_split(vq, &extra[i]);
vq->split.desc_extra[i].next = vq->free_head;
vq->free_head = head;
@@ -756,20 +779,25 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
if (vq->indirect) {
struct vring_desc *indir_desc =
vq->split.desc_state[head].indir_desc;
- u32 len;
+ u32 len, num;
/* Free the indirect table, if any, now that it's unmapped. */
if (!indir_desc)
return;
-
len = vq->split.desc_extra[head].len;
BUG_ON(!(vq->split.desc_extra[head].flags &
VRING_DESC_F_INDIRECT));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
- for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ num = len / sizeof(struct vring_desc);
+
+ extra = (struct vring_desc_extra *)&indir_desc[num];
+
+ if (vq->use_map_api) {
+ for (j = 0; j < num; j++)
+ vring_unmap_one_split(vq, &extra[j]);
+ }
kfree(indir_desc);
vq->split.desc_state[head].indir_desc = NULL;
@@ -778,7 +806,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
}
}
-static inline bool more_used_split(const struct vring_virtqueue *vq)
+static bool more_used_split(const struct vring_virtqueue *vq)
{
return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
vq->split.vring.used->idx);
@@ -848,6 +876,14 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
if (vq->event)
/* TODO: this is a hack. Figure out a cleaner value to write. */
vring_used_event(&vq->split.vring) = 0x0;
@@ -1032,11 +1068,13 @@ err_state:
}
static void vring_free_split(struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev)
+ struct virtio_device *vdev,
+ union virtio_map map)
{
vring_free_queue(vdev, vring_split->queue_size_in_bytes,
vring_split->vring.desc,
- vring_split->queue_dma_addr);
+ vring_split->queue_dma_addr,
+ map);
kfree(vring_split->desc_state);
kfree(vring_split->desc_extra);
@@ -1046,7 +1084,8 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
struct virtio_device *vdev,
u32 num,
unsigned int vring_align,
- bool may_reduce_num)
+ bool may_reduce_num,
+ union virtio_map map)
{
void *queue = NULL;
dma_addr_t dma_addr;
@@ -1061,7 +1100,8 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr,
- GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+ map);
if (queue)
break;
if (!may_reduce_num)
@@ -1074,7 +1114,8 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
if (!queue) {
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
- &dma_addr, GFP_KERNEL | __GFP_ZERO);
+ &dma_addr, GFP_KERNEL | __GFP_ZERO,
+ map);
}
if (!queue)
return -ENOMEM;
@@ -1090,6 +1131,64 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
return 0;
}
+static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name,
+ union virtio_map map)
+{
+ struct vring_virtqueue *vq;
+ int err;
+
+ vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+ if (!vq)
+ return NULL;
+
+ vq->packed_ring = false;
+ vq->vq.callback = callback;
+ vq->vq.vdev = vdev;
+ vq->vq.name = name;
+ vq->vq.index = index;
+ vq->vq.reset = false;
+ vq->we_own_ring = false;
+ vq->notify = notify;
+ vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ vq->broken = true;
+#else
+ vq->broken = false;
+#endif
+ vq->map = map;
+ vq->use_map_api = vring_use_map_api(vdev);
+
+ vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
+ !context;
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+ if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+ vq->weak_barriers = false;
+
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
+ }
+
+ virtqueue_vring_init_split(vring_split, vq);
+
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
+
+ spin_lock(&vdev->vqs_list_lock);
+ list_add_tail(&vq->vq.list, &vdev->vqs);
+ spin_unlock(&vdev->vqs_list_lock);
+ return &vq->vq;
+}
+
static struct virtqueue *vring_create_virtqueue_split(
unsigned int index,
unsigned int num,
@@ -1100,21 +1199,22 @@ static struct virtqueue *vring_create_virtqueue_split(
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
- const char *name)
+ const char *name,
+ union virtio_map map)
{
struct vring_virtqueue_split vring_split = {};
struct virtqueue *vq;
int err;
err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
- may_reduce_num);
+ may_reduce_num, map);
if (err)
return NULL;
- vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
- context, notify, callback, name);
+ vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name, map);
if (!vq) {
- vring_free_split(&vring_split, vdev);
+ vring_free_split(&vring_split, vdev, map);
return NULL;
}
@@ -1132,7 +1232,8 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
err = vring_alloc_queue_split(&vring_split, vdev, num,
vq->split.vring_align,
- vq->split.may_reduce_num);
+ vq->split.may_reduce_num,
+ vq->map);
if (err)
goto err;
@@ -1150,7 +1251,7 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
return 0;
err_state_extra:
- vring_free_split(&vring_split, vdev);
+ vring_free_split(&vring_split, vdev, vq->map);
err:
virtqueue_reinit_split(vq);
return -ENOMEM;
@@ -1160,60 +1261,42 @@ err:
/*
* Packed ring specific functions - *_packed().
*/
-static inline bool packed_used_wrap_counter(u16 last_used_idx)
+static bool packed_used_wrap_counter(u16 last_used_idx)
{
return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
-static inline u16 packed_last_used(u16 last_used_idx)
+static u16 packed_last_used(u16 last_used_idx)
{
return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
- struct vring_desc_extra *extra)
+ const struct vring_desc_extra *extra)
{
u16 flags;
- if (!vq->use_dma_api)
- return;
-
flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
- dma_unmap_single(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
-}
-
-static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
- struct vring_packed_desc *desc)
-{
- u16 flags;
-
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
+ return;
+ } else if (!vring_need_unmap_buffer(vq, extra))
return;
- flags = le16_to_cpu(desc->flags);
-
- dma_unmap_page(vring_dma_dev(vq),
- le64_to_cpu(desc->addr),
- le32_to_cpu(desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ virtqueue_unmap_page_attrs(&vq->vq,
+ extra->addr, extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ 0);
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
gfp_t gfp)
{
+ struct vring_desc_extra *extra;
struct vring_packed_desc *desc;
+ int i, size;
/*
* We require lowmem mappings for the descriptors because
@@ -1222,7 +1305,16 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
*/
gfp &= ~__GFP_HIGHMEM;
- desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
+ size = (sizeof(*desc) + sizeof(*extra)) * total_sg;
+
+ desc = kmalloc(size, gfp);
+ if (!desc)
+ return NULL;
+
+ extra = (struct vring_desc_extra *)&desc[total_sg];
+
+ for (i = 0; i < total_sg; i++)
+ extra[i].next = i + 1;
return desc;
}
@@ -1233,11 +1325,13 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
+ bool premapped,
gfp_t gfp)
{
+ struct vring_desc_extra *extra;
struct vring_packed_desc *desc;
struct scatterlist *sg;
- unsigned int i, n, err_idx;
+ unsigned int i, n, err_idx, len;
u16 head, id;
dma_addr_t addr;
@@ -1246,6 +1340,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
if (!desc)
return -ENOMEM;
+ extra = (struct vring_desc_extra *)&desc[total_sg];
+
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
kfree(desc);
@@ -1259,15 +1355,22 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ &addr, &len, premapped))
goto unmap_release;
desc[i].flags = cpu_to_le16(n < out_sgs ?
0 : VRING_DESC_F_WRITE);
desc[i].addr = cpu_to_le64(addr);
- desc[i].len = cpu_to_le32(sg->length);
+ desc[i].len = cpu_to_le32(len);
+
+ if (unlikely(vq->use_map_api)) {
+ extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
+ extra[i].len = len;
+ extra[i].flags = n < out_sgs ? 0 : VRING_DESC_F_WRITE;
+ }
+
i++;
}
}
@@ -1284,7 +1387,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->use_dma_api) {
+ if (vq->use_map_api) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1333,7 +1436,7 @@ unmap_release:
err_idx = i;
for (i = 0; i < err_idx; i++)
- vring_unmap_desc_packed(vq, &desc[i]);
+ vring_unmap_extra_packed(vq, &extra[i]);
kfree(desc);
@@ -1348,12 +1451,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
unsigned int in_sgs,
void *data,
void *ctx,
+ bool premapped,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
struct vring_packed_desc *desc;
struct scatterlist *sg;
- unsigned int i, n, c, descs_used, err_idx;
+ unsigned int i, n, c, descs_used, err_idx, len;
__le16 head_flags, flags;
u16 head, id, prev, curr, avail_used_flags;
int err;
@@ -1374,7 +1478,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
if (virtqueue_use_indirect(vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
- in_sgs, data, gfp);
+ in_sgs, data, premapped, gfp);
if (err != -ENOMEM) {
END_USE(vq);
return err;
@@ -1406,9 +1510,11 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
c = 0;
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ &addr, &len, premapped))
goto unmap_release;
flags = cpu_to_le16(vq->packed.avail_used_flags |
@@ -1420,12 +1526,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].flags = flags;
desc[i].addr = cpu_to_le64(addr);
- desc[i].len = cpu_to_le32(sg->length);
+ desc[i].len = cpu_to_le32(len);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->use_dma_api)) {
- vq->packed.desc_extra[curr].addr = addr;
- vq->packed.desc_extra[curr].len = sg->length;
+ if (unlikely(vq->use_map_api)) {
+ vq->packed.desc_extra[curr].addr = premapped ?
+ DMA_MAPPING_ERROR : addr;
+ vq->packed.desc_extra[curr].len = len;
vq->packed.desc_extra[curr].flags =
le16_to_cpu(flags);
}
@@ -1441,7 +1548,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
}
}
- if (i < head)
+ if (i <= head)
vq->packed.avail_wrap_counter ^= 1;
/* We're using some buffers from the free list. */
@@ -1557,7 +1664,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->use_map_api)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
@@ -1567,18 +1674,22 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
}
if (vq->indirect) {
- u32 len;
+ struct vring_desc_extra *extra;
+ u32 len, num;
/* Free the indirect table, if any, now that it's unmapped. */
desc = state->indir_desc;
if (!desc)
return;
- if (vq->use_dma_api) {
+ if (vq->use_map_api) {
len = vq->packed.desc_extra[id].len;
- for (i = 0; i < len / sizeof(struct vring_packed_desc);
- i++)
- vring_unmap_desc_packed(vq, &desc[i]);
+ num = len / sizeof(struct vring_packed_desc);
+
+ extra = (struct vring_desc_extra *)&desc[num];
+
+ for (i = 0; i < num; i++)
+ vring_unmap_extra_packed(vq, &extra[i]);
}
kfree(desc);
state->indir_desc = NULL;
@@ -1600,7 +1711,7 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
return avail == used && used == used_wrap_counter;
}
-static inline bool more_used_packed(const struct vring_virtqueue *vq)
+static bool more_used_packed(const struct vring_virtqueue *vq)
{
u16 last_used;
u16 last_used_idx;
@@ -1687,6 +1798,14 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
@@ -1841,22 +1960,26 @@ static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
}
static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
- struct virtio_device *vdev)
+ struct virtio_device *vdev,
+ union virtio_map map)
{
if (vring_packed->vring.desc)
vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
vring_packed->vring.desc,
- vring_packed->ring_dma_addr);
+ vring_packed->ring_dma_addr,
+ map);
if (vring_packed->vring.driver)
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
vring_packed->vring.driver,
- vring_packed->driver_event_dma_addr);
+ vring_packed->driver_event_dma_addr,
+ map);
if (vring_packed->vring.device)
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
vring_packed->vring.device,
- vring_packed->device_event_dma_addr);
+ vring_packed->device_event_dma_addr,
+ map);
kfree(vring_packed->desc_state);
kfree(vring_packed->desc_extra);
@@ -1864,7 +1987,7 @@ static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
- u32 num)
+ u32 num, union virtio_map map)
{
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
@@ -1875,7 +1998,8 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
- GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+ map);
if (!ring)
goto err;
@@ -1887,7 +2011,8 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
- GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+ map);
if (!driver)
goto err;
@@ -1897,7 +2022,8 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
- GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+ map);
if (!device)
goto err;
@@ -1909,7 +2035,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
return 0;
err:
- vring_free_packed(vring_packed, vdev);
+ vring_free_packed(vring_packed, vdev, map);
return -ENOMEM;
}
@@ -1977,35 +2103,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
+ struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name,
+ union virtio_map map)
{
- struct vring_virtqueue_packed vring_packed = {};
struct vring_virtqueue *vq;
int err;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num))
- goto err_ring;
-
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
- goto err_vq;
+ return NULL;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->vq.index = index;
vq->vq.reset = false;
- vq->we_own_ring = true;
+ vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
@@ -2014,7 +2134,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->broken = false;
#endif
vq->packed_ring = true;
- vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->map = map;
+ vq->use_map_api = vring_use_map_api(vdev);
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2023,26 +2144,52 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- err = vring_alloc_state_extra_packed(&vring_packed);
- if (err)
- goto err_state_extra;
+ err = vring_alloc_state_extra_packed(vring_packed);
+ if (err) {
+ kfree(vq);
+ return NULL;
+ }
- virtqueue_vring_init_packed(&vring_packed, !!callback);
+ virtqueue_vring_init_packed(vring_packed, !!callback);
- virtqueue_init(vq, num);
- virtqueue_vring_attach_packed(vq, &vring_packed);
+ virtqueue_init(vq, vring_packed->vring.num);
+ virtqueue_vring_attach_packed(vq, vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
+}
-err_state_extra:
- kfree(vq);
-err_vq:
- vring_free_packed(&vring_packed, vdev);
-err_ring:
- return NULL;
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name,
+ union virtio_map map)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct virtqueue *vq;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, map))
+ return NULL;
+
+ vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
+ context, notify, callback, name, map);
+ if (!vq) {
+ vring_free_packed(&vring_packed, vdev, map);
+ return NULL;
+ }
+
+ to_vvq(vq)->we_own_ring = true;
+
+ return vq;
}
static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
@@ -2052,7 +2199,7 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
struct virtio_device *vdev = _vq->vdev;
int err;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, vq->map))
goto err_ring;
err = vring_alloc_state_extra_packed(&vring_packed);
@@ -2069,12 +2216,49 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
return 0;
err_state_extra:
- vring_free_packed(&vring_packed, vdev);
+ vring_free_packed(&vring_packed, vdev, vq->map);
err_ring:
virtqueue_reinit_packed(vq);
return -ENOMEM;
}
+static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ return 0;
+}
+
+static int virtqueue_enable_after_reset(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return 0;
+}
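
Together these two helpers implement the stop -> reclaim -> modify -> restart sequence that the ring resize path is built from; a sketch of the flow, with modify() standing in for e.g. the split/packed resize routine (error unwinding simplified; the real resize path re-enables even when the modification fails):

/* Sketch of the reset flow the helpers above are composed into. */
static int example_reset_flow(struct virtqueue *vq,
			      void (*recycle)(struct virtqueue *, void *),
			      int (*modify)(struct virtqueue *))
{
	int err;

	err = virtqueue_disable_and_recycle(vq, recycle);
	if (err)
		return err;

	err = modify(vq);		/* e.g. a resize of the ring */
	if (err)
		return err;

	return virtqueue_enable_after_reset(vq);
}
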
/*
* Generic functions and exported symbols.
@@ -2087,14 +2271,15 @@ static inline int virtqueue_add(struct virtqueue *_vq,
unsigned int in_sgs,
void *data,
void *ctx,
+ bool premapped,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
- out_sgs, in_sgs, data, ctx, gfp) :
+ out_sgs, in_sgs, data, ctx, premapped, gfp) :
virtqueue_add_split(_vq, sgs, total_sg,
- out_sgs, in_sgs, data, ctx, gfp);
+ out_sgs, in_sgs, data, ctx, premapped, gfp);
}
/**
@@ -2110,6 +2295,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
* at the same time (except where noted).
*
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
+ *
+ * NB: ENOSPC is a special code that is only returned on an attempt to add a
+ * buffer to a full VQ. It indicates that some buffers are outstanding and that
+ * the operation can be retried after some buffers have been used.
*/
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
@@ -2128,7 +2317,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
total_sg++;
}
return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
- data, NULL, gfp);
+ data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
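
Given the ENOSPC contract spelled out above, the usual driver reaction is to stop submitting and retry once used buffers come back; a sketch:

/* Sketch: typical -ENOSPC handling around virtqueue_add_outbuf().
 * Real drivers park the request and resubmit from their used-buffer
 * callback instead of spinning.
 */
static int example_submit(struct virtqueue *vq, struct scatterlist *sg,
			  void *token)
{
	int ret = virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC);

	if (ret == -ENOSPC)
		return -EBUSY;	/* queue full: retry after a used event */

	return ret;
}
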
@@ -2150,11 +2339,34 @@ int virtqueue_add_outbuf(struct virtqueue *vq,
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
+ return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
+ * virtqueue_add_outbuf_premapped - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted). The entries in @sg must already
+ * hold valid DMA addresses; the core will neither map nor unmap them.
+ *
+ * Return:
+ * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
+
+/**
* virtqueue_add_inbuf - expose input buffers to other end
* @vq: the struct virtqueue we're talking about.
* @sg: scatterlist (must be well-formed and terminated!)
@@ -2172,7 +2384,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
@@ -2196,11 +2408,53 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
void *ctx,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
+ * virtqueue_add_inbuf_premapped - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @ctx: extra context for the token
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted). The entries in @sg must already
+ * hold valid DMA addresses; the core will neither map nor unmap them.
+ *
+ * Return:
+ * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ void *ctx,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);
+
+/**
+ * virtqueue_dma_dev - get the dma dev
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Returns the dma dev, which can be used with the DMA API.
+ */
+struct device *virtqueue_dma_dev(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (vq->use_map_api && !_vq->vdev->map)
+ return vq->map.dma_dev;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
+
+/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @_vq: the struct virtqueue
*
@@ -2309,12 +2563,6 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- /* If device triggered an event already it won't trigger one again:
- * no need to disable.
- */
- if (vq->event_triggered)
- return;
-
if (vq->packed_ring)
virtqueue_disable_cb_packed(_vq);
else
@@ -2405,7 +2653,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->event_triggered)
- vq->event_triggered = false;
+ data_race(vq->event_triggered = false);
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
@@ -2463,7 +2711,7 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
/* Just a hint for performance: so it's ok that this can be racy! */
if (vq->event)
- vq->event_triggered = true;
+ data_race(vq->event_triggered = true);
pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
if (vq->vq.callback)
@@ -2473,67 +2721,32 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-/* Only available for split ring */
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+struct virtqueue *vring_create_virtqueue(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
- struct vring_virtqueue *vq;
- int err;
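+	/* No per-transport mapping ops here; DMA-map via the parent device. */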
+ union virtio_map map = {.dma_dev = vdev->dev.parent};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
- return NULL;
-
- vq = kmalloc(sizeof(*vq), GFP_KERNEL);
- if (!vq)
- return NULL;
-
- vq->packed_ring = false;
- vq->vq.callback = callback;
- vq->vq.vdev = vdev;
- vq->vq.name = name;
- vq->vq.index = index;
- vq->vq.reset = false;
- vq->we_own_ring = false;
- vq->notify = notify;
- vq->weak_barriers = weak_barriers;
-#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
- vq->broken = true;
-#else
- vq->broken = false;
-#endif
- vq->use_dma_api = vring_use_dma_api(vdev);
-
- vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
- !context;
- vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
-
- if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
- vq->weak_barriers = false;
-
- err = vring_alloc_state_extra_split(vring_split);
- if (err) {
- kfree(vq);
- return NULL;
- }
-
- virtqueue_vring_init_split(vring_split, vq);
-
- virtqueue_init(vq, vring_split->vring.num);
- virtqueue_vring_attach_split(vq, vring_split);
+ return vring_create_virtqueue_packed(index, num, vring_align,
+ vdev, weak_barriers, may_reduce_num,
+ context, notify, callback, name, map);
- spin_lock(&vdev->vqs_list_lock);
- list_add_tail(&vq->vq.list, &vdev->vqs);
- spin_unlock(&vdev->vqs_list_lock);
- return &vq->vq;
+ return vring_create_virtqueue_split(index, num, vring_align,
+ vdev, weak_barriers, may_reduce_num,
+ context, notify, callback, name, map);
}
+EXPORT_SYMBOL_GPL(vring_create_virtqueue);
-struct virtqueue *vring_create_virtqueue(
+struct virtqueue *vring_create_virtqueue_map(
unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -2543,25 +2756,27 @@ struct virtqueue *vring_create_virtqueue(
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
- const char *name)
+ const char *name,
+ union virtio_map map)
{
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name);
+ context, notify, callback, name, map);
return vring_create_virtqueue_split(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name);
+ context, notify, callback, name, map);
}
-EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+EXPORT_SYMBOL_GPL(vring_create_virtqueue_map);
/**
* virtqueue_resize - resize the vring of vq
* @_vq: the struct virtqueue we're talking about.
* @num: new ring num
- * @recycle: callback for recycle the useless buffer
+ * @recycle: callback to recycle unused buffers
+ * @recycle_done: callback invoked after all unused buffers have been recycled
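+ *
+ * A minimal sketch of the callback pair (hypothetical driver helpers;
+ * recycle_done may be NULL when no completion step is needed):
+ *
+ *	static void my_recycle(struct virtqueue *vq, void *buf)
+ *	{
+ *		my_put_buf(buf);
+ *	}
+ *
+ *	err = virtqueue_resize(vq, new_num, my_recycle, NULL);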
*
* When it is really necessary to create a new vring, it will set the current vq
* into the reset state. Then call the passed callback to recycle the buffer
@@ -2582,15 +2797,11 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue);
*
*/
int virtqueue_resize(struct virtqueue *_vq, u32 num,
- void (*recycle)(struct virtqueue *vq, void *buf))
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq))
{
struct vring_virtqueue *vq = to_vvq(_vq);
- struct virtio_device *vdev = vq->vq.vdev;
- void *buf;
- int err;
-
- if (!vq->we_own_ring)
- return -EPERM;
+ int err, err_reset;
if (num > vq->vq.num_max)
return -E2BIG;
@@ -2601,32 +2812,62 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
return 0;
- if (!vdev->config->disable_vq_and_reset)
- return -ENOENT;
-
- if (!vdev->config->enable_vq_after_reset)
- return -ENOENT;
-
- err = vdev->config->disable_vq_and_reset(_vq);
+ err = virtqueue_disable_and_recycle(_vq, recycle);
if (err)
return err;
-
- while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
- recycle(_vq, buf);
+ if (recycle_done)
+ recycle_done(_vq);
if (vq->packed_ring)
err = virtqueue_resize_packed(_vq, num);
else
err = virtqueue_resize_split(_vq, num);
- if (vdev->config->enable_vq_after_reset(_vq))
- return -EBUSY;
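+	/* If re-enabling fails the vq is unusable, so report that first. */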
+ err_reset = virtqueue_enable_after_reset(_vq);
+ if (err_reset)
+ return err_reset;
return err;
}
EXPORT_SYMBOL_GPL(virtqueue_resize);
-/* Only available for split ring */
+/**
+ * virtqueue_reset - detach and recycle all unused buffers
+ * @_vq: the struct virtqueue we're talking about.
+ * @recycle: callback to recycle unused buffers
+ * @recycle_done: callback invoked after all unused buffers have been recycled
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -EBUSY: Failed to sync with device, vq may not work properly
+ * -ENOENT: Transport or device not supported
+ * -EPERM: Operation not permitted
+ */
+int virtqueue_reset(struct virtqueue *_vq,
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ int err;
+
+ err = virtqueue_disable_and_recycle(_vq, recycle);
+ if (err)
+ return err;
+ if (recycle_done)
+ recycle_done(_vq);
+
+ if (vq->packed_ring)
+ virtqueue_reinit_packed(vq);
+ else
+ virtqueue_reinit_split(vq);
+
+ return virtqueue_enable_after_reset(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_reset);
+
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -2639,13 +2880,23 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
const char *name)
{
struct vring_virtqueue_split vring_split = {};
+ union virtio_map map = {.dma_dev = vdev->dev.parent};
- if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
- return NULL;
+ if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+ struct vring_virtqueue_packed vring_packed = {};
+
+ vring_packed.vring.num = num;
+ vring_packed.vring.desc = pages;
+ return __vring_new_virtqueue_packed(index, &vring_packed,
+ vdev, weak_barriers,
+ context, notify, callback,
+ name, map);
+ }
vring_init(&vring_split.vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
- context, notify, callback, name);
+ return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name,
+ map);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
@@ -2658,17 +2909,20 @@ static void vring_free(struct virtqueue *_vq)
vring_free_queue(vq->vq.vdev,
vq->packed.ring_size_in_bytes,
vq->packed.vring.desc,
- vq->packed.ring_dma_addr);
+ vq->packed.ring_dma_addr,
+ vq->map);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.driver,
- vq->packed.driver_event_dma_addr);
+ vq->packed.driver_event_dma_addr,
+ vq->map);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.device,
- vq->packed.device_event_dma_addr);
+ vq->packed.device_event_dma_addr,
+ vq->map);
kfree(vq->packed.desc_state);
kfree(vq->packed.desc_extra);
@@ -2676,7 +2930,8 @@ static void vring_free(struct virtqueue *_vq)
vring_free_queue(vq->vq.vdev,
vq->split.queue_size_in_bytes,
vq->split.vring.desc,
- vq->split.queue_dma_addr);
+ vq->split.queue_dma_addr,
+ vq->map);
}
}
if (!vq->packed_ring) {
@@ -2699,6 +2954,23 @@ void vring_del_virtqueue(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
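+/*
+ * Pack the VIRTIO_F_NOTIFICATION_DATA payload: bits 15:0 carry the vq index,
+ * bits 31:16 the next avail index (for packed rings, the ring offset in bits
+ * 30:16 with the wrap counter in bit 31).
+ */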
+u32 vring_notification_data(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 next;
+
+ if (vq->packed_ring)
+ next = (vq->packed.next_avail_idx &
+ ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
+ vq->packed.avail_wrap_counter <<
+ VRING_PACKED_EVENT_F_WRAP_CTR;
+ else
+ next = vq->split.avail_idx_shadow;
+
+ return next << 16 | _vq->index;
+}
+EXPORT_SYMBOL_GPL(vring_notification_data);
+
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
@@ -2718,6 +2990,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_ORDER_PLATFORM:
break;
+ case VIRTIO_F_NOTIFICATION_DATA:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
@@ -2733,10 +3007,10 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
*/
-unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
+unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
@@ -2766,9 +3040,9 @@ void __virtqueue_unbreak(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
-bool virtqueue_is_broken(struct virtqueue *_vq)
+bool virtqueue_is_broken(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
return READ_ONCE(vq->broken);
}
@@ -2815,9 +3089,9 @@ void __virtio_unbreak_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
-dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2828,9 +3102,9 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
-dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2842,9 +3116,9 @@ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
-dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2857,10 +3131,277 @@ dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
/* Only available for split ring */
-const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{
return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
+/**
+ * virtqueue_map_alloc_coherent - alloc coherent mapping
+ * @vdev: the virtio device we are talking to
+ * @map: metadata for performing mapping
+ * @size: the size of the buffer
+ * @map_handle: the pointer to the mapped address
+ * @gfp: allocation flag (GFP_XXX)
+ *
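+ * A minimal sketch (hypothetical caller):
+ *
+ *	dma_addr_t handle;
+ *	void *va;
+ *
+ *	va = virtqueue_map_alloc_coherent(vdev, map, PAGE_SIZE, &handle,
+ *					  GFP_KERNEL);
+ *	if (!va)
+ *		return -ENOMEM;
+ *	virtqueue_map_free_coherent(vdev, map, PAGE_SIZE, va, handle);
+ *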
+ * Return: virtual address, or NULL on error.
+ */
+void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
+ union virtio_map map,
+ size_t size, dma_addr_t *map_handle,
+ gfp_t gfp)
+{
+ if (vdev->map)
+ return vdev->map->alloc(map, size,
+ map_handle, gfp);
+ else
+ return dma_alloc_coherent(map.dma_dev, size,
+ map_handle, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_alloc_coherent);
+
+/**
+ * virtqueue_map_free_coherent - free coherent mapping
+ * @vdev: the virtio device we are talking to
+ * @map: metadata for performing mapping
+ * @size: the size of the buffer
+ * @vaddr: the virtual address that needs to be freed
+ * @map_handle: the mapped address that needs to be freed
+ *
+ */
+void virtqueue_map_free_coherent(struct virtio_device *vdev,
+ union virtio_map map, size_t size, void *vaddr,
+ dma_addr_t map_handle)
+{
+ if (vdev->map)
+ vdev->map->free(map, size, vaddr,
+ map_handle, 0);
+ else
+ dma_free_coherent(map.dma_dev, size, vaddr, map_handle);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_free_coherent);
+
+/**
+ * virtqueue_map_page_attrs - map a page to the device
+ * @_vq: the virtqueue we are talking to
+ * @page: the page that will be mapped by the device
+ * @offset: the offset in the page for a buffer
+ * @size: the buffer size
+ * @dir: mapping direction
+ * @attrs: mapping attributes
+ *
+ * Returns the mapped address; the caller should check it with
+ * virtqueue_map_mapping_error().
+ */
+dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (vdev->map)
+ return vdev->map->map_page(vq->map,
+ page, offset, size,
+ dir, attrs);
+
+ return dma_map_page_attrs(vring_dma_dev(vq),
+ page, offset, size,
+ dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_page_attrs);
+
+/**
+ * virtqueue_unmap_page_attrs - unmap a page previously mapped for the device
+ * @_vq: the virtqueue we are talking to
+ * @map_handle: the mapped address
+ * @size: the buffer size
+ * @dir: mapping direction
+ * @attrs: unmapping attributes
+ */
+void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
+ dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (vdev->map)
+ vdev->map->unmap_page(vq->map,
+ map_handle, size, dir, attrs);
+ else
+ dma_unmap_page_attrs(vring_dma_dev(vq), map_handle,
+ size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_unmap_page_attrs);
+
+/**
+ * virtqueue_map_single_attrs - map DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @ptr: the pointer to the buffer to map
+ * @size: the size of the buffer to map
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * The caller uses this to map a buffer in advance. The resulting DMA address
+ * can be passed to this _vq when it is in premapped mode.
+ *
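+ * A minimal sketch (hypothetical caller):
+ *
+ *	dma_addr_t addr;
+ *
+ *	addr = virtqueue_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
+ *	if (virtqueue_map_mapping_error(vq, addr))
+ *		return -ENOMEM;
+ *	virtqueue_unmap_single_attrs(vq, addr, len, DMA_TO_DEVICE, 0);
+ *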
+ * Returns the mapped address; the caller should check it with
+ * virtqueue_map_mapping_error().
+ */
+dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_map_api) {
+ kmsan_handle_dma(virt_to_phys(ptr), size, dir);
+ return (dma_addr_t)virt_to_phys(ptr);
+ }
+
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(&_vq->vdev->dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
+
+ return virtqueue_map_page_attrs(&vq->vq, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_single_attrs);
+
+/**
+ * virtqueue_unmap_single_attrs - unmap a DMA mapping for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: the dma address to unmap
+ * @size: the size of the buffer
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * Unmap an address that was mapped by the virtqueue_map_* APIs.
+ *
+ */
+void virtqueue_unmap_single_attrs(const struct virtqueue *_vq,
+ dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_map_api)
+ return;
+
+ virtqueue_unmap_page_attrs(_vq, addr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_unmap_single_attrs);
+
+/**
+ * virtqueue_map_mapping_error - check a DMA address for mapping errors
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Returns 0 if the address is valid, non-zero otherwise.
+ */
+int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vring_mapping_error(vq, addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_mapping_error);
+
+/**
+ * virtqueue_map_need_sync - check whether a DMA address needs to be synced
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Check if the DMA address mapped by the virtqueue_map_* APIs needs to be
+ * synchronized.
+ *
+ * Return: true if the address needs synchronization, false otherwise.
+ */
+bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (!vq->use_map_api)
+ return false;
+
+ if (vdev->map)
+ return vdev->map->need_sync(vq->map, addr);
+ else
+ return dma_need_sync(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_need_sync);
+
+/**
+ * virtqueue_map_sync_single_range_for_cpu - sync a mapped range for the CPU
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_map_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ *
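+ * A minimal sketch (hypothetical caller, before the CPU reads a completed
+ * buffer):
+ *
+ *	if (virtqueue_map_need_sync(vq, addr))
+ *		virtqueue_map_sync_single_range_for_cpu(vq, addr, 0, len,
+ *							DMA_FROM_DEVICE);
+ *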
+ */
+void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (!vq->use_map_api)
+ return;
+
+ if (vdev->map)
+ vdev->map->sync_single_for_cpu(vq->map,
+ addr + offset, size, dir);
+ else
+ dma_sync_single_range_for_cpu(vring_dma_dev(vq),
+ addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_cpu);
+
+/**
+ * virtqueue_map_sync_single_range_for_device - sync a mapped range for the device
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_map_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (!vq->use_map_api)
+ return;
+
+ if (vdev->map)
+ vdev->map->sync_single_for_device(vq->map,
+ addr + offset,
+ size, dir);
+ else
+ dma_sync_single_range_for_device(vring_dma_dev(vq), addr,
+ offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_device);
+
+MODULE_DESCRIPTION("Virtio ring implementation");
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_rtc_arm.c b/drivers/virtio/virtio_rtc_arm.c
new file mode 100644
index 000000000000..211299d72870
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_arm.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Provides cross-timestamp params for Arm.
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clocksource_ids.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/* see header for doc */
+
+int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
+{
+ *hw_counter = VIRTIO_RTC_COUNTER_ARM_VCT;
+ *cs_id = CSID_ARM_ARCH_COUNTER;
+
+ return 0;
+}
diff --git a/drivers/virtio/virtio_rtc_class.c b/drivers/virtio/virtio_rtc_class.c
new file mode 100644
index 000000000000..05d6d28255cf
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_class.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio_rtc RTC class driver
+ *
+ * Copyright (C) 2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/math64.h>
+#include <linux/overflow.h>
+#include <linux/rtc.h>
+#include <linux/time64.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/**
+ * struct viortc_class - RTC class wrapper
+ * @viortc: virtio_rtc device data
+ * @rtc: RTC device
+ * @vio_clk_id: virtio_rtc clock id
+ * @stopped: Whether RTC ops are disallowed. Access protected by rtc_lock().
+ */
+struct viortc_class {
+ struct viortc_dev *viortc;
+ struct rtc_device *rtc;
+ u16 vio_clk_id;
+ bool stopped;
+};
+
+/**
+ * viortc_class_get_locked() - get RTC class wrapper, if ops allowed
+ * @dev: virtio device
+ *
+ * Gets the RTC class wrapper from the virtio device, if it is available and
+ * ops are allowed.
+ *
+ * Context: Caller must hold rtc_lock().
+ * Return: RTC class wrapper if available and ops allowed, ERR_PTR otherwise.
+ */
+static struct viortc_class *viortc_class_get_locked(struct device *dev)
+{
+ struct viortc_class *viortc_class;
+
+ viortc_class = viortc_class_from_dev(dev);
+ if (IS_ERR(viortc_class))
+ return viortc_class;
+
+ if (viortc_class->stopped)
+ return ERR_PTR(-EBUSY);
+
+ return viortc_class;
+}
+
+/**
+ * viortc_class_read_time() - RTC class op read_time
+ * @dev: virtio device
+ * @tm: read time
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct viortc_class *viortc_class;
+ time64_t sec;
+ int ret;
+ u64 ns;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ ret = viortc_read(viortc_class->viortc, viortc_class->vio_clk_id, &ns);
+ if (ret)
+ return ret;
+
+ sec = div_u64(ns, NSEC_PER_SEC);
+
+ rtc_time64_to_tm(sec, tm);
+
+ return 0;
+}
+
+/**
+ * viortc_class_read_alarm() - RTC class op read_alarm
+ * @dev: virtio device
+ * @alrm: alarm read out
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct viortc_class *viortc_class;
+ time64_t alarm_time_sec;
+ u64 alarm_time_ns;
+ bool enabled;
+ int ret;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ ret = viortc_read_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
+ &alarm_time_ns, &enabled);
+ if (ret)
+ return ret;
+
+ alarm_time_sec = div_u64(alarm_time_ns, NSEC_PER_SEC);
+ rtc_time64_to_tm(alarm_time_sec, &alrm->time);
+
+ alrm->enabled = enabled;
+
+ return 0;
+}
+
+/**
+ * viortc_class_set_alarm() - RTC class op set_alarm
+ * @dev: virtio device
+ * @alrm: alarm to set
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct viortc_class *viortc_class;
+ time64_t alarm_time_sec;
+ u64 alarm_time_ns;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ alarm_time_sec = rtc_tm_to_time64(&alrm->time);
+
+ if (alarm_time_sec < 0)
+ return -EINVAL;
+
+ if (check_mul_overflow((u64)alarm_time_sec, (u64)NSEC_PER_SEC,
+ &alarm_time_ns))
+ return -EINVAL;
+
+ return viortc_set_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
+ alarm_time_ns, alrm->enabled);
+}
+
+/**
+ * viortc_class_alarm_irq_enable() - RTC class op alarm_irq_enable
+ * @dev: virtio device
+ * @enabled: enable or disable alarm IRQ
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct viortc_class *viortc_class;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ return viortc_set_alarm_enabled(viortc_class->viortc,
+ viortc_class->vio_clk_id, enabled);
+}
+
+static const struct rtc_class_ops viortc_class_ops = {
+ .read_time = viortc_class_read_time,
+ .read_alarm = viortc_class_read_alarm,
+ .set_alarm = viortc_class_set_alarm,
+ .alarm_irq_enable = viortc_class_alarm_irq_enable,
+};
+
+/**
+ * viortc_class_alarm() - propagate alarm notification as alarm interrupt
+ * @viortc_class: RTC class wrapper
+ * @vio_clk_id: virtio_rtc clock id
+ *
+ * Context: Any context.
+ */
+void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id)
+{
+ if (vio_clk_id != viortc_class->vio_clk_id) {
+ dev_warn_ratelimited(&viortc_class->rtc->dev,
+ "ignoring alarm for clock id %d, expected id %d\n",
+ vio_clk_id, viortc_class->vio_clk_id);
+ return;
+ }
+
+ rtc_update_irq(viortc_class->rtc, 1, RTC_AF | RTC_IRQF);
+}
+
+/**
+ * viortc_class_stop() - disallow RTC class ops
+ * @viortc_class: RTC class wrapper
+ *
+ * Context: Process context. Caller must NOT hold rtc_lock().
+ */
+void viortc_class_stop(struct viortc_class *viortc_class)
+{
+ rtc_lock(viortc_class->rtc);
+
+ viortc_class->stopped = true;
+
+ rtc_unlock(viortc_class->rtc);
+}
+
+/**
+ * viortc_class_register() - register RTC class device
+ * @viortc_class: RTC class wrapper
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_class_register(struct viortc_class *viortc_class)
+{
+ return devm_rtc_register_device(viortc_class->rtc);
+}
+
+/**
+ * viortc_class_init() - init RTC class wrapper and device
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @have_alarm: have alarm feature
+ * @parent_dev: virtio device
+ *
+ * Context: Process context.
+ * Return: RTC class wrapper on success, ERR_PTR otherwise.
+ */
+struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id, bool have_alarm,
+ struct device *parent_dev)
+{
+ struct viortc_class *viortc_class;
+ struct rtc_device *rtc;
+
+ viortc_class =
+ devm_kzalloc(parent_dev, sizeof(*viortc_class), GFP_KERNEL);
+ if (!viortc_class)
+ return ERR_PTR(-ENOMEM);
+
+ rtc = devm_rtc_allocate_device(parent_dev);
+ if (IS_ERR(rtc))
+ return ERR_CAST(rtc);
+
+ viortc_class->viortc = viortc;
+ viortc_class->rtc = rtc;
+ viortc_class->vio_clk_id = vio_clk_id;
+
+ if (!have_alarm)
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+
+ rtc->ops = &viortc_class_ops;
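+	/* Clock readings are u64 nanoseconds, which caps the RTC range. */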
+ rtc->range_max = div_u64(U64_MAX, NSEC_PER_SEC);
+
+ return viortc_class;
+}
diff --git a/drivers/virtio/virtio_rtc_driver.c b/drivers/virtio/virtio_rtc_driver.c
new file mode 100644
index 000000000000..a57d5e06e19d
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_driver.c
@@ -0,0 +1,1407 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio_rtc driver core
+ *
+ * Copyright (C) 2022-2024 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+#define VIORTC_ALARMQ_BUF_CAP sizeof(union virtio_rtc_notif_alarmq)
+
+/* virtqueue order */
+enum {
+ VIORTC_REQUESTQ,
+ VIORTC_ALARMQ,
+ VIORTC_MAX_NR_QUEUES,
+};
+
+/**
+ * struct viortc_vq - virtqueue abstraction
+ * @vq: virtqueue
+ * @lock: protects access to vq
+ */
+struct viortc_vq {
+ struct virtqueue *vq;
+ spinlock_t lock;
+};
+
+/**
+ * struct viortc_dev - virtio_rtc device data
+ * @vdev: virtio device
+ * @viortc_class: RTC class wrapper for UTC-like clock, NULL if not available
+ * @vqs: virtqueues
+ * @clocks_to_unregister: Clock references, which are only used during device
+ * removal.
+ * For other uses, there would be a race between device
+ * creation and setting the pointers here.
+ * @alarmq_bufs: alarmq buffers list
+ * @num_alarmq_bufs: # of alarmq buffers
+ * @num_clocks: # of virtio_rtc clocks
+ */
+struct viortc_dev {
+ struct virtio_device *vdev;
+ struct viortc_class *viortc_class;
+ struct viortc_vq vqs[VIORTC_MAX_NR_QUEUES];
+ struct viortc_ptp_clock **clocks_to_unregister;
+ void **alarmq_bufs;
+ unsigned int num_alarmq_bufs;
+ u16 num_clocks;
+};
+
+/**
+ * struct viortc_msg - Message requested by driver, responded by device.
+ * @viortc: device data
+ * @req: request buffer
+ * @resp: response buffer
+ * @responded: vqueue callback signals response reception
+ * @refcnt: Message reference count, message and buffers will be deallocated
+ * once 0. refcnt is decremented in the vqueue callback and in the
+ * thread waiting on the responded completion.
+ * If a message response wait function times out, the message will be
+ * freed upon late reception (refcnt will reach 0 in the callback), or
+ * device removal.
+ * @req_size: size of request in bytes
+ * @resp_cap: maximum size of response in bytes
+ * @resp_actual_size: actual size of response
+ */
+struct viortc_msg {
+ struct viortc_dev *viortc;
+ void *req;
+ void *resp;
+ struct completion responded;
+ refcount_t refcnt;
+ unsigned int req_size;
+ unsigned int resp_cap;
+ unsigned int resp_actual_size;
+};
+
+/**
+ * viortc_class_from_dev() - Get RTC class object from virtio device.
+ * @dev: virtio device
+ *
+ * Context: Any context.
+ * Return: RTC class object if available, ERR_PTR otherwise.
+ */
+struct viortc_class *viortc_class_from_dev(struct device *dev)
+{
+ struct virtio_device *vdev;
+ struct viortc_dev *viortc;
+
+ vdev = container_of(dev, typeof(*vdev), dev);
+ viortc = vdev->priv;
+
+ return viortc->viortc_class ?: ERR_PTR(-ENODEV);
+}
+
+/**
+ * viortc_alarms_supported() - Whether device and driver support alarms.
+ * @vdev: virtio device
+ *
+ * NB: Device and driver may not support alarms for the same clocks.
+ *
+ * Context: Any context.
+ * Return: True if both device and driver can support alarms.
+ */
+static bool viortc_alarms_supported(struct virtio_device *vdev)
+{
+ return IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
+ virtio_has_feature(vdev, VIRTIO_RTC_F_ALARM);
+}
+
+/**
+ * viortc_feed_vq() - Make a device write-only buffer available.
+ * @viortc: device data
+ * @vq: notification virtqueue
+ * @buf: buffer
+ * @buf_len: buffer capacity in bytes
+ * @data: token, identifying buffer
+ *
+ * Context: Caller must prevent concurrent access to vq.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_feed_vq(struct viortc_dev *viortc, struct virtqueue *vq,
+ void *buf, unsigned int buf_len, void *data)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, buf, buf_len);
+
+ return virtqueue_add_inbuf(vq, &sg, 1, data, GFP_ATOMIC);
+}
+
+/**
+ * viortc_msg_init() - Allocate and initialize requestq message.
+ * @viortc: device data
+ * @msg_type: virtio_rtc message type
+ * @req_size: size of request buffer to be allocated
+ * @resp_cap: size of response buffer to be allocated
+ *
+ * Initializes the message refcnt to 2. The refcnt will be decremented once in
+ * the virtqueue callback, and once in the thread waiting on the message (on
+ * completion or timeout).
+ *
+ * Context: Process context.
+ * Return: non-NULL on success.
+ */
+static struct viortc_msg *viortc_msg_init(struct viortc_dev *viortc,
+ u16 msg_type, unsigned int req_size,
+ unsigned int resp_cap)
+{
+ struct device *dev = &viortc->vdev->dev;
+ struct virtio_rtc_req_head *req_head;
+ struct viortc_msg *msg;
+
+ msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ init_completion(&msg->responded);
+
+ msg->req = devm_kzalloc(dev, req_size, GFP_KERNEL);
+ if (!msg->req)
+ goto err_free_msg;
+
+ req_head = msg->req;
+
+ msg->resp = devm_kzalloc(dev, resp_cap, GFP_KERNEL);
+ if (!msg->resp)
+ goto err_free_msg_req;
+
+ msg->viortc = viortc;
+ msg->req_size = req_size;
+ msg->resp_cap = resp_cap;
+
+ refcount_set(&msg->refcnt, 2);
+
+ req_head->msg_type = virtio_cpu_to_le(msg_type, req_head->msg_type);
+
+ return msg;
+
+err_free_msg_req:
+ devm_kfree(dev, msg->req);
+
+err_free_msg:
+ devm_kfree(dev, msg);
+
+ return NULL;
+}
+
+/**
+ * viortc_msg_release() - Decrement message refcnt, potentially free message.
+ * @msg: message requested by driver
+ *
+ * Context: Any context.
+ */
+static void viortc_msg_release(struct viortc_msg *msg)
+{
+ struct device *dev;
+
+ if (refcount_dec_and_test(&msg->refcnt)) {
+ dev = &msg->viortc->vdev->dev;
+
+ devm_kfree(dev, msg->req);
+ devm_kfree(dev, msg->resp);
+ devm_kfree(dev, msg);
+ }
+}
+
+/**
+ * viortc_do_cb() - generic virtqueue callback logic
+ * @vq: virtqueue
+ * @handle_buf: function to process a used buffer
+ *
+ * Context: virtqueue callback, typically interrupt. Takes and releases vq lock.
+ */
+static void viortc_do_cb(struct virtqueue *vq,
+ void (*handle_buf)(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc))
+{
+ struct viortc_dev *viortc = vq->vdev->priv;
+ struct viortc_vq *viortc_vq;
+ bool cb_enabled = true;
+ unsigned long flags;
+ unsigned int len;
+ void *token;
+
+ viortc_vq = &viortc->vqs[vq->index];
+
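+	/*
+	 * Keep callbacks disabled while draining used buffers; re-enable
+	 * only once the queue looks empty, and use virtqueue_enable_cb()'s
+	 * return value to close the race with a buffer that arrives just
+	 * before re-enabling.
+	 */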
+ for (;;) {
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ if (cb_enabled) {
+ virtqueue_disable_cb(vq);
+ cb_enabled = false;
+ }
+
+ token = virtqueue_get_buf(vq, &len);
+ if (!token) {
+ if (virtqueue_enable_cb(vq)) {
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+ return;
+ }
+ cb_enabled = true;
+ }
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+
+ if (token)
+ handle_buf(token, len, vq, viortc_vq, viortc);
+ }
+}
+
+/**
+ * viortc_requestq_hdlr() - process a requestq used buffer
+ * @token: token identifying the buffer
+ * @len: bytes written by device
+ * @vq: virtqueue
+ * @viortc_vq: device-specific data for virtqueue
+ * @viortc: device data
+ *
+ * Signals completion for each received message.
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_requestq_hdlr(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc)
+{
+ struct viortc_msg *msg = token;
+
+ msg->resp_actual_size = len;
+
+ complete(&msg->responded);
+ viortc_msg_release(msg);
+}
+
+/**
+ * viortc_cb_requestq() - callback for requestq
+ * @vq: virtqueue
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_cb_requestq(struct virtqueue *vq)
+{
+ viortc_do_cb(vq, viortc_requestq_hdlr);
+}
+
+/**
+ * viortc_alarmq_hdlr() - process an alarmq used buffer
+ * @token: token identifying the buffer
+ * @len: bytes written by device
+ * @vq: virtqueue
+ * @viortc_vq: device-specific data for virtqueue
+ * @viortc: device data
+ *
+ * Processes a VIRTIO_RTC_NOTIF_ALARM notification by calling the RTC class
+ * driver. Makes the buffer available again.
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_alarmq_hdlr(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc)
+{
+ struct virtio_rtc_notif_alarm *notif = token;
+ struct virtio_rtc_notif_head *head = token;
+ unsigned long flags;
+ u16 clock_id;
+ bool notify;
+
+ if (len < sizeof(*head)) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring notification with short header\n",
+ __func__);
+ goto feed_vq;
+ }
+
+ if (virtio_le_to_cpu(head->msg_type) != VIRTIO_RTC_NOTIF_ALARM) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring unknown notification type 0x%x\n",
+ __func__, virtio_le_to_cpu(head->msg_type));
+ goto feed_vq;
+ }
+
+ if (len < sizeof(*notif)) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring too small alarm notification\n",
+ __func__);
+ goto feed_vq;
+ }
+
+ clock_id = virtio_le_to_cpu(notif->clock_id);
+
+ if (!viortc->viortc_class)
+ dev_warn_ratelimited(&viortc->vdev->dev,
+ "ignoring alarm, no RTC class device available\n");
+ else
+ viortc_class_alarm(viortc->viortc_class, clock_id);
+
+feed_vq:
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ if (viortc_feed_vq(viortc, vq, notif, VIORTC_ALARMQ_BUF_CAP, token))
+ dev_warn(&viortc->vdev->dev,
+ "%s: failed to re-expose input buffer\n", __func__);
+
+ notify = virtqueue_kick_prepare(vq);
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+
+ if (notify)
+ virtqueue_notify(vq);
+}
+
+/**
+ * viortc_cb_alarmq() - callback for alarmq
+ * @vq: virtqueue
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_cb_alarmq(struct virtqueue *vq)
+{
+ viortc_do_cb(vq, viortc_alarmq_hdlr);
+}
+
+/**
+ * viortc_get_resp_errno() - converts virtio_rtc errnos to system errnos
+ * @resp_head: message response header
+ *
+ * Return: negative system errno, or 0
+ */
+static int viortc_get_resp_errno(struct virtio_rtc_resp_head *resp_head)
+{
+ switch (virtio_le_to_cpu(resp_head->status)) {
+ case VIRTIO_RTC_S_OK:
+ return 0;
+ case VIRTIO_RTC_S_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case VIRTIO_RTC_S_EINVAL:
+ return -EINVAL;
+ case VIRTIO_RTC_S_ENODEV:
+ return -ENODEV;
+ case VIRTIO_RTC_S_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+/**
+ * viortc_msg_xfer() - send message request, wait until message response
+ * @vq: virtqueue
+ * @msg: message with driver request
+ * @timeout_jiffies: message response timeout, 0 for no timeout
+ *
+ * Context: Process context. Takes and releases vq.lock. May sleep.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_msg_xfer(struct viortc_vq *vq, struct viortc_msg *msg,
+ unsigned long timeout_jiffies)
+{
+ struct scatterlist out_sg[1];
+ struct scatterlist in_sg[1];
+ struct scatterlist *sgs[2];
+ unsigned long flags;
+ long timeout_ret;
+ bool notify;
+ int ret;
+
+ sgs[0] = out_sg;
+ sgs[1] = in_sg;
+
+ sg_init_one(out_sg, msg->req, msg->req_size);
+ sg_init_one(in_sg, msg->resp, msg->resp_cap);
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ ret = virtqueue_add_sgs(vq->vq, sgs, 1, 1, msg, GFP_ATOMIC);
+ if (ret) {
+ spin_unlock_irqrestore(&vq->lock, flags);
+ /*
+ * Release in place of the response callback, which will never
+ * come.
+ */
+ viortc_msg_release(msg);
+ return ret;
+ }
+
+ notify = virtqueue_kick_prepare(vq->vq);
+
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ if (notify)
+ virtqueue_notify(vq->vq);
+
+ if (timeout_jiffies) {
+ timeout_ret = wait_for_completion_interruptible_timeout(
+ &msg->responded, timeout_jiffies);
+
+ if (!timeout_ret)
+ return -ETIMEDOUT;
+ else if (timeout_ret < 0)
+ return (int)timeout_ret;
+ } else {
+ ret = wait_for_completion_interruptible(&msg->responded);
+ if (ret)
+ return ret;
+ }
+
+ if (msg->resp_actual_size < sizeof(struct virtio_rtc_resp_head))
+ return -EINVAL;
+
+ ret = viortc_get_resp_errno(msg->resp);
+ if (ret)
+ return ret;
+
+ /*
+ * There is not yet a case where returning a short message would make
+ * sense, so consider any deviation an error.
+ */
+ if (msg->resp_actual_size != msg->resp_cap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * common message handle macros for messages of different types
+ */
+
+/**
+ * VIORTC_DECLARE_MSG_HDL_ONSTACK() - declare message handle on stack
+ * @hdl: message handle name
+ * @msg_id: message type id
+ * @msg_req: message request type
+ * @msg_resp: message response type
+ */
+#define VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, msg_id, msg_req, msg_resp) \
+ struct { \
+ struct viortc_msg *msg; \
+ msg_req *req; \
+ msg_resp *resp; \
+ unsigned int req_size; \
+ unsigned int resp_cap; \
+ u16 msg_type; \
+ } hdl = { \
+ NULL, NULL, NULL, sizeof(msg_req), sizeof(msg_resp), (msg_id), \
+ }
+
+/**
+ * VIORTC_MSG() - extract message from message handle
+ * @hdl: message handle
+ *
+ * Return: struct viortc_msg
+ */
+#define VIORTC_MSG(hdl) ((hdl).msg)
+
+/**
+ * VIORTC_MSG_INIT() - initialize message handle
+ * @hdl: message handle
+ * @viortc: device data (struct viortc_dev *)
+ *
+ * Context: Process context.
+ * Return: 0 on success, -ENOMEM otherwise.
+ */
+#define VIORTC_MSG_INIT(hdl, viortc) \
+ ({ \
+ typeof(hdl) *_hdl = &(hdl); \
+ \
+ _hdl->msg = viortc_msg_init((viortc), _hdl->msg_type, \
+ _hdl->req_size, _hdl->resp_cap); \
+ if (_hdl->msg) { \
+ _hdl->req = _hdl->msg->req; \
+ _hdl->resp = _hdl->msg->resp; \
+ } \
+ _hdl->msg ? 0 : -ENOMEM; \
+ })
+
+/**
+ * VIORTC_MSG_WRITE() - write a request message field
+ * @hdl: message handle
+ * @dest_member: request message field name
+ * @src_ptr: pointer to data of compatible type
+ *
+ * Writes the field in little-endian format.
+ */
+#define VIORTC_MSG_WRITE(hdl, dest_member, src_ptr) \
+ do { \
+ typeof(hdl) _hdl = (hdl); \
+ typeof(src_ptr) _src_ptr = (src_ptr); \
+ \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(_hdl.req->dest_member)), \
+ *_src_ptr); \
+ \
+ _hdl.req->dest_member = \
+ virtio_cpu_to_le(*_src_ptr, _hdl.req->dest_member); \
+ } while (0)
+
+/**
+ * VIORTC_MSG_READ() - read from a response message field
+ * @hdl: message handle
+ * @src_member: response message field name
+ * @dest_ptr: pointer to data of compatible type
+ *
+ * Converts from little-endian format and writes to dest_ptr.
+ */
+#define VIORTC_MSG_READ(hdl, src_member, dest_ptr) \
+ do { \
+ typeof(dest_ptr) _dest_ptr = (dest_ptr); \
+ \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu((hdl).resp->src_member)), \
+ *_dest_ptr); \
+ \
+ *_dest_ptr = virtio_le_to_cpu((hdl).resp->src_member); \
+ } while (0)
+
+/*
+ * read requests
+ */
+
+/** timeout for clock readings, where timeouts are considered non-fatal */
+#define VIORTC_MSG_READ_TIMEOUT secs_to_jiffies(60)
+
+/**
+ * viortc_read() - VIRTIO_RTC_REQ_READ wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @reading: clock reading [ns]
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ,
+ struct virtio_rtc_req_read,
+ struct virtio_rtc_resp_read);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ VIORTC_MSG_READ_TIMEOUT);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, clock_reading, reading);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_read_cross() - VIRTIO_RTC_REQ_READ_CROSS wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @hw_counter: virtio_rtc HW counter type
+ * @reading: clock reading [ns]
+ * @cycles: HW counter cycles during clock reading
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ u64 *reading, u64 *cycles)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_CROSS,
+ struct virtio_rtc_req_read_cross,
+ struct virtio_rtc_resp_read_cross);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ VIORTC_MSG_READ_TIMEOUT);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, clock_reading, reading);
+ VIORTC_MSG_READ(hdl, counter_cycles, cycles);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/*
+ * control requests
+ */
+
+/**
+ * viortc_cfg() - VIRTIO_RTC_REQ_CFG wrapper
+ * @viortc: device data
+ * @num_clocks: # of virtio_rtc clocks
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_cfg(struct viortc_dev *viortc, u16 *num_clocks)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CFG,
+ struct virtio_rtc_req_cfg,
+ struct virtio_rtc_resp_cfg);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, num_clocks, num_clocks);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_clock_cap() - VIRTIO_RTC_REQ_CLOCK_CAP wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @type: virtio_rtc clock type
+ * @leap_second_smearing: virtio_rtc smearing variant
+ * @flags: struct virtio_rtc_resp_clock_cap.flags
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_clock_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 *type,
+ u8 *leap_second_smearing, u8 *flags)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CLOCK_CAP,
+ struct virtio_rtc_req_clock_cap,
+ struct virtio_rtc_resp_clock_cap);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, type, type);
+ VIORTC_MSG_READ(hdl, leap_second_smearing, leap_second_smearing);
+ VIORTC_MSG_READ(hdl, flags, flags);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_cross_cap() - VIRTIO_RTC_REQ_CROSS_CAP wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @hw_counter: virtio_rtc HW counter type
+ * @supported: xtstamping is supported for the vio_clk_id/hw_counter pair
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ bool *supported)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CROSS_CAP,
+ struct virtio_rtc_req_cross_cap,
+ struct virtio_rtc_resp_cross_cap);
+ u8 flags;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, flags, &flags);
+ *supported = !!(flags & VIRTIO_RTC_FLAG_CROSS_CAP);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_read_alarm() - VIRTIO_RTC_REQ_READ_ALARM wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_time: alarm time in ns
+ * @enabled: whether alarm is enabled
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
+ u64 *alarm_time, bool *enabled)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_ALARM,
+ struct virtio_rtc_req_read_alarm,
+ struct virtio_rtc_resp_read_alarm);
+ u8 flags;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, alarm_time, alarm_time);
+ VIORTC_MSG_READ(hdl, flags, &flags);
+
+ *enabled = !!(flags & VIRTIO_RTC_FLAG_ALARM_ENABLED);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_set_alarm() - VIRTIO_RTC_REQ_SET_ALARM wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_time: alarm time in ns
+ * @alarm_enable: enable or disable alarm
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
+ bool alarm_enable)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM,
+ struct virtio_rtc_req_set_alarm,
+ struct virtio_rtc_resp_set_alarm);
+ u8 flags = 0;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ if (alarm_enable)
+ flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, alarm_time, &alarm_time);
+ VIORTC_MSG_WRITE(hdl, flags, &flags);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_set_alarm_enabled() - VIRTIO_RTC_REQ_SET_ALARM_ENABLED wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_enable: enable or disable alarm
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
+ bool alarm_enable)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM_ENABLED,
+ struct virtio_rtc_req_set_alarm_enabled,
+ struct virtio_rtc_resp_set_alarm_enabled);
+ u8 flags = 0;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ if (alarm_enable)
+ flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, flags, &flags);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/*
+ * init, deinit
+ */
+
+/**
+ * viortc_init_rtc_class_clock() - init and register an RTC class device
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @clock_type: virtio_rtc clock type
+ * @flags: struct virtio_rtc_resp_clock_cap.flags
+ *
+ * The clock must be a UTC-like clock.
+ *
+ * Context: Process context.
+ * Return: Positive if registered, zero if not supported by configuration,
+ * negative error code otherwise.
+ */
+static int viortc_init_rtc_class_clock(struct viortc_dev *viortc,
+ u16 vio_clk_id, u8 clock_type, u8 flags)
+{
+ struct virtio_device *vdev = viortc->vdev;
+ struct viortc_class *viortc_class;
+ struct device *dev = &vdev->dev;
+ bool have_alarm;
+
+ if (clock_type != VIRTIO_RTC_CLOCK_UTC_SMEARED) {
+ dev_info(dev,
+ "not creating RTC class device for clock %d, which may step on leap seconds\n",
+ vio_clk_id);
+ return 0;
+ }
+
+ if (viortc->viortc_class) {
+ dev_warn_once(dev,
+ "multiple UTC-like clocks are present, but creating only one RTC class device\n");
+ return 0;
+ }
+
+ have_alarm = viortc_alarms_supported(vdev) &&
+ !!(flags & VIRTIO_RTC_FLAG_ALARM_CAP);
+
+ viortc_class = viortc_class_init(viortc, vio_clk_id, have_alarm, dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ viortc->viortc_class = viortc_class;
+
+ if (have_alarm)
+ devm_device_init_wakeup(dev);
+
+ return viortc_class_register(viortc_class) ?: 1;
+}
+
+/**
+ * viortc_init_ptp_clock() - init and register PTP clock
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @clock_type: virtio_rtc clock type
+ * @leap_second_smearing: virtio_rtc leap second smearing
+ *
+ * Context: Process context.
+ * Return: Positive if registered, zero if not supported by configuration,
+ * negative error code otherwise.
+ */
+static int viortc_init_ptp_clock(struct viortc_dev *viortc, u16 vio_clk_id,
+ u8 clock_type, u8 leap_second_smearing)
+{
+ struct device *dev = &viortc->vdev->dev;
+ char ptp_clock_name[PTP_CLOCK_NAME_LEN];
+ struct viortc_ptp_clock *vio_ptp;
+
+ snprintf(ptp_clock_name, PTP_CLOCK_NAME_LEN,
+ "Virtio PTP type %hhu/variant %hhu", clock_type,
+ leap_second_smearing);
+
+ vio_ptp = viortc_ptp_register(viortc, dev, vio_clk_id, ptp_clock_name);
+ if (IS_ERR(vio_ptp)) {
+ dev_err(dev, "failed to register PTP clock '%s'\n",
+ ptp_clock_name);
+ return PTR_ERR(vio_ptp);
+ }
+
+ viortc->clocks_to_unregister[vio_clk_id] = vio_ptp;
+
+ return !!vio_ptp;
+}
+
+/**
+ * viortc_init_clock() - init local representation of virtio_rtc clock
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ *
+ * Initializes PHC and/or RTC class device to represent virtio_rtc clock.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_init_clock(struct viortc_dev *viortc, u16 vio_clk_id)
+{
+ u8 clock_type, leap_second_smearing, flags;
+ bool is_exposed = false;
+ int ret;
+
+ ret = viortc_clock_cap(viortc, vio_clk_id, &clock_type,
+ &leap_second_smearing, &flags);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
+ (clock_type == VIRTIO_RTC_CLOCK_UTC ||
+ clock_type == VIRTIO_RTC_CLOCK_UTC_SMEARED ||
+ clock_type == VIRTIO_RTC_CLOCK_UTC_MAYBE_SMEARED)) {
+ ret = viortc_init_rtc_class_clock(viortc, vio_clk_id,
+ clock_type, flags);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ is_exposed = true;
+ }
+
+ if (IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)) {
+ ret = viortc_init_ptp_clock(viortc, vio_clk_id, clock_type,
+ leap_second_smearing);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ is_exposed = true;
+ }
+
+ if (!is_exposed)
+ dev_warn(&viortc->vdev->dev,
+ "cannot expose clock %d (type %d, variant %d) to userspace\n",
+ vio_clk_id, clock_type, leap_second_smearing);
+
+ return 0;
+}
+
+/**
+ * viortc_clocks_deinit() - unregister PHCs, stop RTC ops
+ * @viortc: device data
+ */
+static void viortc_clocks_deinit(struct viortc_dev *viortc)
+{
+ struct viortc_ptp_clock *vio_ptp;
+ unsigned int i;
+
+ for (i = 0; i < viortc->num_clocks; i++) {
+ vio_ptp = viortc->clocks_to_unregister[i];
+
+ if (!vio_ptp)
+ continue;
+
+ viortc->clocks_to_unregister[i] = NULL;
+
+ WARN_ON(viortc_ptp_unregister(vio_ptp, &viortc->vdev->dev));
+ }
+
+ if (viortc->viortc_class)
+ viortc_class_stop(viortc->viortc_class);
+}
+
+/**
+ * viortc_clocks_init() - init local representations of virtio_rtc clocks
+ * @viortc: device data
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_clocks_init(struct viortc_dev *viortc)
+{
+ u16 num_clocks;
+ unsigned int i;
+ int ret;
+
+ ret = viortc_cfg(viortc, &num_clocks);
+ if (ret)
+ return ret;
+
+ if (num_clocks < 1) {
+ dev_err(&viortc->vdev->dev, "device reported 0 clocks\n");
+ return -ENODEV;
+ }
+
+ viortc->num_clocks = num_clocks;
+
+ viortc->clocks_to_unregister =
+ devm_kcalloc(&viortc->vdev->dev, num_clocks,
+ sizeof(*viortc->clocks_to_unregister), GFP_KERNEL);
+ if (!viortc->clocks_to_unregister)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clocks; i++) {
+ ret = viortc_init_clock(viortc, i);
+ if (ret)
+ goto err_deinit_clocks;
+ }
+
+ return 0;
+
+err_deinit_clocks:
+ viortc_clocks_deinit(viortc);
+
+ return ret;
+}
+
+/**
+ * viortc_populate_vq() - populate alarmq with device-writable buffers
+ * @viortc: device data
+ * @viortc_vq: device-specific data for virtqueue
+ * @buf_cap: device-writable buffer size in bytes
+ * @lock: lock queue during accesses
+ *
+ * Populates the alarmq with pre-allocated buffers.
+ *
+ * The caller is responsible for kicking the device.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_populate_vq(struct viortc_dev *viortc,
+ struct viortc_vq *viortc_vq, u32 buf_cap,
+ bool lock)
+{
+ unsigned int num_elems, i;
+ struct virtqueue *vq;
+ unsigned long flags;
+ void *buf;
+ int ret;
+
+ num_elems = viortc->num_alarmq_bufs;
+ vq = viortc_vq->vq;
+
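+ /*
+ * Callers that populate after virtio_device_ready() (probe) pass
+ * lock=true, since the alarmq callback may run concurrently. The
+ * restore path passes lock=false, as it repopulates before the
+ * device is ready again.
+ */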
+ for (i = 0; i < num_elems; i++) {
+ buf = viortc->alarmq_bufs[i];
+
+ if (lock) {
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+ } else {
+ ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_alloc_vq_bufs() - allocate alarmq buffers
+ * @viortc: device data
+ * @num_elems: # of buffers
+ * @buf_cap: per-buffer device-writable bytes
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_alloc_vq_bufs(struct viortc_dev *viortc,
+ unsigned int num_elems, u32 buf_cap)
+{
+ struct device *dev = &viortc->vdev->dev;
+ void **buf_list;
+ unsigned int i;
+ void *buf;
+
+ buf_list = devm_kcalloc(dev, num_elems, sizeof(*buf_list), GFP_KERNEL);
+ if (!buf_list)
+ return -ENOMEM;
+
+ viortc->alarmq_bufs = buf_list;
+ viortc->num_alarmq_bufs = num_elems;
+
+ for (i = 0; i < num_elems; i++) {
+ buf = devm_kzalloc(dev, buf_cap, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_list[i] = buf;
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_init_vqs() - init virtqueues
+ * @viortc: device data
+ *
+ * Inits virtqueues and associated data.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_init_vqs(struct viortc_dev *viortc)
+{
+ struct virtqueue *vqs[VIORTC_MAX_NR_QUEUES];
+ struct virtqueue_info vqs_info[] = {
+ { "requestq", viortc_cb_requestq },
+ { "alarmq", viortc_cb_alarmq },
+ };
+ struct virtio_device *vdev = viortc->vdev;
+ unsigned int num_elems;
+ int nr_queues, ret;
+ bool have_alarms;
+
+ have_alarms = viortc_alarms_supported(vdev);
+
+ if (have_alarms)
+ nr_queues = VIORTC_ALARMQ + 1;
+ else
+ nr_queues = VIORTC_REQUESTQ + 1;
+
+ ret = virtio_find_vqs(vdev, nr_queues, vqs, vqs_info, NULL);
+ if (ret)
+ return ret;
+
+ viortc->vqs[VIORTC_REQUESTQ].vq = vqs[VIORTC_REQUESTQ];
+ spin_lock_init(&viortc->vqs[VIORTC_REQUESTQ].lock);
+
+ if (have_alarms) {
+ viortc->vqs[VIORTC_ALARMQ].vq = vqs[VIORTC_ALARMQ];
+ spin_lock_init(&viortc->vqs[VIORTC_ALARMQ].lock);
+
+ num_elems = virtqueue_get_vring_size(vqs[VIORTC_ALARMQ]);
+ if (num_elems == 0)
+ return -ENOSPC;
+
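+ /*
+ * The buffers are devm-allocated once and kept across
+ * freeze/restore; if the restored vring is smaller, only
+ * reuse as many buffers as it can hold.
+ */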
+ if (!viortc->alarmq_bufs) {
+ ret = viortc_alloc_vq_bufs(viortc, num_elems,
+ VIORTC_ALARMQ_BUF_CAP);
+ if (ret)
+ return ret;
+ } else {
+ viortc->num_alarmq_bufs =
+ min(num_elems, viortc->num_alarmq_bufs);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_probe() - probe a virtio_rtc virtio device
+ * @vdev: virtio device
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_probe(struct virtio_device *vdev)
+{
+ struct viortc_vq *alarm_viortc_vq;
+ struct virtqueue *alarm_vq;
+ struct viortc_dev *viortc;
+ unsigned long flags;
+ bool notify;
+ int ret;
+
+ viortc = devm_kzalloc(&vdev->dev, sizeof(*viortc), GFP_KERNEL);
+ if (!viortc)
+ return -ENOMEM;
+
+ vdev->priv = viortc;
+ viortc->vdev = vdev;
+
+ ret = viortc_init_vqs(viortc);
+ if (ret)
+ return ret;
+
+ virtio_device_ready(vdev);
+
+ ret = viortc_clocks_init(viortc);
+ if (ret)
+ goto err_reset_vdev;
+
+ if (viortc_alarms_supported(vdev)) {
+ alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
+ alarm_vq = alarm_viortc_vq->vq;
+
+ ret = viortc_populate_vq(viortc, alarm_viortc_vq,
+ VIORTC_ALARMQ_BUF_CAP, true);
+ if (ret)
+ goto err_deinit_clocks;
+
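+ /*
+ * Prepare the kick under the queue lock, but notify the
+ * device without holding it.
+ */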
+ spin_lock_irqsave(&alarm_viortc_vq->lock, flags);
+ notify = virtqueue_kick_prepare(alarm_vq);
+ spin_unlock_irqrestore(&alarm_viortc_vq->lock, flags);
+
+ if (notify && !virtqueue_notify(alarm_vq)) {
+ ret = -EIO;
+ goto err_deinit_clocks;
+ }
+ }
+
+ return 0;
+
+err_deinit_clocks:
+ viortc_clocks_deinit(viortc);
+
+err_reset_vdev:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+
+ return ret;
+}
+
+/**
+ * viortc_remove() - remove a virtio_rtc virtio device
+ * @vdev: virtio device
+ */
+static void viortc_remove(struct virtio_device *vdev)
+{
+ struct viortc_dev *viortc = vdev->priv;
+
+ viortc_clocks_deinit(viortc);
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+}
+
+static int viortc_freeze(struct virtio_device *dev)
+{
+ /*
+ * Do not reset the device, so that the device may still wake up the
+ * system through an alarmq notification.
+ */
+
+ return 0;
+}
+
+static int viortc_restore(struct virtio_device *dev)
+{
+ struct viortc_dev *viortc = dev->priv;
+ struct viortc_vq *alarm_viortc_vq;
+ struct virtqueue *alarm_vq;
+ bool notify = false;
+ int ret;
+
+ ret = viortc_init_vqs(viortc);
+ if (ret)
+ return ret;
+
+ alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
+ alarm_vq = alarm_viortc_vq->vq;
+
+ if (viortc_alarms_supported(dev)) {
+ ret = viortc_populate_vq(viortc, alarm_viortc_vq,
+ VIORTC_ALARMQ_BUF_CAP, false);
+ if (ret)
+ return ret;
+
+ notify = virtqueue_kick_prepare(alarm_vq);
+ }
+
+ virtio_device_ready(dev);
+
+ if (notify && !virtqueue_notify(alarm_vq))
+ ret = -EIO;
+
+ return ret;
+}
+
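+/*
+ * Alarms are surfaced through the RTC class device, so the feature is only
+ * offered when CONFIG_VIRTIO_RTC_CLASS is enabled.
+ */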
+static unsigned int features[] = {
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
+ VIRTIO_RTC_F_ALARM,
+#endif
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_rtc_drv = {
+ .driver.name = KBUILD_MODNAME,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = viortc_probe,
+ .remove = viortc_remove,
+ .freeze = pm_sleep_ptr(viortc_freeze),
+ .restore = pm_sleep_ptr(viortc_restore),
+};
+
+module_virtio_driver(virtio_rtc_drv);
+
+MODULE_DESCRIPTION("Virtio RTC driver");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_rtc_internal.h b/drivers/virtio/virtio_rtc_internal.h
new file mode 100644
index 000000000000..296afee6719b
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_internal.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * virtio_rtc internal interfaces
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _VIRTIO_RTC_INTERNAL_H_
+#define _VIRTIO_RTC_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+
+/* driver core IFs */
+
+struct viortc_dev;
+
+int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading);
+int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ u64 *reading, u64 *cycles);
+int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ bool *supported);
+int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
+ u64 *alarm_time, bool *enabled);
+int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
+ bool alarm_enable);
+int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
+ bool alarm_enable);
+
+struct viortc_class;
+
+struct viortc_class *viortc_class_from_dev(struct device *dev);
+
+/* PTP IFs */
+
+struct viortc_ptp_clock;
+
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)
+
+struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
+ struct device *parent_dev,
+ u16 vio_clk_id,
+ const char *ptp_clock_name);
+int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev);
+
+#else
+
+static inline struct viortc_ptp_clock *
+viortc_ptp_register(struct viortc_dev *viortc, struct device *parent_dev,
+ u16 vio_clk_id, const char *ptp_clock_name)
+{
+ return NULL;
+}
+
+static inline int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev)
+{
+ return -ENODEV;
+}
+
+#endif
+
+/* HW counter IFs */
+
+/**
+ * viortc_hw_xtstamp_params() - get HW-specific xtstamp params
+ * @hw_counter: virtio_rtc HW counter type
+ * @cs_id: clocksource id corresponding to hw_counter
+ *
+ * Gets the HW-specific xtstamp params. Returns an error if the driver cannot
+ * support xtstamp.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id);
+
+/* RTC class IFs */
+
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
+
+void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id);
+
+void viortc_class_stop(struct viortc_class *viortc_class);
+
+int viortc_class_register(struct viortc_class *viortc_class);
+
+struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id, bool have_alarm,
+ struct device *parent_dev);
+
+#else /* CONFIG_VIRTIO_RTC_CLASS */
+
+static inline void viortc_class_alarm(struct viortc_class *viortc_class,
+ u16 vio_clk_id)
+{
+}
+
+static inline void viortc_class_stop(struct viortc_class *viortc_class)
+{
+}
+
+static inline int viortc_class_register(struct viortc_class *viortc_class)
+{
+ return -ENODEV;
+}
+
+static inline struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id,
+ bool have_alarm,
+ struct device *parent_dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* CONFIG_VIRTIO_RTC_CLASS */
+
+#endif /* _VIRTIO_RTC_INTERNAL_H_ */
diff --git a/drivers/virtio/virtio_rtc_ptp.c b/drivers/virtio/virtio_rtc_ptp.c
new file mode 100644
index 000000000000..f84599950cd4
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_ptp.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Expose virtio_rtc clocks as PTP clocks.
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Derived from ptp_kvm_common.c, virtual PTP 1588 clock for use with KVM
+ * guests.
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/**
+ * struct viortc_ptp_clock - PTP clock abstraction
+ * @ptp_clock: PTP clock handle for unregistering
+ * @viortc: virtio_rtc device data
+ * @ptp_info: PTP clock description
+ * @vio_clk_id: virtio_rtc clock id
+ * @have_cross: device supports crosststamp with available HW counter
+ */
+struct viortc_ptp_clock {
+ struct ptp_clock *ptp_clock;
+ struct viortc_dev *viortc;
+ struct ptp_clock_info ptp_info;
+ u16 vio_clk_id;
+ bool have_cross;
+};
+
+/**
+ * struct viortc_ptp_cross_ctx - context for get_device_system_crosststamp()
+ * @device_time: device clock reading
+ * @system_counterval: HW counter value at device_time
+ *
+ * Provides the already obtained crosststamp to get_device_system_crosststamp().
+ */
+struct viortc_ptp_cross_ctx {
+ ktime_t device_time;
+ struct system_counterval_t system_counterval;
+};
+
+/* Weak function in case get_device_system_crosststamp() is not supported */
+int __weak viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * viortc_ptp_get_time_fn() - callback for get_device_system_crosststamp()
+ * @device_time: device clock reading
+ * @system_counterval: HW counter value at device_time
+ * @ctx: context with already obtained crosststamp
+ *
+ * Return: zero (success).
+ */
+static int viortc_ptp_get_time_fn(ktime_t *device_time,
+ struct system_counterval_t *system_counterval,
+ void *ctx)
+{
+ struct viortc_ptp_cross_ctx *vio_ctx = ctx;
+
+ *device_time = vio_ctx->device_time;
+ *system_counterval = vio_ctx->system_counterval;
+
+ return 0;
+}
+
+/**
+ * viortc_ptp_do_xtstamp() - get crosststamp from device
+ * @vio_ptp: virtio_rtc PTP clock
+ * @hw_counter: virtio_rtc HW counter type
+ * @cs_id: clocksource id corresponding to hw_counter
+ * @ctx: context for get_device_system_crosststamp()
+ *
+ * Reads HW-specific crosststamp from device.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_do_xtstamp(struct viortc_ptp_clock *vio_ptp,
+ u8 hw_counter, enum clocksource_ids cs_id,
+ struct viortc_ptp_cross_ctx *ctx)
+{
+ u64 max_ns, ns;
+ int ret;
+
+ ctx->system_counterval.cs_id = cs_id;
+
+ ret = viortc_read_cross(vio_ptp->viortc, vio_ptp->vio_clk_id,
+ hw_counter, &ns,
+ &ctx->system_counterval.cycles);
+ if (ret)
+ return ret;
+
+ max_ns = (u64)ktime_to_ns(KTIME_MAX);
+ if (ns > max_ns)
+ return -EINVAL;
+
+ ctx->device_time = ns_to_ktime(ns);
+
+ return 0;
+}
+
+/*
+ * PTP clock operations
+ */
+
+/**
+ * viortc_ptp_getcrosststamp() - PTP clock getcrosststamp op
+ * @ptp: PTP clock info
+ * @xtstamp: crosststamp
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct viortc_ptp_clock *vio_ptp =
+ container_of(ptp, struct viortc_ptp_clock, ptp_info);
+ struct system_time_snapshot history_begin;
+ struct viortc_ptp_cross_ctx ctx;
+ enum clocksource_ids cs_id;
+ u8 hw_counter;
+ int ret;
+
+ if (!vio_ptp->have_cross)
+ return -EOPNOTSUPP;
+
+ ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
+ if (ret)
+ return ret;
+
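+ /*
+ * get_device_system_crosststamp() can only interpolate when the
+ * current system clocksource matches the counter the device
+ * cross-timestamps against.
+ */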
+ ktime_get_snapshot(&history_begin);
+ if (history_begin.cs_id != cs_id)
+ return -EOPNOTSUPP;
+
+ /*
+ * Getting the timestamp can take many milliseconds with a slow Virtio
+ * device. This is too long for viortc_ptp_get_time_fn(), which is
+ * passed to get_device_system_crosststamp() and usually has to return
+ * before the timekeeper seqcount increases (every tick or so).
+ *
+ * So, get the actual cross-timestamp first.
+ */
+ ret = viortc_ptp_do_xtstamp(vio_ptp, hw_counter, cs_id, &ctx);
+ if (ret)
+ return ret;
+
+ ret = get_device_system_crosststamp(viortc_ptp_get_time_fn, &ctx,
+ &history_begin, xtstamp);
+ if (ret)
+ pr_debug("%s: get_device_system_crosststamp() returned %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/* viortc_ptp_adjfine() - unsupported PTP clock adjfine op */
+static int viortc_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+/* viortc_ptp_adjtime() - unsupported PTP clock adjtime op */
+static int viortc_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+/* viortc_ptp_settime64() - unsupported PTP clock settime64 op */
+static int viortc_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * viortc_ptp_gettimex64() - PTP clock gettimex64 op
+ *
+ * Context: Process context.
+ */
+static int viortc_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct viortc_ptp_clock *vio_ptp =
+ container_of(ptp, struct viortc_ptp_clock, ptp_info);
+ int ret;
+ u64 ns;
+
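+ /* Bracket the device read with system timestamps for userspace. */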
+ ptp_read_system_prets(sts);
+ ret = viortc_read(vio_ptp->viortc, vio_ptp->vio_clk_id, &ns);
+ ptp_read_system_postts(sts);
+
+ if (ret)
+ return ret;
+
+ if (ns > (u64)S64_MAX)
+ return -EINVAL;
+
+ *ts = ns_to_timespec64((s64)ns);
+
+ return 0;
+}
+
+/* viortc_ptp_enable() - unsupported PTP clock enable op */
+static int viortc_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * viortc_ptp_info_template - ptp_clock_info template
+ *
+ * The .name member will be set for individual virtio_rtc PTP clocks.
+ *
+ * The .getcrosststamp member will be cleared for PTP clocks not supporting
+ * crosststamp.
+ */
+static const struct ptp_clock_info viortc_ptp_info_template = {
+ .owner = THIS_MODULE,
+ /* .name is set according to clock type */
+ .adjfine = viortc_ptp_adjfine,
+ .adjtime = viortc_ptp_adjtime,
+ .gettimex64 = viortc_ptp_gettimex64,
+ .settime64 = viortc_ptp_settime64,
+ .enable = viortc_ptp_enable,
+ .getcrosststamp = viortc_ptp_getcrosststamp,
+};
+
+/**
+ * viortc_ptp_unregister() - PTP clock unregistering wrapper
+ * @vio_ptp: virtio_rtc PTP clock
+ * @parent_dev: parent device of PTP clock
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev)
+{
+ int ret = ptp_clock_unregister(vio_ptp->ptp_clock);
+
+ if (!ret)
+ devm_kfree(parent_dev, vio_ptp);
+
+ return ret;
+}
+
+/**
+ * viortc_ptp_get_cross_cap() - get xtstamp support info from device
+ * @viortc: virtio_rtc device data
+ * @vio_ptp: virtio_rtc PTP clock abstraction
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_get_cross_cap(struct viortc_dev *viortc,
+ struct viortc_ptp_clock *vio_ptp)
+{
+ enum clocksource_ids cs_id;
+ bool xtstamp_supported;
+ u8 hw_counter;
+ int ret;
+
+ ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
+ if (ret) {
+ vio_ptp->have_cross = false;
+ return 0;
+ }
+
+ ret = viortc_cross_cap(viortc, vio_ptp->vio_clk_id, hw_counter,
+ &xtstamp_supported);
+ if (ret)
+ return ret;
+
+ vio_ptp->have_cross = xtstamp_supported;
+
+ return 0;
+}
+
+/**
+ * viortc_ptp_register() - prepare and register PTP clock
+ * @viortc: virtio_rtc device data
+ * @parent_dev: parent device for PTP clock
+ * @vio_clk_id: id of virtio_rtc clock which backs PTP clock
+ * @ptp_clock_name: PTP clock name
+ *
+ * Context: Process context.
+ * Return: Pointer on success, ERR_PTR() otherwise; NULL if PTP clock support
+ * is not available.
+ */
+struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
+ struct device *parent_dev,
+ u16 vio_clk_id,
+ const char *ptp_clock_name)
+{
+ struct viortc_ptp_clock *vio_ptp;
+ struct ptp_clock *ptp_clock;
+ ssize_t len;
+ int ret;
+
+ vio_ptp = devm_kzalloc(parent_dev, sizeof(*vio_ptp), GFP_KERNEL);
+ if (!vio_ptp)
+ return ERR_PTR(-ENOMEM);
+
+ vio_ptp->viortc = viortc;
+ vio_ptp->vio_clk_id = vio_clk_id;
+ vio_ptp->ptp_info = viortc_ptp_info_template;
+ len = strscpy(vio_ptp->ptp_info.name, ptp_clock_name,
+ sizeof(vio_ptp->ptp_info.name));
+ if (len < 0) {
+ ret = len;
+ goto err_free_dev;
+ }
+
+ ret = viortc_ptp_get_cross_cap(viortc, vio_ptp);
+ if (ret)
+ goto err_free_dev;
+
+ if (!vio_ptp->have_cross)
+ vio_ptp->ptp_info.getcrosststamp = NULL;
+
+ ptp_clock = ptp_clock_register(&vio_ptp->ptp_info, parent_dev);
+ if (IS_ERR(ptp_clock))
+ goto err_on_register;
+
+ vio_ptp->ptp_clock = ptp_clock;
+
+ return vio_ptp;
+
+err_on_register:
+ ret = PTR_ERR(ptp_clock);
+
+err_free_dev:
+ devm_kfree(parent_dev, vio_ptp);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 9670cc79371d..0a801f67b599 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
+#include <linux/group_cpus.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
@@ -27,19 +28,6 @@ struct virtio_vdpa_device {
struct virtio_device vdev;
struct vdpa_device *vdpa;
u64 features;
-
- /* The lock to protect virtqueue list */
- spinlock_t lock;
- /* List of virtio_vdpa_vq_info */
- struct list_head virtqueues;
-};
-
-struct virtio_vdpa_vq_info {
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
};
static inline struct virtio_vdpa_device *
@@ -92,14 +80,14 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- return vdpa_set_status(vdpa, status);
+ vdpa_set_status(vdpa, status);
}
static void virtio_vdpa_reset(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- vdpa_reset(vdpa);
+ vdpa_reset(vdpa, 0);
}
static bool virtio_vdpa_notify(struct virtqueue *vq)
@@ -112,6 +100,17 @@ static bool virtio_vdpa_notify(struct virtqueue *vq)
return true;
}
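+/*
+ * Kick variant for VIRTIO_F_NOTIFICATION_DATA: the notification carries the
+ * queue index and next ring position, as computed by
+ * vring_notification_data().
+ */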
+static bool virtio_vdpa_notify_with_data(struct virtqueue *vq)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 data = vring_notification_data(vq);
+
+ ops->kick_vq_with_data(vdpa, data);
+
+ return true;
+}
+
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
struct virtio_vdpa_device *vd_dev = private;
@@ -123,9 +122,9 @@ static irqreturn_t virtio_vdpa_config_cb(void *private)
static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
- struct virtio_vdpa_vq_info *info = private;
+ struct virtqueue *vq = private;
- return vring_interrupt(0, info->vq);
+ return vring_interrupt(0, vq);
}
static struct virtqueue *
@@ -133,16 +132,15 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name, bool ctx)
{
- struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
- struct virtio_vdpa_vq_info *info;
+ bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
struct vdpa_callback cb;
struct virtqueue *vq;
u64 desc_addr, driver_addr, device_addr;
+ union virtio_map map = {0};
/* Assume split virtqueue, switch to packed if necessary */
struct vdpa_vq_state state = {0};
- unsigned long flags;
u32 align, max_num, min_num = 1;
bool may_reduce_num = true;
int err;
@@ -153,16 +151,23 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (index >= vdpa->nvqs)
return ERR_PTR(-ENOENT);
+ /* We cannot accept VIRTIO_F_NOTIFICATION_DATA without kick_vq_with_data */
+ if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
+ if (ops->kick_vq_with_data)
+ notify = virtio_vdpa_notify_with_data;
+ else
+ __virtio_clear_bit(vdev, VIRTIO_F_NOTIFICATION_DATA);
+ }
+
/* Queue shouldn't already be set up. */
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);
- /* Allocate and fill out our active queue description */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
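+ /* Prefer the per-virtqueue size over the device-wide maximum. */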
+ if (ops->get_vq_size)
+ max_num = ops->get_vq_size(vdpa, index);
+ else
+ max_num = ops->get_vq_num_max(vdpa);
- max_num = ops->get_vq_num_max(vdpa);
if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
@@ -171,23 +176,33 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
- may_reduce_num = (max_num == min_num) ? false : true;
+ may_reduce_num = (max_num != min_num);
/* Create the vring */
align = ops->get_vq_align(vdpa);
- vq = vring_create_virtqueue(index, max_num, align, vdev,
- true, may_reduce_num, ctx,
- virtio_vdpa_notify, callback, name);
+
+ if (ops->get_vq_map)
+ map = ops->get_vq_map(vdpa, index);
+ else
+ map = vdpa_get_map(vdpa);
+
+ vq = vring_create_virtqueue_map(index, max_num, align, vdev,
+ true, may_reduce_num, ctx,
+ notify, callback, name, map);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
}
+ if (index == 0)
+ vdev->vmap = map;
+
vq->num_max = max_num;
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
- cb.private = info;
+ cb.private = vq;
+ cb.trigger = NULL;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
@@ -217,13 +232,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
ops->set_vq_ready(vdpa, index, 1);
- vq->priv = info;
- info->vq = vq;
-
- spin_lock_irqsave(&vd_dev->lock, flags);
- list_add(&info->node, &vd_dev->virtqueues);
- spin_unlock_irqrestore(&vd_dev->lock, flags);
-
return vq;
err_vq:
@@ -232,7 +240,6 @@ error_new_virtqueue:
ops->set_vq_ready(vdpa, index, 0);
/* VDPA driver should make sure the vq is stopped here */
WARN_ON(ops->get_vq_ready(vdpa, index));
- kfree(info);
return ERR_PTR(err);
}
@@ -241,20 +248,12 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
struct vdpa_device *vdpa = vd_dev->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
- struct virtio_vdpa_vq_info *info = vq->priv;
unsigned int index = vq->index;
- unsigned long flags;
-
- spin_lock_irqsave(&vd_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vd_dev->lock, flags);
/* Select and deactivate the queue (best effort) */
ops->set_vq_ready(vdpa, index, 0);
vring_del_virtqueue(vq);
-
- kfree(info);
}
static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
@@ -265,42 +264,117 @@ static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
virtio_vdpa_del_vq(vq);
}
+static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+{
+ affd->nr_sets = 1;
+ affd->set_size[0] = affvecs;
+}
+
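+/*
+ * Mirrors irq_create_affinity_masks(): pre/post vectors may run on any CPU,
+ * while each set's vectors are spread evenly via group_cpus_evenly().
+ */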
+static struct cpumask *
+create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+{
+ unsigned int affvecs = 0, curvec, usedvecs, i;
+ struct cpumask *masks = NULL;
+
+ if (nvecs > affd->pre_vectors + affd->post_vectors)
+ affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+
+ if (!affd->calc_sets)
+ affd->calc_sets = default_calc_sets;
+
+ affd->calc_sets(affd, affvecs);
+
+ if (!affvecs)
+ return NULL;
+
+ masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ return NULL;
+
+ /* Fill out vectors at the beginning that don't need affinity */
+ for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+ cpumask_setall(&masks[curvec]);
+
+ for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
+ unsigned int this_vecs = affd->set_size[i];
+ unsigned int nr_masks;
+ int j;
+ struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
+
+ if (!result) {
+ kfree(masks);
+ return NULL;
+ }
+
+ for (j = 0; j < nr_masks; j++)
+ cpumask_copy(&masks[curvec + j], &result[j]);
+ kfree(result);
+
+ curvec += nr_masks;
+ usedvecs += nr_masks;
+ }
+
+ /* Fill out vectors at the end that don't need affinity */
+ if (usedvecs >= affvecs)
+ curvec = affd->pre_vectors + affvecs;
+ else
+ curvec = affd->pre_vectors + usedvecs;
+ for (; curvec < nvecs; curvec++)
+ cpumask_setall(&masks[curvec]);
+
+ return masks;
+}
+
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
+ struct cpumask *masks;
struct vdpa_callback cb;
+ bool has_affinity = desc && ops->set_vq_affinity;
int i, err, queue_idx = 0;
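+ /* Spread affinity only if requested and the parent can program it. */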
+ if (has_affinity) {
+ masks = create_affinity_masks(nvqs, desc);
+ if (!masks)
+ return -ENOMEM;
+ }
+
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
- callbacks[i], names[i], ctx ?
- ctx[i] : false);
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
}
+
+ if (has_affinity)
+ ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
cb.private = vd_dev;
ops->set_config_cb(vdpa, &cb);
+ if (has_affinity)
+ kfree(masks);
return 0;
err_setup_vq:
virtio_vdpa_del_vqs(vdev);
+ if (has_affinity)
+ kfree(masks);
return err;
}
@@ -330,6 +404,32 @@ static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
return dev_name(&vdpa->dev);
}
+static int virtio_vdpa_set_vq_affinity(struct virtqueue *vq,
+ const struct cpumask *cpu_mask)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
+ struct vdpa_device *vdpa = vd_dev->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ unsigned int index = vq->index;
+
+ if (ops->set_vq_affinity)
+ return ops->set_vq_affinity(vdpa, index, cpu_mask);
+
+ return 0;
+}
+
+static const struct cpumask *
+virtio_vdpa_get_vq_affinity(struct virtio_device *vdev, int index)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_vq_affinity)
+ return ops->get_vq_affinity(vdpa, index);
+
+ return NULL;
+}
+
static const struct virtio_config_ops virtio_vdpa_config_ops = {
.get = virtio_vdpa_get,
.set = virtio_vdpa_set,
@@ -342,6 +442,8 @@ static const struct virtio_config_ops virtio_vdpa_config_ops = {
.get_features = virtio_vdpa_get_features,
.finalize_features = virtio_vdpa_finalize_features,
.bus_name = virtio_vdpa_bus_name,
+ .set_vq_affinity = virtio_vdpa_set_vq_affinity,
+ .get_vq_affinity = virtio_vdpa_get_vq_affinity,
};
static void virtio_vdpa_release_dev(struct device *_d)
@@ -364,12 +466,12 @@ static int virtio_vdpa_probe(struct vdpa_device *vdpa)
if (!vd_dev)
return -ENOMEM;
- vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
+ vd_dev->vdev.dev.parent = vdpa->map ? &vdpa->dev :
+ vdpa_get_map(vdpa).dma_dev;
vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
vd_dev->vdev.config = &virtio_vdpa_config_ops;
+ vd_dev->vdev.map = vdpa->map;
vd_dev->vdpa = vdpa;
- INIT_LIST_HEAD(&vd_dev->virtqueues);
- spin_lock_init(&vd_dev->lock);
vd_dev->vdev.id.device = ops->get_device_id(vdpa);
if (vd_dev->vdev.id.device == 0)