Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/Kconfig                  86
-rw-r--r--  drivers/virtio/Makefile                  6
-rw-r--r--  drivers/virtio/virtio.c                240
-rw-r--r--  drivers/virtio/virtio_balloon.c        180
-rw-r--r--  drivers/virtio/virtio_debug.c          114
-rw-r--r--  drivers/virtio/virtio_dma_buf.c          3
-rw-r--r--  drivers/virtio/virtio_input.c           10
-rw-r--r--  drivers/virtio/virtio_mem.c            206
-rw-r--r--  drivers/virtio/virtio_mmio.c            16
-rw-r--r--  drivers/virtio/virtio_pci_common.c     283
-rw-r--r--  drivers/virtio/virtio_pci_common.h      39
-rw-r--r--  drivers/virtio/virtio_pci_modern.c     596
-rw-r--r--  drivers/virtio/virtio_ring.c           607
-rw-r--r--  drivers/virtio/virtio_rtc_arm.c         23
-rw-r--r--  drivers/virtio/virtio_rtc_class.c      262
-rw-r--r--  drivers/virtio/virtio_rtc_driver.c    1407
-rw-r--r--  drivers/virtio/virtio_rtc_internal.h   122
-rw-r--r--  drivers/virtio/virtio_rtc_ptp.c        347
-rw-r--r--  drivers/virtio/virtio_vdpa.c            16
19 files changed, 3943 insertions, 620 deletions
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index c17193544268..6db5235a7693 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -122,7 +122,7 @@ config VIRTIO_BALLOON
config VIRTIO_MEM
tristate "Virtio mem driver"
- depends on X86_64 || ARM64
+ depends on X86_64 || ARM64 || RISCV || S390
depends on VIRTIO
depends on MEMORY_HOTPLUG
depends on MEMORY_HOTREMOVE
@@ -132,11 +132,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver currently only supports x86-64 and arm64. Although it
- should compile on other architectures that implement memory
- hot(un)plug, architecture-specific and/or common
- code changes may be required for virtio-mem, kdump and kexec to work as
- expected.
+ This driver currently supports x86-64, arm64, riscv and s390.
+ Although it should compile on other architectures that implement
+ memory hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to
+ work as expected.
If unsure, say M.
@@ -178,4 +178,78 @@ config VIRTIO_DMA_SHARED_BUFFER
This option adds a flavor of dma buffers that are backed by
virtio resources.
+config VIRTIO_DEBUG
+ bool "Debug facilities"
+ depends on VIRTIO
+ help
+ Enable this to expose debug facilities over debugfs.
+ This allows debugging features: seeing which features the device
+ advertises and setting a filter for the features the driver uses.
+
+ If unsure, say N.
+
+config VIRTIO_RTC
+ tristate "Virtio RTC driver"
+ depends on VIRTIO
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver provides current time from a Virtio RTC device. The driver
+ provides the time through one or more clocks. The Virtio RTC PTP
+ clocks and/or the Real Time Clock driver for Virtio RTC must be
+ enabled to expose the clocks to userspace.
+
+ To compile this code as a module, choose M here: the module will be
+ called virtio_rtc.
+
+ If unsure, say M.
+
+if VIRTIO_RTC
+
+comment "WARNING: Consider enabling VIRTIO_RTC_PTP and/or VIRTIO_RTC_CLASS."
+ depends on !VIRTIO_RTC_PTP && !VIRTIO_RTC_CLASS
+
+comment "Enable PTP_1588_CLOCK in order to enable VIRTIO_RTC_PTP."
+ depends on PTP_1588_CLOCK=n
+
+config VIRTIO_RTC_PTP
+ bool "Virtio RTC PTP clocks"
+ default y
+ depends on PTP_1588_CLOCK
+ help
+ This exposes any Virtio RTC clocks as PTP Hardware Clocks (PHCs) to
+ userspace. The PHC sysfs attribute "clock_name" describes the clock
+ type.
+
+ If unsure, say Y.
+
+config VIRTIO_RTC_ARM
+ bool "Virtio RTC cross-timestamping using Arm Generic Timer"
+ default y
+ depends on VIRTIO_RTC_PTP && ARM_ARCH_TIMER
+ help
+ This enables Virtio RTC cross-timestamping using the Arm Generic Timer.
+ It only has an effect if the Virtio RTC device also supports this. The
+ cross-timestamp is available through the PTP clock driver precise
+ cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE2 aka
+ PTP_SYS_OFFSET_PRECISE).
+
+ If unsure, say Y.
+
+comment "Enable RTC_CLASS in order to enable VIRTIO_RTC_CLASS."
+ depends on RTC_CLASS=n
+
+config VIRTIO_RTC_CLASS
+ bool "Real Time Clock driver for Virtio RTC"
+ default y
+ depends on RTC_CLASS
+ help
+ This exposes the Virtio RTC UTC-like clock as a Linux Real Time Clock.
+ It only has an effect if the Virtio RTC device has a UTC-like clock
+ which smears leap seconds to avoid steps. The Real Time Clock is
+ read-only, and may support setting an alarm.
+
+ If unsure, say Y.
+
+endif # VIRTIO_RTC
+
endif # VIRTIO_MENU
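
The PTP_SYS_OFFSET_PRECISE ioctl mentioned in the VIRTIO_RTC_ARM help text is the
standard PHC precise cross-timestamping interface. A minimal userspace sketch,
assuming the clock registered as /dev/ptp0 (the actual index depends on the system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDONLY);
		struct ptp_sys_offset_precise off = { 0 };

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
			return 1;
		/* off.device and off.sys_realtime form one cross-timestamp pair */
		printf("dev %lld.%09u sys %lld.%09u\n",
		       (long long)off.device.sec, off.device.nsec,
		       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
		return 0;
	}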
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 73ace62af440..eefcfe90d6b8 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -13,3 +13,9 @@ obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o
+obj-$(CONFIG_VIRTIO_DEBUG) += virtio_debug.o
+obj-$(CONFIG_VIRTIO_RTC) += virtio_rtc.o
+virtio_rtc-y := virtio_rtc_driver.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_PTP) += virtio_rtc_ptp.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_ARM) += virtio_rtc_arm.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_CLASS) += virtio_rtc_class.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 9510c551dce8..95d5d7993e5b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -82,7 +82,7 @@ static inline int virtio_id_match(const struct virtio_device *dev,
/* This looks through all the IDs a driver claims to support. If any of them
* match, we return 1 and the kernel will call virtio_dev_probe(). */
-static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
+static int virtio_dev_match(struct device *_dv, const struct device_driver *_dr)
{
unsigned int i;
struct virtio_device *dev = dev_to_virtio(_dv);
@@ -127,10 +127,12 @@ static void __virtio_config_changed(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- if (!dev->config_enabled)
+ if (!dev->config_core_enabled || dev->config_driver_disabled)
dev->config_change_pending = true;
- else if (drv && drv->config_changed)
+ else if (drv && drv->config_changed) {
drv->config_changed(dev);
+ dev->config_change_pending = false;
+ }
}
void virtio_config_changed(struct virtio_device *dev)
@@ -143,20 +145,51 @@ void virtio_config_changed(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_config_changed);
-static void virtio_config_disable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_disable - disable config change reporting by drivers
+ * @dev: the device whose config change reporting to disable
+ *
+ * This is only allowed to be called by a driver and disabling can't
+ * be nested.
+ */
+void virtio_config_driver_disable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_driver_disabled = true;
spin_unlock_irq(&dev->config_lock);
}
+EXPORT_SYMBOL_GPL(virtio_config_driver_disable);
-static void virtio_config_enable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_enable - enable config change reporting by drivers
+ * @dev: the device whose config change reporting to enable
+ *
+ * This is only allowed to be called by a driver and enabling can't
+ * be nested.
+ */
+void virtio_config_driver_enable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = true;
+ dev->config_driver_disabled = false;
+ if (dev->config_change_pending)
+ __virtio_config_changed(dev);
+ spin_unlock_irq(&dev->config_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_config_driver_enable);
+
+static void virtio_config_core_disable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = false;
+ spin_unlock_irq(&dev->config_lock);
+}
+
+static void virtio_config_core_enable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = true;
if (dev->config_change_pending)
__virtio_config_changed(dev);
- dev->config_change_pending = false;
spin_unlock_irq(&dev->config_lock);
}
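
With the gating split into core and driver levels, a driver can now mask
config-change callbacks around its own critical sections without disturbing the
core's probe/remove gating; a change arriving in between stays in
config_change_pending and is replayed on enable. A minimal usage sketch
(example_reconfigure() is hypothetical, not part of this patch):

	static void example_reconfigure(struct virtio_device *vdev)
	{
		virtio_config_driver_disable(vdev);
		/* reconfigure without config_changed() callbacks racing in */
		virtio_config_driver_enable(vdev);	/* replays a pending change */
	}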
@@ -274,6 +307,9 @@ static int virtio_dev_probe(struct device *_d)
else
dev->features = driver_features_legacy & device_features;
+ /* When debugging, the user may filter some features by hand. */
+ virtio_debug_device_filter_features(dev);
+
/* Transport features always preserved to pass to finalize_features. */
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
if (device_features & (1ULL << i))
@@ -302,15 +338,9 @@ static int virtio_dev_probe(struct device *_d)
if (err)
goto err;
- if (dev->config->create_avq) {
- err = dev->config->create_avq(dev);
- if (err)
- goto err;
- }
-
err = drv->probe(dev);
if (err)
- goto err_probe;
+ goto err;
/* If probe didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -319,13 +349,10 @@ static int virtio_dev_probe(struct device *_d)
if (drv->scan)
drv->scan(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
-err_probe:
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -337,13 +364,10 @@ static void virtio_dev_remove(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- virtio_config_disable(dev);
+ virtio_config_core_disable(dev);
drv->remove(dev);
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
-
/* Driver should have reset device. */
WARN_ON_ONCE(dev->config->get_status(dev));
@@ -353,6 +377,58 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
+/*
+ * virtio_irq_get_affinity - get IRQ affinity mask for device
+ * @_d: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @_d and @irq_vec.
+ */
+static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
+ unsigned int irq_vec)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+
+ if (!dev->config->get_vq_affinity)
+ return NULL;
+
+ return dev->config->get_vq_affinity(dev, irq_vec);
+}
+
+static void virtio_dev_shutdown(struct device *_d)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ /*
+ * Stop accesses to or from the device.
+ * We only need to do it if there's a driver - no accesses otherwise.
+ */
+ if (!drv)
+ return;
+
+ /* If the driver has its own shutdown method, use that. */
+ if (drv->shutdown) {
+ drv->shutdown(dev);
+ return;
+ }
+
+ /*
+ * Some devices get wedged if you kick them after they are
+ * reset. Mark all vqs as broken to make sure we don't.
+ */
+ virtio_break_device(dev);
+ /*
+ * Guarantee that any callback will see vq->broken as true.
+ */
+ virtio_synchronize_cbs(dev);
+ /*
+ * As IOMMUs are reset on shutdown, this will block device access to memory.
+ * Some devices get wedged if this happens, so reset to make sure it does not.
+ */
+ dev->config->reset(dev);
+}
+
static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@@ -360,6 +436,8 @@ static const struct bus_type virtio_bus = {
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
+ .irq_get_affinity = virtio_irq_get_affinity,
+ .shutdown = virtio_dev_shutdown,
};
int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
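
A driver that needs more than the generic break-vqs-and-reset sequence can supply
its own handler via the shutdown hook checked in virtio_dev_shutdown() above. A
sketch under that assumption (example_shutdown() and example_driver are hypothetical):

	static void example_shutdown(struct virtio_device *vdev)
	{
		/* quiesce driver-side work first, then reset the device */
		virtio_reset_device(vdev);
	}

	static struct virtio_driver example_driver = {
		/* ... */
		.shutdown = example_shutdown,
	};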
@@ -452,7 +530,7 @@ int register_virtio_device(struct virtio_device *dev)
goto out_ida_remove;
spin_lock_init(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_core_enabled = false;
dev->config_change_pending = false;
INIT_LIST_HEAD(&dev->vqs);
@@ -465,6 +543,8 @@ int register_virtio_device(struct virtio_device *dev)
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ virtio_debug_device_init(dev);
+
/*
* device_add() causes the bus infrastructure to look for a matching
* driver.
@@ -496,36 +576,12 @@ void unregister_virtio_device(struct virtio_device *dev)
int index = dev->index; /* save for after device release */
device_unregister(&dev->dev);
+ virtio_debug_device_exit(dev);
ida_free(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
-#ifdef CONFIG_PM_SLEEP
-int virtio_device_freeze(struct virtio_device *dev)
-{
- struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- int ret;
-
- virtio_config_disable(dev);
-
- dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
-
- if (drv && drv->freeze) {
- ret = drv->freeze(dev);
- if (ret) {
- virtio_config_enable(dev);
- return ret;
- }
- }
-
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(virtio_device_freeze);
-
-int virtio_device_restore(struct virtio_device *dev)
+static int virtio_device_restore_priv(struct virtio_device *dev, bool restore)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
int ret;
@@ -556,49 +612,109 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
- if (dev->config->create_avq) {
- ret = dev->config->create_avq(dev);
+ if (restore) {
+ if (drv->restore) {
+ ret = drv->restore(dev);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = drv->reset_done(dev);
if (ret)
goto err;
}
- if (drv->restore) {
- ret = drv->restore(dev);
- if (ret)
- goto err_restore;
- }
-
/* If restore didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
virtio_device_ready(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
-err_restore:
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
}
+
+#ifdef CONFIG_PM_SLEEP
+int virtio_device_freeze(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_freeze);
+
+int virtio_device_restore(struct virtio_device *dev)
+{
+ return virtio_device_restore_priv(dev, true);
+}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif
+int virtio_device_reset_prepare(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ if (!drv || !drv->reset_prepare)
+ return -EOPNOTSUPP;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ ret = drv->reset_prepare(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_prepare);
+
+int virtio_device_reset_done(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ if (!drv || !drv->reset_done)
+ return -EOPNOTSUPP;
+
+ return virtio_device_restore_priv(dev, false);
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_done);
+
static int virtio_init(void)
{
if (bus_register(&virtio_bus) != 0)
panic("virtio bus registration failed");
+ virtio_debug_init();
return 0;
}
static void __exit virtio_exit(void)
{
+ virtio_debug_exit();
bus_unregister(&virtio_bus);
ida_destroy(&virtio_index_ida);
}
core_initcall(virtio_init);
module_exit(virtio_exit);
+MODULE_DESCRIPTION("Virtio core interface");
MODULE_LICENSE("GPL");
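
The new reset_prepare/reset_done pair is driven by the PCI error handlers added in
virtio_pci_common.c further down: prepare runs before a function-level reset, done
runs afterwards, with the core re-running the restore path
(virtio_device_restore_priv()) in between. A driver opts in by implementing both
callbacks; a hedged sketch (hypothetical driver, error handling elided):

	static int example_reset_prepare(struct virtio_device *vdev)
	{
		/* stop queue processing; the transport resets the device next */
		return 0;
	}

	static int example_reset_done(struct virtio_device *vdev)
	{
		/* re-create vqs and resume; the core restored the device state */
		return 0;
	}

	static struct virtio_driver example_driver = {
		/* ... */
		.reset_prepare = example_reset_prepare,
		.reset_done = example_reset_done,
	};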
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f5b3dd31fcf..89da052f4f68 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -121,11 +121,14 @@ struct virtio_balloon {
struct page_reporting_dev_info pr_dev_info;
/* State for keeping the wakeup_source active while adjusting the balloon */
- spinlock_t adjustment_lock;
- bool adjustment_signal_pending;
- bool adjustment_in_progress;
+ spinlock_t wakeup_lock;
+ bool processing_wakeup_event;
+ u32 wakeup_signal_mask;
};
+#define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0)
+#define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1)
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -140,6 +143,36 @@ static u32 page_to_balloon_pfn(struct page *page)
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
+static void start_wakeup_event(struct virtio_balloon *vb, u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vb->wakeup_lock, flags);
+ vb->wakeup_signal_mask |= mask;
+ if (!vb->processing_wakeup_event) {
+ vb->processing_wakeup_event = true;
+ pm_stay_awake(&vb->vdev->dev);
+ }
+ spin_unlock_irqrestore(&vb->wakeup_lock, flags);
+}
+
+static void process_wakeup_event(struct virtio_balloon *vb, u32 mask)
+{
+ spin_lock_irq(&vb->wakeup_lock);
+ vb->wakeup_signal_mask &= ~mask;
+ spin_unlock_irq(&vb->wakeup_lock);
+}
+
+static void finish_wakeup_event(struct virtio_balloon *vb)
+{
+ spin_lock_irq(&vb->wakeup_lock);
+ if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) {
+ vb->processing_wakeup_event = false;
+ pm_relax(&vb->vdev->dev);
+ }
+ spin_unlock_irq(&vb->wakeup_lock);
+}
+
static void balloon_ack(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
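
The three helpers above generalize the old adjustment-only logic into a
wakeup-source pattern keyed by a signal mask: start on the interrupt path, process
in the work handler, finish once the mask drains. Condensed from the stats hunks
that follow:

	/* interrupt path (stats_request) */
	start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
	queue_work(system_freezable_wq, &vb->update_balloon_stats_work);

	/* work handler (update_balloon_stats_func) */
	process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
	stats_handle_request(vb);
	finish_wakeup_event(vb);	/* pm_relax() once no signals remain */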
@@ -218,7 +251,7 @@ static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
for (num_pfns = 0; num_pfns < num;
num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_page_alloc();
+ page = balloon_page_alloc();
if (!page) {
dev_info_ratelimited(&vb->vdev->dev,
@@ -316,34 +349,67 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
-static unsigned int update_balloon_stats(struct virtio_balloon *vb)
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/* Return the number of entries filled by vm events */
+static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
- struct sysinfo i;
unsigned int idx = 0;
- long available;
- unsigned long caches;
+ unsigned int zid;
+ unsigned long stall = 0;
all_vm_events(events);
- si_meminfo(&i);
-
- available = si_mem_available();
- caches = global_node_page_state(NR_FILE_PAGES);
-
-#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
- pages_to_bytes(events[PSWPIN]));
+ pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
- pages_to_bytes(events[PSWPOUT]));
+ pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);
+
+ /* sum all the stall events */
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
+ pages_to_bytes(events[PGSCAN_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
+ pages_to_bytes(events[PGSCAN_DIRECT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_DIRECT]));
+
#ifdef CONFIG_HUGETLB_PAGE
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
events[HTLB_BUDDY_PGALLOC]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
events[HTLB_BUDDY_PGALLOC_FAIL]);
-#endif
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */
+
+ return idx;
+}
+#else /* CONFIG_VM_EVENT_COUNTERS */
+static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
+{
+ return 0;
+}
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
+{
+ struct sysinfo i;
+ unsigned int idx;
+ long available;
+ unsigned long caches;
+
+ idx = update_balloon_vm_stats(vb);
+
+ si_meminfo(&i);
+ available = si_mem_available();
+ caches = global_node_page_state(NR_FILE_PAGES);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
@@ -370,8 +436,10 @@ static void stats_request(struct virtqueue *vq)
struct virtio_balloon *vb = vq->vdev->priv;
spin_lock(&vb->stop_update_lock);
- if (!vb->stop_update)
+ if (!vb->stop_update) {
+ start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
+ }
spin_unlock(&vb->stop_update_lock);
}
@@ -444,29 +512,10 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
static void start_update_balloon_size(struct virtio_balloon *vb)
{
- unsigned long flags;
-
- spin_lock_irqsave(&vb->adjustment_lock, flags);
- vb->adjustment_signal_pending = true;
- if (!vb->adjustment_in_progress) {
- vb->adjustment_in_progress = true;
- pm_stay_awake(vb->vdev->dev.parent);
- }
- spin_unlock_irqrestore(&vb->adjustment_lock, flags);
-
+ start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
queue_work(system_freezable_wq, &vb->update_balloon_size_work);
}
-static void end_update_balloon_size(struct virtio_balloon *vb)
-{
- spin_lock_irq(&vb->adjustment_lock);
- if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) {
- vb->adjustment_in_progress = false;
- pm_relax(vb->vdev->dev.parent);
- }
- spin_unlock_irq(&vb->adjustment_lock);
-}
-
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -495,7 +544,10 @@ static void update_balloon_stats_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_stats_work);
+
+ process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
stats_handle_request(vb);
+ finish_wakeup_event(vb);
}
static void update_balloon_size_func(struct work_struct *work)
@@ -506,9 +558,7 @@ static void update_balloon_size_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_size_work);
- spin_lock_irq(&vb->adjustment_lock);
- vb->adjustment_signal_pending = false;
- spin_unlock_irq(&vb->adjustment_lock);
+ process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
diff = towards_target(vb);
@@ -523,14 +573,13 @@ static void update_balloon_size_func(struct work_struct *work)
if (diff)
queue_work(system_freezable_wq, work);
else
- end_update_balloon_size(vb);
+ finish_wakeup_event(vb);
}
static int init_vqs(struct virtio_balloon *vb)
{
+ struct virtqueue_info vqs_info[VIRTIO_BALLOON_VQ_MAX] = {};
struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
- vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
- const char *names[VIRTIO_BALLOON_VQ_MAX];
int err;
/*
@@ -538,33 +587,26 @@ static int init_vqs(struct virtio_balloon *vb)
* will be NULL if the related feature is not enabled, which will
* cause no allocation for the corresponding virtqueue in find_vqs.
*/
- callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
- names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
- callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
- names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
- callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
- names[VIRTIO_BALLOON_VQ_STATS] = NULL;
- callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
- names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
- names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;
+ vqs_info[VIRTIO_BALLOON_VQ_INFLATE].callback = balloon_ack;
+ vqs_info[VIRTIO_BALLOON_VQ_INFLATE].name = "inflate";
+ vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].callback = balloon_ack;
+ vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].name = "deflate";
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
- names[VIRTIO_BALLOON_VQ_STATS] = "stats";
- callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
+ vqs_info[VIRTIO_BALLOON_VQ_STATS].name = "stats";
+ vqs_info[VIRTIO_BALLOON_VQ_STATS].callback = stats_request;
}
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
- names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
- callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
- }
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ vqs_info[VIRTIO_BALLOON_VQ_FREE_PAGE].name = "free_page_vq";
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
- names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
- callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
+ vqs_info[VIRTIO_BALLOON_VQ_REPORTING].name = "reporting_vq";
+ vqs_info[VIRTIO_BALLOON_VQ_REPORTING].callback = balloon_ack;
}
err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
- callbacks, names, NULL);
+ vqs_info, NULL);
if (err)
return err;
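
The conversion replaces the parallel callbacks[]/names[]/ctx[] arrays with a single
array of struct virtqueue_info. The layout implied by the call sites (the actual
definition lives in include/linux/virtio_config.h, outside this diff) is roughly:

	struct virtqueue_info {
		const char *name;		/* NULL: slot unused, no vq allocated */
		vq_callback_t *callback;	/* NULL: no interrupt callback */
		bool ctx;
	};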
@@ -1027,7 +1069,16 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_unregister_oom;
}
- spin_lock_init(&vb->adjustment_lock);
+ spin_lock_init(&vb->wakeup_lock);
+
+ /*
+ * The virtio balloon itself can't wake up the device, but it is
+ * responsible for processing wakeup events passed up from the transport
+ * layer. Wakeup sources don't support nesting/chaining calls, so we use
+ * our own wakeup source to ensure wakeup events are properly handled
+ * without trampling on the transport layer's wakeup source.
+ */
+ device_set_wakeup_capable(&vb->vdev->dev, true);
virtio_device_ready(vdev);
@@ -1155,7 +1206,6 @@ static struct virtio_driver virtio_balloon_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtballoon_validate,
.probe = virtballoon_probe,
diff --git a/drivers/virtio/virtio_debug.c b/drivers/virtio/virtio_debug.c
new file mode 100644
index 000000000000..95c8fc7705bb
--- /dev/null
+++ b/drivers/virtio/virtio_debug.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/debugfs.h>
+
+static struct dentry *virtio_debugfs_dir;
+
+static int virtio_debug_device_features_show(struct seq_file *s, void *data)
+{
+ struct virtio_device *dev = s->private;
+ u64 device_features;
+ unsigned int i;
+
+ device_features = dev->config->get_features(dev);
+ for (i = 0; i < BITS_PER_LONG_LONG; i++) {
+ if (device_features & (1ULL << i))
+ seq_printf(s, "%u\n", i);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(virtio_debug_device_features);
+
+static int virtio_debug_filter_features_show(struct seq_file *s, void *data)
+{
+ struct virtio_device *dev = s->private;
+ unsigned int i;
+
+ for (i = 0; i < BITS_PER_LONG_LONG; i++) {
+ if (dev->debugfs_filter_features & (1ULL << i))
+ seq_printf(s, "%u\n", i);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(virtio_debug_filter_features);
+
+static int virtio_debug_filter_features_clear(void *data, u64 val)
+{
+ struct virtio_device *dev = data;
+
+ if (val == 1)
+ dev->debugfs_filter_features = 0;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_features_clear_fops, NULL,
+ virtio_debug_filter_features_clear, "%llu\n");
+
+static int virtio_debug_filter_feature_add(void *data, u64 val)
+{
+ struct virtio_device *dev = data;
+
+ if (val >= BITS_PER_LONG_LONG)
+ return -EINVAL;
+ dev->debugfs_filter_features |= BIT_ULL_MASK(val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_feature_add_fops, NULL,
+ virtio_debug_filter_feature_add, "%llu\n");
+
+static int virtio_debug_filter_feature_del(void *data, u64 val)
+{
+ struct virtio_device *dev = data;
+
+ if (val >= BITS_PER_LONG_LONG)
+ return -EINVAL;
+ dev->debugfs_filter_features &= ~BIT_ULL_MASK(val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_feature_del_fops, NULL,
+ virtio_debug_filter_feature_del, "%llu\n");
+
+void virtio_debug_device_init(struct virtio_device *dev)
+{
+ dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->dev),
+ virtio_debugfs_dir);
+ debugfs_create_file("device_features", 0400, dev->debugfs_dir, dev,
+ &virtio_debug_device_features_fops);
+ debugfs_create_file("filter_features", 0400, dev->debugfs_dir, dev,
+ &virtio_debug_filter_features_fops);
+ debugfs_create_file("filter_features_clear", 0200, dev->debugfs_dir, dev,
+ &virtio_debug_filter_features_clear_fops);
+ debugfs_create_file("filter_feature_add", 0200, dev->debugfs_dir, dev,
+ &virtio_debug_filter_feature_add_fops);
+ debugfs_create_file("filter_feature_del", 0200, dev->debugfs_dir, dev,
+ &virtio_debug_filter_feature_del_fops);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_device_init);
+
+void virtio_debug_device_filter_features(struct virtio_device *dev)
+{
+ dev->features &= ~dev->debugfs_filter_features;
+}
+EXPORT_SYMBOL_GPL(virtio_debug_device_filter_features);
+
+void virtio_debug_device_exit(struct virtio_device *dev)
+{
+ debugfs_remove_recursive(dev->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_device_exit);
+
+void virtio_debug_init(void)
+{
+ virtio_debugfs_dir = debugfs_create_dir("virtio", NULL);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_init);
+
+void virtio_debug_exit(void)
+{
+ debugfs_remove_recursive(virtio_debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(virtio_debug_exit);
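
Tying this file back to the probe path: virtio_dev_probe() calls
virtio_debug_device_filter_features() right after computing the feature
intersection, so a bit written to filter_feature_add takes effect on the next
driver bind. The per-device files land under /sys/kernel/debug/virtio/<device>
(assuming debugfs is mounted in the usual place). Condensed flow:

	/* virtio_dev_probe(), see the virtio.c hunk above */
	dev->features = driver_features & device_features;
	virtio_debug_device_filter_features(dev);	/* &= ~debugfs_filter_features */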
diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c
index 2521a75009c3..3fe1d03b0645 100644
--- a/drivers/virtio/virtio_dma_buf.c
+++ b/drivers/virtio/virtio_dma_buf.c
@@ -85,5 +85,6 @@ int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf,
}
EXPORT_SYMBOL(virtio_dma_buf_get_uuid);
+MODULE_DESCRIPTION("dma-bufs for virtio exported objects");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 3aa46703872d..a5d63269f20b 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -185,13 +185,14 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
static int virtinput_init_vqs(struct virtio_input *vi)
{
+ struct virtqueue_info vqs_info[] = {
+ { "events", virtinput_recv_events },
+ { "status", virtinput_recv_status },
+ };
struct virtqueue *vqs[2];
- vq_callback_t *cbs[] = { virtinput_recv_events,
- virtinput_recv_status };
- static const char * const names[] = { "events", "status" };
int err;
- err = virtio_find_vqs(vi->vdev, 2, vqs, cbs, names, NULL);
+ err = virtio_find_vqs(vi->vdev, 2, vqs, vqs_info, NULL);
if (err)
return err;
vi->evt = vqs[0];
@@ -394,7 +395,6 @@ static const struct virtio_device_id id_table[] = {
static struct virtio_driver virtio_input_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 8e3223294442..56d0dbe62163 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -21,6 +21,8 @@
#include <linux/bitmap.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
+#include <linux/vmalloc.h>
+#include <linux/suspend.h>
#include <acpi/acpi_numa.h>
@@ -131,6 +133,8 @@ struct virtio_mem {
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
+ /* Usable region size in bytes. */
+ uint64_t usable_region_size;
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
@@ -252,6 +256,9 @@ struct virtio_mem {
/* Memory notifier (online/offline events). */
struct notifier_block memory_notifier;
+ /* Notifier to block hibernation image storing/reloading. */
+ struct notifier_block pm_notifier;
+
#ifdef CONFIG_PROC_VMCORE
/* vmcore callback for /proc/vmcore handling in kdump mode */
struct vmcore_cb vmcore_cb;
@@ -1111,6 +1118,25 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
return rc;
}
+static int virtio_mem_pm_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct virtio_mem *vm = container_of(nb, struct virtio_mem,
+ pm_notifier);
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ /*
+ * When restarting the VM, all memory is unplugged. Don't
+ * allow hibernating or restoring from an image.
+ */
+ dev_err(&vm->vdev->dev, "hibernation is not supported.\n");
+ return NOTIFY_BAD;
+ default:
+ return NOTIFY_OK;
+ }
+}
+
/*
* Set a range of pages PG_offline. Remember pages that were never onlined
* (via generic_online_page()) using PageDirty().
@@ -1122,12 +1148,16 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
- __SetPageOffline(page);
- if (!onlined) {
+ if (!onlined)
+ /*
+ * Pages that have not been onlined yet were initialized
+ * to PageOffline(). Remember that we have to route them
+ * through generic_online_page().
+ */
SetPageDirty(page);
- /* FIXME: remove after cleanups */
- ClearPageReserved(page);
- }
+ else
+ __SetPageOffline(page);
+ VM_WARN_ON_ONCE(!PageOffline(page));
}
page_offline_end();
}
@@ -1142,9 +1172,11 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
- __ClearPageOffline(page);
if (!onlined)
+ /* generic_online_page() will clear PageOffline(). */
ClearPageDirty(page);
+ else
+ __ClearPageOffline(page);
}
}
@@ -1239,12 +1271,6 @@ static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
struct page *page;
unsigned long i;
- /*
- * Drop our reference to the pages so the memory can get offlined
- * and add the unplugged pages to the managed page counters (so
- * offlining code can correctly subtract them again).
- */
- adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
/* Drop our reference to the pages so the memory can get offlined. */
for (i = 0; i < nr_pages; i++) {
page = pfn_to_page(pfn + i);
@@ -1263,10 +1289,9 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
unsigned long i;
/*
- * Get the reference we dropped when going offline and subtract the
- * unplugged pages from the managed page counters.
+ * Get the reference again that we dropped via page_ref_dec_and_test()
+ * when going offline.
*/
- adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
for (i = 0; i < nr_pages; i++)
page_ref_inc(pfn_to_page(pfn + i));
}
@@ -2345,7 +2370,7 @@ static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
- uint64_t new_plugged_size, usable_region_size, end_addr;
+ uint64_t new_plugged_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
@@ -2355,8 +2380,8 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
/* calculate the last usable memory block id */
virtio_cread_le(vm->vdev, struct virtio_mem_config,
- usable_region_size, &usable_region_size);
- end_addr = min(vm->addr + usable_region_size - 1,
+ usable_region_size, &vm->usable_region_size);
+ end_addr = min(vm->addr + vm->usable_region_size - 1,
pluggable_range.end);
if (vm->in_sbm) {
@@ -2615,11 +2640,20 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
goto out_unreg_group;
- rc = register_virtio_mem_device(vm);
+ /* Block hibernation as early as possible. */
+ vm->pm_notifier.priority = INT_MAX;
+ vm->pm_notifier.notifier_call = virtio_mem_pm_notifier_cb;
+ rc = register_pm_notifier(&vm->pm_notifier);
if (rc)
goto out_unreg_mem;
+ rc = register_virtio_mem_device(vm);
+ if (rc)
+ goto out_unreg_pm;
+ virtio_device_ready(vm->vdev);
return 0;
+out_unreg_pm:
+ unregister_pm_notifier(&vm->pm_notifier);
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
out_unreg_group:
@@ -2694,13 +2728,103 @@ static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
mutex_unlock(&vm->hotplug_mutex);
return is_ram;
}
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+static int virtio_mem_vmcore_add_device_ram(struct virtio_mem *vm,
+ struct list_head *list, uint64_t start, uint64_t end)
+{
+ int rc;
+
+ rc = vmcore_alloc_add_range(list, start, end - start);
+ if (rc)
+ dev_err(&vm->vdev->dev,
+ "Error adding device RAM range: %d\n", rc);
+ return rc;
+}
+
+static int virtio_mem_vmcore_get_device_ram(struct vmcore_cb *cb,
+ struct list_head *list)
+{
+ struct virtio_mem *vm = container_of(cb, struct virtio_mem,
+ vmcore_cb);
+ const uint64_t device_start = vm->addr;
+ const uint64_t device_end = vm->addr + vm->usable_region_size;
+ uint64_t chunk_size, cur_start, cur_end, plugged_range_start = 0;
+ LIST_HEAD(tmp_list);
+ int rc;
+
+ if (!vm->plugged_size)
+ return 0;
+
+ /* Process memory sections, unless the device block size is bigger. */
+ chunk_size = max_t(uint64_t, PFN_PHYS(PAGES_PER_SECTION),
+ vm->device_block_size);
+
+ mutex_lock(&vm->hotplug_mutex);
+
+ /*
+ * We process larger chunks and indicate the complete chunk if any
+ * block in there is plugged. This reduces the number of pfn_is_ram()
+ * callbacks and mimics what was effectively done when the old
+ * kernel added complete memory sections/blocks to the elfcore hdr.
+ */
+ for (cur_start = device_start; cur_start < device_end; cur_start = cur_end) {
+ cur_end = ALIGN_DOWN(cur_start + chunk_size, chunk_size);
+ cur_end = min_t(uint64_t, cur_end, device_end);
+
+ rc = virtio_mem_send_state_request(vm, cur_start,
+ cur_end - cur_start);
+
+ if (rc < 0) {
+ dev_err(&vm->vdev->dev,
+ "Error querying block states: %d\n", rc);
+ goto out;
+ } else if (rc != VIRTIO_MEM_STATE_UNPLUGGED) {
+ /* Merge ranges with plugged memory. */
+ if (!plugged_range_start)
+ plugged_range_start = cur_start;
+ continue;
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start) {
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+ if (rc)
+ goto out;
+ plugged_range_start = 0;
+ }
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start)
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+out:
+ mutex_unlock(&vm->hotplug_mutex);
+ if (rc < 0) {
+ vmcore_free_ranges(&tmp_list);
+ return rc;
+ }
+ list_splice_tail(&tmp_list, list);
+ return 0;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
#endif /* CONFIG_PROC_VMCORE */
static int virtio_mem_init_kdump(struct virtio_mem *vm)
{
+ /* We must be prepared to receive a callback immediately. */
+ virtio_device_ready(vm->vdev);
#ifdef CONFIG_PROC_VMCORE
dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+ vm->vmcore_cb.get_device_ram = virtio_mem_vmcore_get_device_ram;
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
register_vmcore_cb(&vm->vmcore_cb);
return 0;
#else /* CONFIG_PROC_VMCORE */
@@ -2729,6 +2853,8 @@ static int virtio_mem_init(struct virtio_mem *vm)
virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, usable_region_size,
+ &vm->usable_region_size);
/* Determine the nid for the device based on the lowest address. */
if (vm->nid == NUMA_NO_NODE)
@@ -2824,8 +2950,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
- hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vm->retry_timer.function = virtio_mem_timer_expired;
+ hrtimer_setup(&vm->retry_timer, virtio_mem_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
@@ -2839,8 +2965,6 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
- virtio_device_ready(vdev);
-
/* trigger a config update to start processing the requested_size */
if (!vm->in_kdump) {
atomic_set(&vm->config_changed, 1);
@@ -2897,6 +3021,7 @@ static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
/* unregister callbacks */
unregister_virtio_mem_device(vm);
+ unregister_pm_notifier(&vm->pm_notifier);
unregister_memory_notifier(&vm->memory_notifier);
/*
@@ -2960,17 +3085,40 @@ static void virtio_mem_config_changed(struct virtio_device *vdev)
#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
+ struct virtio_mem *vm = vdev->priv;
+
/*
- * When restarting the VM, all memory is usually unplugged. Don't
- * allow to suspend/hibernate.
+ * We block hibernation using the PM notifier completely. The workqueue
+ * is already frozen by the PM core at this point, so we simply
+ * reset the device and clean up the queues.
*/
- dev_err(&vdev->dev, "save/restore not supported.\n");
- return -EPERM;
+ if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE &&
+ vm->plugged_size &&
+ !virtio_has_feature(vm->vdev, VIRTIO_MEM_F_PERSISTENT_SUSPEND)) {
+ dev_err(&vm->vdev->dev,
+ "suspending with plugged memory is not supported\n");
+ return -EPERM;
+ }
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+ vm->vq = NULL;
+ return 0;
}
static int virtio_mem_restore(struct virtio_device *vdev)
{
- return -EPERM;
+ struct virtio_mem *vm = vdev->priv;
+ int ret;
+
+ ret = virtio_mem_init_vq(vm);
+ if (ret)
+ return ret;
+ virtio_device_ready(vdev);
+
+ /* Let's check if anything changed. */
+ virtio_mem_config_changed(vdev);
+ return 0;
}
#endif
@@ -2979,6 +3127,7 @@ static unsigned int virtio_mem_features[] = {
VIRTIO_MEM_F_ACPI_PXM,
#endif
VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
+ VIRTIO_MEM_F_PERSISTENT_SUSPEND,
};
static const struct virtio_device_id virtio_mem_id_table[] = {
@@ -2990,7 +3139,6 @@ static struct virtio_driver virtio_mem_driver = {
.feature_table = virtio_mem_features,
.feature_table_size = ARRAY_SIZE(virtio_mem_features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = virtio_mem_id_table,
.probe = virtio_mem_probe,
.remove = virtio_mem_remove,
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 59892a31cf76..5d78c2d572ab 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -489,9 +489,7 @@ error_available:
static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
@@ -510,13 +508,15 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
enable_irq_wake(irq);
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false);
+ vqs[i] = vm_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
return PTR_ERR(vqs[i]);
@@ -696,12 +696,10 @@ free_vm_dev:
return rc;
}
-static int virtio_mmio_remove(struct platform_device *pdev)
+static void virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
unregister_virtio_device(&vm_dev->vdev);
-
- return 0;
}
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index b655fccaf773..d6d79af44569 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -24,6 +24,16 @@ MODULE_PARM_DESC(force_legacy,
"Force legacy mode for transitional virtio 1 devices");
#endif
+bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ return index == vp_dev->admin_vq.vq_index;
+}
+
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
@@ -46,12 +56,26 @@ bool vp_notify(struct virtqueue *vq)
return true;
}
+/* Notify all slow path virtqueues on an interrupt. */
+static void vp_vring_slow_path_interrupt(int irq,
+ struct virtio_pci_device *vp_dev)
+{
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
+ vring_interrupt(irq, info->vq);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
+
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
virtio_config_changed(&vp_dev->vdev);
+ vp_vring_slow_path_interrupt(irq, vp_dev);
return IRQ_HANDLED;
}
@@ -125,6 +149,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
+ if (!per_vq_vectors)
+ desc = NULL;
+
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
@@ -171,11 +198,17 @@ error:
return err;
}
+static bool vp_is_slow_path_vector(u16 msix_vec)
+{
+ return msix_vec == VP_MSIX_CONFIG_VECTOR;
+}
+
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
- u16 msix_vec)
+ u16 msix_vec,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
@@ -194,13 +227,16 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
info->vq = vq;
if (callback) {
spin_lock_irqsave(&vp_dev->lock, flags);
- list_add(&info->node, &vp_dev->virtqueues);
+ if (!vp_is_slow_path_vector(msix_vec))
+ list_add(&info->node, &vp_dev->virtqueues);
+ else
+ list_add(&info->node, &vp_dev->slow_virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
} else {
INIT_LIST_HEAD(&info->node);
}
- vp_dev->vqs[index] = info;
+ *p_info = info;
return vq;
out_info:
@@ -208,10 +244,9 @@ out_info:
return vq;
}
-static void vp_del_vq(struct virtqueue *vq)
+static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
/*
@@ -232,24 +267,25 @@ static void vp_del_vq(struct virtqueue *vq)
void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info;
struct virtqueue *vq, *n;
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- if (vp_dev->is_avq(vdev, vq->index))
- continue;
+ info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info :
+ vp_dev->vqs[vq->index];
if (vp_dev->per_vq_vectors) {
- int v = vp_dev->vqs[vq->index]->msix_vector;
-
- if (v != VIRTIO_MSI_NO_VECTOR) {
+ int v = info->msix_vector;
+ if (v != VIRTIO_MSI_NO_VECTOR &&
+ !vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
irq_update_affinity_hint(irq, NULL);
free_irq(irq, vq);
}
}
- vp_del_vq(vq);
+ vp_del_vq(vq, info);
}
vp_dev->per_vq_vectors = false;
@@ -284,73 +320,133 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
-static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], bool per_vq_vectors,
- const bool *ctx,
- struct irq_affinity *desc)
+enum vp_vq_vector_policy {
+ VP_VQ_VECTOR_POLICY_EACH,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW,
+ VP_VQ_VECTOR_POLICY_SHARED,
+};
+
+static struct virtqueue *
+vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
+ vq_callback_t *callback, const char *name, bool ctx,
+ bool slow_path, int *allocated_vectors,
+ enum vp_vq_vector_policy vector_policy,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtqueue *vq;
u16 msix_vec;
+ int err;
+
+ if (!callback)
+ msix_vec = VIRTIO_MSI_NO_VECTOR;
+ else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
+ (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
+ !slow_path))
+ msix_vec = (*allocated_vectors)++;
+ else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
+ slow_path)
+ msix_vec = VP_MSIX_CONFIG_VECTOR;
+ else
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
+ p_info);
+ if (IS_ERR(vq))
+ return vq;
+
+ if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
+ msix_vec == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(msix_vec))
+ return vq;
+
+ /* allocate per-vq irq if available and necessary */
+ snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
+ "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec], vq);
+ if (err) {
+ vp_del_vq(vq, *p_info);
+ return ERR_PTR(err);
+ }
+
+ return vq;
+}
+
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[],
+ enum vp_vq_vector_policy vector_policy,
+ struct irq_affinity *desc)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
+ struct virtqueue_info *vqi;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
+ struct virtqueue *vq;
+ bool per_vq_vectors;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto error_find;
+ }
+
+ per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;
+
if (per_vq_vectors) {
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
- for (i = 0; i < nvqs; ++i)
- if (names[i] && callbacks[i])
+ for (i = 0; i < nvqs; ++i) {
+ vqi = &vqs_info[i];
+ if (vqi->name && vqi->callback)
++nvectors;
+ }
+ if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+ ++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2;
}
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
- per_vq_vectors ? desc : NULL);
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
if (err)
goto error_find;
vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ vqi = &vqs_info[i];
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
-
- if (!callbacks[i])
- msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vp_dev->per_vq_vectors)
- msix_vec = allocated_vectors++;
- else
- msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false,
- msix_vec);
+ vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx, false,
+ &allocated_vectors, vector_policy,
+ &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
}
+ }
- if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
- continue;
-
- /* allocate per-vq irq if available and necessary */
- snprintf(vp_dev->msix_names[msix_vec],
- sizeof *vp_dev->msix_names,
- "%s-%s",
- dev_name(&vp_dev->vdev.dev), names[i]);
- err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
- vring_interrupt, 0,
- vp_dev->msix_names[msix_vec],
- vqs[i]);
- if (err)
- goto error_find;
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
+ avq->name, false, true, &allocated_vectors,
+ vector_policy, &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto error_find;
}
+
return 0;
error_find:
@@ -359,16 +455,25 @@ error_find:
}
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx)
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
int i, err, queue_idx = 0;
+ struct virtqueue *vq;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto out_del_vqs;
+ }
+
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
@@ -377,19 +482,32 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
vp_dev->intx_enabled = 1;
vp_dev->per_vq_vectors = false;
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false,
- VIRTIO_MSI_NO_VECTOR);
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx,
+ VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto out_del_vqs;
}
}
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
+ false, VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto out_del_vqs;
+ }
+
return 0;
out_del_vqs:
vp_del_vqs(vdev);
@@ -398,25 +516,33 @@ out_del_vqs:
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_EACH, desc);
+ if (!err)
+ return 0;
+ /* Fallback: MSI-X with one shared vector for config and
+ * slow path queues, one vector per queue for the rest.
+ */
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED, desc);
if (!err)
return 0;
/* Is there an interrupt? If not give up. */
if (!(to_vp_device(vdev)->pci_dev->irq))
return err;
/* Finally fall back to regular interrupts. */
- return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info);
}
const char *vp_bus_name(struct virtio_device *vdev)
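
vp_find_vqs() now walks a four-rung fallback ladder instead of two; condensed:

	/*
	 * 1. VP_VQ_VECTOR_POLICY_EACH:        one vector for config changes,
	 *                                     one per vq (admin vq included)
	 * 2. VP_VQ_VECTOR_POLICY_SHARED_SLOW: slow path vqs share the config
	 *                                     vector, fast path vqs get their own
	 * 3. VP_VQ_VECTOR_POLICY_SHARED:      one vector for config, one shared
	 *                                     by all vqs
	 * 4. vp_find_vqs_intx():              a single shared legacy interrupt
	 */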
@@ -460,7 +586,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!vp_dev->per_vq_vectors ||
- vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
+ vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
return NULL;
return pci_irq_get_affinity(vp_dev->pci_dev,
@@ -568,6 +695,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->pci_dev = pci_dev;
INIT_LIST_HEAD(&vp_dev->virtqueues);
+ INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
spin_lock_init(&vp_dev->lock);
/* enable the device */
@@ -666,6 +794,46 @@ static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
return num_vfs;
}
+static void virtio_pci_reset_prepare(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret = 0;
+
+ ret = virtio_device_reset_prepare(&vp_dev->vdev);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ dev_warn(&pci_dev->dev, "Reset prepare failure: %d",
+ ret);
+ return;
+ }
+
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void virtio_pci_reset_done(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ if (pci_is_enabled(pci_dev))
+ return;
+
+ ret = pci_enable_device(pci_dev);
+ if (!ret) {
+ pci_set_master(pci_dev);
+ ret = virtio_device_reset_done(&vp_dev->vdev);
+ }
+
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(&pci_dev->dev, "Reset done failure: %d", ret);
+}
+
+static const struct pci_error_handlers virtio_pci_err_handler = {
+ .reset_prepare = virtio_pci_reset_prepare,
+ .reset_done = virtio_pci_reset_done,
+};
+
static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
@@ -675,6 +843,7 @@ static struct pci_driver virtio_pci_driver = {
.driver.pm = &virtio_pci_pm_ops,
#endif
.sriov_configure = virtio_pci_sriov_configure,
+ .err_handler = &virtio_pci_err_handler,
};
struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 7fef52bee455..8cd01de27baf 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -35,7 +35,7 @@ struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
- /* the list node for the virtqueues list */
+ /* the list node for the virtqueues or slow_virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
@@ -44,10 +44,13 @@ struct virtio_pci_vq_info {
struct virtio_pci_admin_vq {
/* Virtqueue info associated with this admin queue. */
- struct virtio_pci_vq_info info;
- /* serializing admin commands execution and virtqueue deletion */
- struct mutex cmd_lock;
+ struct virtio_pci_vq_info *info;
+ /* Protects virtqueue access. */
+ spinlock_t lock;
u64 supported_cmds;
+ u64 supported_caps;
+ u8 max_dev_parts_objects;
+ struct ida dev_parts_ida;
/* Name of the admin queue: avq.$vq_index. */
char name[10];
u16 vq_index;
@@ -66,9 +69,12 @@ struct virtio_pci_device {
/* Where to read and clear interrupt */
u8 __iomem *isr;
- /* a list of queues so we can dispatch IRQs */
+ /* Lists of regular and slow-path queues,
+ * so we can dispatch IRQs.
+ */
spinlock_t lock;
struct list_head virtqueues;
+ struct list_head slow_virtqueues;
/* Array of all virtqueues reported in the
* PCI common config num_queues field
@@ -102,7 +108,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
- bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
+ int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};
/* Constants for MSI-X */
@@ -127,8 +133,7 @@ bool vp_notify(struct virtqueue *vq);
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);
@@ -165,17 +170,31 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))
+#define VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP \
+ (BIT_ULL(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_GET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_SET) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_DEV_MODE_SET))
+
/* Unlike modern drivers which support hardware virtio devices, legacy drivers
* assume software-based devices: e.g. they don't use proper memory barriers
* on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
* or less by chance. For now, only support legacy IO on X86.
*/
#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
-#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP
+#define VIRTIO_ADMIN_CMD_BITMAP (VIRTIO_LEGACY_ADMIN_CMD_BITMAP | \
+ VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)
#else
-#define VIRTIO_ADMIN_CMD_BITMAP 0
+#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP
#endif
+bool vp_is_avq(struct virtio_device *vdev, unsigned int index);
+void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
struct virtio_admin_cmd *cmd);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f62b530aa3b5..7182f43ed055 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/virtio_pci_admin.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
@@ -28,14 +29,49 @@ static u64 vp_get_features(struct virtio_device *vdev)
return vp_modern_get_features(&vp_dev->mdev);
}
-static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ *num = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return false;
+ return 0;
+
+ *num = vp_modern_avq_num(&vp_dev->mdev);
+ if (!(*num))
+ return -EINVAL;
+ *index = vp_modern_avq_index(&vp_dev->mdev);
+ return 0;
+}
- return index == vp_dev->admin_vq.vq_index;
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ unsigned int status_size = sizeof(struct virtio_admin_cmd_status);
+ struct virtio_admin_cmd *cmd;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((cmd = virtqueue_get_buf(vq, &len))) {
+ /* If the number of bytes written by the device is less
+ * than the size of struct virtio_admin_cmd_status, the
+ * remaining status bytes will remain zero-initialized,
+ * since the buffer was zeroed during allocation.
+ * In this case, set the size of command_specific_result
+ * to 0.
+ */
+ if (len < status_size)
+ cmd->result_sg_size = 0;
+ else
+ cmd->result_sg_size = len - status_size;
+ complete(&cmd->completion);
+ }
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
}
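
The completion-based flow replaces the old polling loop: the submitter sleeps on the command's completion, and the interrupt path above wakes it once the device returns the buffer. A condensed view of the pairing, using the field names from this file:

/*
 * submitter (process context)              vp_modern_avq_done (IRQ context)
 * ---------------------------              --------------------------------
 * init_completion(&cmd->completion);
 * virtqueue_add_sgs(vq, ..., cmd, ...);
 * virtqueue_kick(vq);
 * wait_for_completion(&cmd->completion);   cmd = virtqueue_get_buf(vq, &len);
 *   ... sleeps ...                         cmd->result_sg_size = len - status_size;
 *                                          complete(&cmd->completion);
 * return cmd->ret;
 */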
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
@@ -43,12 +79,13 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
struct scatterlist **sgs,
unsigned int out_num,
unsigned int in_num,
- void *data)
+ struct virtio_admin_cmd *cmd)
{
struct virtqueue *vq;
- int ret, len;
+ unsigned long flags;
+ int ret;
- vq = admin_vq->info.vq;
+ vq = admin_vq->info->vq;
if (!vq)
return -EIO;
@@ -57,21 +94,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
!((1ULL << opcode) & admin_vq->supported_cmds))
return -EOPNOTSUPP;
- ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
- if (ret < 0)
- return -EIO;
+ init_completion(&cmd->completion);
- if (unlikely(!virtqueue_kick(vq)))
+again:
+ if (virtqueue_is_broken(vq))
return -EIO;
- while (!virtqueue_get_buf(vq, &len) &&
- !virtqueue_is_broken(vq))
- cpu_relax();
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ cpu_relax();
+ goto again;
+ }
+ goto unlock_err;
+ }
+ if (!virtqueue_kick(vq))
+ goto unlock_err;
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
- if (virtqueue_is_broken(vq))
- return -EIO;
+ wait_for_completion(&cmd->completion);
- return 0;
+ return cmd->ret;
+
+unlock_err:
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ return -EIO;
}
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
@@ -122,12 +171,9 @@ int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
in_num++;
}
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
le16_to_cpu(cmd->opcode),
- sgs, out_num, in_num, sgs);
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
-
+ sgs, out_num, in_num, cmd);
if (ret) {
dev_err(&vdev->dev,
"Failed to execute command on admin vq: %d\n.", ret);
@@ -186,27 +232,136 @@ end:
kfree(data);
}
-static void vp_modern_avq_activate(struct virtio_device *vdev)
+static void
+virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd_cap_get_data *get_data;
+ struct virtio_admin_cmd_cap_set_data *set_data;
+ struct virtio_dev_parts_cap *result;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ struct scatterlist data_sg;
+ u8 resource_objects_limit;
+ u16 set_data_size;
+ int ret;
+
+ get_data = kzalloc(sizeof(*get_data), GFP_KERNEL);
+ if (!get_data)
+ return;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ goto end;
+
+ get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
+ sg_init_one(&data_sg, get_data, sizeof(*get_data));
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto err_get;
+
+ set_data_size = sizeof(*set_data) + sizeof(*result);
+ set_data = kzalloc(set_data_size, GFP_KERNEL);
+ if (!set_data)
+ goto err_get;
+
+ set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
+
+ /* Set the limit to the minimum of the GET and SET values
+ * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP
+ * is a globally unique value per PF, there is no possibility of
+ * overlap between GET and SET operations.
+ */
+ resource_objects_limit = min(result->get_parts_resource_objects_limit,
+ result->set_parts_resource_objects_limit);
+ result->get_parts_resource_objects_limit = resource_objects_limit;
+ result->set_parts_resource_objects_limit = resource_objects_limit;
+ memcpy(set_data->cap_specific_data, result, sizeof(*result));
+ sg_init_one(&data_sg, set_data, set_data_size);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = NULL;
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET);
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto err_set;
+
+ /* Initialize the IDA used to manage the dev parts objects */
+ ida_init(&vp_dev->admin_vq.dev_parts_ida);
+ vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit;
+
+err_set:
+ kfree(set_data);
+err_get:
+ kfree(result);
+end:
+ kfree(get_data);
+}
+
+static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd_query_cap_id_result *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+ sg_init_one(&result_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.result_sg = &result_sg;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ /* Max number of caps fits into a single u64 */
+ BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64));
+
+ vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]);
+
+ if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP)))
+ goto end;
+
+ virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev);
+end:
+ kfree(data);
+}
+
+static void vp_modern_avq_activate(struct virtio_device *vdev)
+{
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
- __virtqueue_unbreak(admin_vq->info.vq);
virtio_pci_admin_cmd_list_init(vdev);
+ virtio_pci_admin_cmd_cap_init(vdev);
}
-static void vp_modern_avq_deactivate(struct virtio_device *vdev)
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ struct virtio_admin_cmd *cmd;
+ struct virtqueue *vq;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
- __virtqueue_break(admin_vq->info.vq);
+ vq = vp_dev->admin_vq.info->vq;
+ if (!vq)
+ return;
+
+ while ((cmd = virtqueue_detach_unused_buf(vq))) {
+ cmd->ret = -EIO;
+ complete(&cmd->completion);
+ }
}
static void vp_transport_features(struct virtio_device *vdev, u64 features)
@@ -403,7 +558,7 @@ static void vp_reset(struct virtio_device *vdev)
while (vp_modern_get_status(mdev))
msleep(1);
- vp_modern_avq_deactivate(vdev);
+ vp_modern_avq_cleanup(vdev);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
@@ -552,8 +707,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
- num = is_avq ?
- VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
+ num = vp_modern_get_queue_size(mdev, index);
/* Check if queue is either not available or already active. */
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
@@ -580,12 +734,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
goto err;
}
- if (is_avq) {
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
- vp_dev->admin_vq.info.vq = vq;
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
- }
-
return vq;
err:
@@ -595,13 +743,12 @@ err:
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
- int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
+ int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);
if (rc)
return rc;
@@ -621,12 +768,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- if (vp_is_avq(&vp_dev->vdev, vq->index)) {
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
- vp_dev->admin_vq.info.vq = NULL;
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
- }
-
if (vp_dev->msix_enabled)
vp_modern_queue_vector(mdev, vq->index,
VIRTIO_MSI_NO_VECTOR);
@@ -736,44 +877,352 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
-static int vp_modern_create_avq(struct virtio_device *vdev)
+/*
+ * virtio_pci_admin_has_dev_parts - Checks whether the device parts
+ * functionality is supported
+ * @pdev: VF pci_dev
+ *
+ * Returns true if the device parts functionality is supported.
+ */
+bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_pci_device *vp_dev;
+
+ if (!virtio_dev)
+ return false;
+
+ if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ vp_dev = to_vp_device(virtio_dev);
+
+ if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) ==
+ VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP))
+ return false;
+
+ return vp_dev->admin_vq.max_dev_parts_objects;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts);
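
A likely consumer is a live-migration driver for virtio VFs, which would gate its setup on this helper. A hedged sketch; my_migration_probe is a hypothetical caller, not part of this series:

static int my_migration_probe(struct pci_dev *vf_pdev)
{
	/* Only wire up device-parts migration when the PF advertises the
	 * full device-parts admin command set and a nonzero object limit.
	 */
	if (!virtio_pci_admin_has_dev_parts(vf_pdev))
		return -EOPNOTSUPP;

	/* ... register migration ops using obj_create()/dev_parts_get() ... */
	return 0;
}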
+
+/*
+ * virtio_pci_admin_mode_set - Sets the mode of a member device
+ * @pdev: VF pci_dev
+ * @flags: device mode's flags
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_dev_mode_set_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->flags = flags;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set);
+
+/*
+ * virtio_pci_admin_obj_create - Creates an object for a given type and operation,
+ * following the max objects that can be created for that request.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @operation_type: Operation type
+ * @obj_id: Output unique object id
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type,
+ u32 *obj_id)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data);
+ struct virtio_admin_cmd_resource_obj_create_data *obj_create_data;
+ struct virtio_resource_obj_dev_parts obj_dev_parts = {};
struct virtio_pci_admin_vq *avq;
- struct virtqueue *vq;
- u16 admin_q_num;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ void *data;
+ int id = -1;
+ int vf_id;
+ int ret;
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return 0;
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
+ return -EOPNOTSUPP;
- admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
- if (!admin_q_num)
+ if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET &&
+ operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET)
return -EINVAL;
- avq = &vp_dev->admin_vq;
- avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
- sprintf(avq->name, "avq.%u", avq->vq_index);
- vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
- avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
- if (IS_ERR(vq)) {
- dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
- PTR_ERR(vq));
- return PTR_ERR(vq);
+ avq = &to_vp_device(virtio_dev)->admin_vq;
+ if (!avq->max_dev_parts_objects)
+ return -EOPNOTSUPP;
+
+ id = ida_alloc_range(&avq->dev_parts_ida, 0,
+ avq->max_dev_parts_objects - 1, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *obj_id = id;
+ data_size += sizeof(obj_dev_parts);
+ data = kzalloc(data_size, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto end;
}
- vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
- return 0;
+ obj_create_data = data;
+ obj_create_data->hdr.type = cpu_to_le16(obj_type);
+ obj_create_data->hdr.id = cpu_to_le32(*obj_id);
+ obj_dev_parts.type = operation_type;
+ memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts,
+ sizeof(obj_dev_parts));
+ sg_init_one(&data_sg, data, data_size);
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+end:
+ if (ret)
+ ida_free(&avq->dev_parts_ida, id);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create);
-static void vp_modern_destroy_avq(struct virtio_device *vdev)
+/*
+ * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_resource_obj_cmd_hdr *data;
+ struct virtio_pci_device *vp_dev;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return;
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->type = cpu_to_le16(obj_type);
+ data->id = cpu_to_le32(id);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret) {
+ vp_dev = to_vp_device(virtio_dev);
+ ida_free(&vp_dev->admin_vq.dev_parts_ida, id);
+ }
- vp_dev->del_vq(&vp_dev->admin_vq.info);
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy);
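
Taken together, the create/destroy pair brackets a device-parts session. A sketch of the expected calling pattern; my_parts_session is hypothetical and error handling is trimmed:

static int my_parts_session(struct pci_dev *vf_pdev)
{
	u32 obj_id;
	int ret;

	ret = virtio_pci_admin_obj_create(vf_pdev,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET,
					  &obj_id);
	if (ret)
		return ret;

	/* ... issue dev_parts_metadata_get()/dev_parts_get() with obj_id ... */

	return virtio_pci_admin_obj_destroy(vf_pdev,
					    VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					    obj_id);
}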
+
+/*
+ * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts
+ * identified by the below attributes.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ * @metadata_type: Metadata type
+ * @out: Upon success, holds the requested metadata (the device parts size,
+ * for the size type)
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type,
+ u32 id, u8 metadata_type, u32 *out)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_dev_parts_metadata_result *result;
+ struct virtio_admin_cmd_dev_parts_metadata_data *data;
+ struct scatterlist data_sg, result_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE)
+ return -EOPNOTSUPP;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ data->hdr.type = cpu_to_le16(obj_type);
+ data->hdr.id = cpu_to_le32(id);
+ data->type = metadata_type;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret)
+ *out = le32_to_cpu(result->parts_size.size);
+
+ kfree(result);
+end:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get);
+
+/*
+ * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the
+ * attributes below.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ * @get_type: Get type
+ * @res_sg: Upon success holds the output result data
+ * @res_size: Upon success holds the output result size
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id,
+ u8 get_type, struct scatterlist *res_sg,
+ u32 *res_size)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_dev_parts_get_data *data;
+ struct scatterlist data_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL)
+ return -EOPNOTSUPP;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->hdr.type = cpu_to_le16(obj_type);
+ data->hdr.id = cpu_to_le32(id);
+ data->type = get_type;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = res_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret)
+ *res_size = cmd.result_sg_size;
+
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get);
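
The two GET helpers are meant to be used back to back: query the parts size first, then fetch the parts into a driver buffer. A minimal sketch under that assumption; my_read_dev_parts is illustrative:

static int my_read_dev_parts(struct pci_dev *vf_pdev, u32 obj_id,
			     void **buf, u32 *len)
{
	struct scatterlist sg;
	u32 size;
	int ret;

	ret = virtio_pci_admin_dev_parts_metadata_get(vf_pdev,
			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
			VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, &size);
	if (ret)
		return ret;

	*buf = kzalloc(size, GFP_KERNEL);	/* sg_init_one() needs lowmem */
	if (!*buf)
		return -ENOMEM;

	sg_init_one(&sg, *buf, size);
	ret = virtio_pci_admin_dev_parts_get(vf_pdev,
			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
			VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL, &sg, len);
	if (ret)
		kfree(*buf);
	return ret;
}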
+
+/*
+ * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the below attributes.
+ * @pdev: VF pci_dev
+ * @data_sg: The device parts data; its layout follows struct virtio_admin_cmd_dev_parts_set_data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = data_sg;
+ return vp_modern_admin_cmd_exec(virtio_dev, &cmd);
}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set);
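
On the SET side the caller builds the payload itself. A sketch, assuming struct virtio_admin_cmd_dev_parts_set_data is a resource-object header immediately followed by the parts payload; my_write_dev_parts is illustrative:

static int my_write_dev_parts(struct pci_dev *vf_pdev, u32 obj_id,
			      const void *parts, u32 parts_len)
{
	struct virtio_admin_cmd_dev_parts_set_data *data;
	struct scatterlist sg;
	u32 size = sizeof(*data) + parts_len;
	int ret;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->hdr.type = cpu_to_le16(VIRTIO_RESOURCE_OBJ_DEV_PARTS);
	data->hdr.id = cpu_to_le32(obj_id);
	memcpy(data + 1, parts, parts_len);	/* parts follow the header */

	sg_init_one(&sg, data, size);
	ret = virtio_pci_admin_dev_parts_set(vf_pdev, &sg);
	kfree(data);
	return ret;
}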
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
@@ -793,8 +1242,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
- .create_avq = vp_modern_create_avq,
- .destroy_avq = vp_modern_destroy_avq,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -815,8 +1262,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
- .create_avq = vp_modern_create_avq,
- .destroy_avq = vp_modern_destroy_avq,
};
/* the PCI probing function */
@@ -840,11 +1285,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
- vp_dev->is_avq = vp_is_avq;
+ vp_dev->avq_index = vp_avq_index;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
- mutex_init(&vp_dev->admin_vq.cmd_lock);
+ spin_lock_init(&vp_dev->admin_vq.lock);
return 0;
}
@@ -852,6 +1297,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- mutex_destroy(&vp_dev->admin_vq.cmd_lock);
vp_modern_remove(mdev);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6f7e5010a673..b784aab66867 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -69,12 +69,20 @@
struct vring_desc_state_split {
void *data; /* Data for callback. */
- struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
+
+ /* Indirect desc table and extra table, if any. The two are allocated
+ * together, so we avoid putting extra pressure on the memory allocator.
+ */
+ struct vring_desc *indir_desc;
};
struct vring_desc_state_packed {
void *data; /* Data for callback. */
- struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
+
+ /* Indirect desc table and extra table, if any. The two are allocated
+ * together, so we avoid putting extra pressure on the memory allocator.
+ */
+ struct vring_packed_desc *indir_desc;
u16 num; /* Descriptor list length. */
u16 last; /* The last desc state in a list. */
};
@@ -172,14 +180,6 @@ struct vring_virtqueue {
/* Host publishes avail event idx */
bool event;
- /* Do DMA mapping by driver */
- bool premapped;
-
- /* Do unmap or not for desc. Just when premapped is False and
- * use_dma_api is true, this is true.
- */
- bool do_unmap;
-
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
@@ -223,15 +223,6 @@ struct vring_virtqueue {
#endif
};
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);
@@ -297,6 +288,12 @@ static bool vring_use_dma_api(const struct virtio_device *vdev)
return false;
}
+static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring,
+ const struct vring_desc_extra *extra)
+{
+ return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR);
+}
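
This predicate replaces the per-virtqueue premapped and do_unmap flags: ownership of the DMA mapping is now recorded per descriptor, with DMA_MAPPING_ERROR in the extra table acting as the "driver owns the mapping" sentinel. In short:

/*
 * core-mapped buffer:  extra->addr = addr;              core unmaps it
 * premapped buffer:    extra->addr = DMA_MAPPING_ERROR; driver unmaps it
 *
 * vring_need_unmap_buffer() is then a single test that works for both
 * the split and packed rings.
 */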
+
size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
@@ -364,13 +361,17 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
- enum dma_data_direction direction, dma_addr_t *addr)
+ enum dma_data_direction direction, dma_addr_t *addr,
+ u32 *len, bool premapped)
{
- if (vq->premapped) {
+ if (premapped) {
*addr = sg_dma_address(sg);
+ *len = sg_dma_len(sg);
return 0;
}
+ *len = sg->length;
+
if (!vq->use_dma_api) {
/*
* If DMA is not used, KMSAN doesn't know that the scatterlist
@@ -440,61 +441,44 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
* Split ring specific functions - *_split().
*/
-static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
- const struct vring_desc *desc)
-{
- u16 flags;
-
- if (!vq->do_unmap)
- return;
-
- flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
-
- dma_unmap_page(vring_dma_dev(vq),
- virtio64_to_cpu(vq->vq.vdev, desc->addr),
- virtio32_to_cpu(vq->vq.vdev, desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
-}
-
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
- unsigned int i)
+ struct vring_desc_extra *extra)
{
- struct vring_desc_extra *extra = vq->split.desc_extra;
u16 flags;
- flags = extra[i].flags;
+ flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
if (!vq->use_dma_api)
goto out;
dma_unmap_single(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
+ extra->addr,
+ extra->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
- if (!vq->do_unmap)
+ if (!vring_need_unmap_buffer(vq, extra))
goto out;
dma_unmap_page(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
+ extra->addr,
+ extra->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
out:
- return extra[i].next;
+ return extra->next;
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
unsigned int total_sg,
gfp_t gfp)
{
+ struct vring_desc_extra *extra;
struct vring_desc *desc;
- unsigned int i;
+ unsigned int i, size;
/*
* We require lowmem mappings for the descriptors because
@@ -503,40 +487,41 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
*/
gfp &= ~__GFP_HIGHMEM;
- desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
+ size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;
+
+ desc = kmalloc(size, gfp);
if (!desc)
return NULL;
+ extra = (struct vring_desc_extra *)&desc[total_sg];
+
for (i = 0; i < total_sg; i++)
- desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
+ extra[i].next = i + 1;
+
return desc;
}
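
The single allocation holds both tables; the extra table starts where the descriptor array ends. Sketch of the layout for total_sg entries:

/*
 * desc                               extra = (void *)&desc[total_sg]
 * |                                  |
 * v                                  v
 * +----------------------------+-----------------------------------+
 * | struct vring_desc[total_sg]| struct vring_desc_extra[total_sg] |
 * +----------------------------+-----------------------------------+
 *
 * One kmalloc() covers both the device-visible descriptors and the
 * driver-private unmap bookkeeping.
 */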
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
struct vring_desc *desc,
+ struct vring_desc_extra *extra,
unsigned int i,
dma_addr_t addr,
unsigned int len,
- u16 flags,
- bool indirect)
+ u16 flags, bool premapped)
{
- struct vring_virtqueue *vring = to_vvq(vq);
- struct vring_desc_extra *extra = vring->split.desc_extra;
u16 next;
desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
desc[i].len = cpu_to_virtio32(vq->vdev, len);
- if (!indirect) {
- next = extra[i].next;
- desc[i].next = cpu_to_virtio16(vq->vdev, next);
+ extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
+ extra[i].len = len;
+ extra[i].flags = flags;
+
+ next = extra[i].next;
- extra[i].addr = addr;
- extra[i].len = len;
- extra[i].flags = flags;
- } else
- next = virtio16_to_cpu(vq->vdev, desc[i].next);
+ desc[i].next = cpu_to_virtio16(vq->vdev, next);
return next;
}
@@ -548,9 +533,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
unsigned int in_sgs,
void *data,
void *ctx,
+ bool premapped,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ struct vring_desc_extra *extra;
struct scatterlist *sg;
struct vring_desc *desc;
unsigned int i, n, avail, descs_used, prev, err_idx;
@@ -586,9 +573,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Set up rest to use this indirect table. */
i = 0;
descs_used = 1;
+ extra = (struct vring_desc_extra *)&desc[total_sg];
} else {
indirect = false;
desc = vq->split.vring.desc;
+ extra = vq->split.desc_extra;
i = head;
descs_used = total_sg;
}
@@ -610,40 +599,41 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr;
+ u32 len;
- if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
+ if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
goto unmap_release;
prev = i;
/* Note that we trust indirect descriptor
* table since it uses stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
+ i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
VRING_DESC_F_NEXT,
- indirect);
+ premapped);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr;
+ u32 len;
- if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
+ if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
goto unmap_release;
prev = i;
/* Note that we trust indirect descriptor
* table since it uses stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, i, addr,
- sg->length,
+ i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
- indirect);
+ premapped);
}
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
- if (!indirect && vq->do_unmap)
+ if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
@@ -652,18 +642,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
dma_addr_t addr = vring_map_single(
vq, desc, total_sg * sizeof(struct vring_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr)) {
- if (vq->premapped)
- goto free_indirect;
-
+ if (vring_mapping_error(vq, addr))
goto unmap_release;
- }
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
+ vq->split.desc_extra,
head, addr,
total_sg * sizeof(struct vring_desc),
- VRING_DESC_F_INDIRECT,
- false);
+ VRING_DESC_F_INDIRECT, false);
}
/* We're using some buffers from the free list. */
@@ -716,14 +702,10 @@ unmap_release:
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- if (indirect) {
- vring_unmap_one_split_indirect(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
- } else
- i = vring_unmap_one_split(vq, i);
+
+ i = vring_unmap_one_split(vq, &extra[i]);
}
-free_indirect:
if (indirect)
kfree(desc);
@@ -765,22 +747,25 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
void **ctx)
{
+ struct vring_desc_extra *extra;
unsigned int i, j;
__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
vq->split.desc_state[head].data = NULL;
+ extra = vq->split.desc_extra;
+
/* Put back on free list: unmap first-level descriptors and find end */
i = head;
while (vq->split.vring.desc[i].flags & nextflag) {
- vring_unmap_one_split(vq, i);
+ vring_unmap_one_split(vq, &extra[i]);
i = vq->split.desc_extra[i].next;
vq->vq.num_free++;
}
- vring_unmap_one_split(vq, i);
+ vring_unmap_one_split(vq, &extra[i]);
vq->split.desc_extra[i].next = vq->free_head;
vq->free_head = head;
@@ -790,21 +775,24 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
if (vq->indirect) {
struct vring_desc *indir_desc =
vq->split.desc_state[head].indir_desc;
- u32 len;
+ u32 len, num;
/* Free the indirect table, if any, now that it's unmapped. */
if (!indir_desc)
return;
-
len = vq->split.desc_extra[head].len;
BUG_ON(!(vq->split.desc_extra[head].flags &
VRING_DESC_F_INDIRECT));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
- if (vq->do_unmap) {
- for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ num = len / sizeof(struct vring_desc);
+
+ extra = (struct vring_desc_extra *)&indir_desc[num];
+
+ if (vq->use_dma_api) {
+ for (j = 0; j < num; j++)
+ vring_unmap_one_split(vq, &extra[j]);
}
kfree(indir_desc);
@@ -1138,6 +1126,64 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
return 0;
}
+static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name,
+ struct device *dma_dev)
+{
+ struct vring_virtqueue *vq;
+ int err;
+
+ vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+ if (!vq)
+ return NULL;
+
+ vq->packed_ring = false;
+ vq->vq.callback = callback;
+ vq->vq.vdev = vdev;
+ vq->vq.name = name;
+ vq->vq.index = index;
+ vq->vq.reset = false;
+ vq->we_own_ring = false;
+ vq->notify = notify;
+ vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ vq->broken = true;
+#else
+ vq->broken = false;
+#endif
+ vq->dma_dev = dma_dev;
+ vq->use_dma_api = vring_use_dma_api(vdev);
+
+ vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
+ !context;
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+ if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+ vq->weak_barriers = false;
+
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
+ }
+
+ virtqueue_vring_init_split(vring_split, vq);
+
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
+
+ spin_lock(&vdev->vqs_list_lock);
+ list_add_tail(&vq->vq.list, &vdev->vqs);
+ spin_unlock(&vdev->vqs_list_lock);
+ return &vq->vq;
+}
+
static struct virtqueue *vring_create_virtqueue_split(
unsigned int index,
unsigned int num,
@@ -1160,7 +1206,7 @@ static struct virtqueue *vring_create_virtqueue_split(
if (err)
return NULL;
- vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
context, notify, callback, name, dma_dev);
if (!vq) {
vring_free_split(&vring_split, vdev, dma_dev);
@@ -1236,7 +1282,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
- if (!vq->do_unmap)
+ if (!vring_need_unmap_buffer(vq, extra))
return;
dma_unmap_page(vring_dma_dev(vq),
@@ -1246,27 +1292,12 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
}
}
-static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
- const struct vring_packed_desc *desc)
-{
- u16 flags;
-
- if (!vq->do_unmap)
- return;
-
- flags = le16_to_cpu(desc->flags);
-
- dma_unmap_page(vring_dma_dev(vq),
- le64_to_cpu(desc->addr),
- le32_to_cpu(desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
-}
-
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
gfp_t gfp)
{
+ struct vring_desc_extra *extra;
struct vring_packed_desc *desc;
+ int i, size;
/*
* We require lowmem mappings for the descriptors because
@@ -1275,7 +1306,16 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
*/
gfp &= ~__GFP_HIGHMEM;
- desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
+ size = (sizeof(*desc) + sizeof(*extra)) * total_sg;
+
+ desc = kmalloc(size, gfp);
+ if (!desc)
+ return NULL;
+
+ extra = (struct vring_desc_extra *)&desc[total_sg];
+
+ for (i = 0; i < total_sg; i++)
+ extra[i].next = i + 1;
return desc;
}
@@ -1286,11 +1326,13 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
+ bool premapped,
gfp_t gfp)
{
+ struct vring_desc_extra *extra;
struct vring_packed_desc *desc;
struct scatterlist *sg;
- unsigned int i, n, err_idx;
+ unsigned int i, n, err_idx, len;
u16 head, id;
dma_addr_t addr;
@@ -1299,6 +1341,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
if (!desc)
return -ENOMEM;
+ extra = (struct vring_desc_extra *)&desc[total_sg];
+
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
kfree(desc);
@@ -1313,13 +1357,21 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
if (vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
+ DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ &addr, &len, premapped))
goto unmap_release;
desc[i].flags = cpu_to_le16(n < out_sgs ?
0 : VRING_DESC_F_WRITE);
desc[i].addr = cpu_to_le64(addr);
- desc[i].len = cpu_to_le32(sg->length);
+ desc[i].len = cpu_to_le32(len);
+
+ if (unlikely(vq->use_dma_api)) {
+ extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
+ extra[i].len = len;
+ extra[i].flags = n < out_sgs ? 0 : VRING_DESC_F_WRITE;
+ }
+
i++;
}
}
@@ -1328,12 +1380,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
addr = vring_map_single(vq, desc,
total_sg * sizeof(struct vring_packed_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr)) {
- if (vq->premapped)
- goto free_desc;
-
+ if (vring_mapping_error(vq, addr))
goto unmap_release;
- }
vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
@@ -1389,9 +1437,8 @@ unmap_release:
err_idx = i;
for (i = 0; i < err_idx; i++)
- vring_unmap_desc_packed(vq, &desc[i]);
+ vring_unmap_extra_packed(vq, &extra[i]);
-free_desc:
kfree(desc);
END_USE(vq);
@@ -1405,12 +1452,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
unsigned int in_sgs,
void *data,
void *ctx,
+ bool premapped,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
struct vring_packed_desc *desc;
struct scatterlist *sg;
- unsigned int i, n, c, descs_used, err_idx;
+ unsigned int i, n, c, descs_used, err_idx, len;
__le16 head_flags, flags;
u16 head, id, prev, curr, avail_used_flags;
int err;
@@ -1431,7 +1479,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
if (virtqueue_use_indirect(vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
- in_sgs, data, gfp);
+ in_sgs, data, premapped, gfp);
if (err != -ENOMEM) {
END_USE(vq);
return err;
@@ -1466,7 +1514,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
dma_addr_t addr;
if (vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
+ DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ &addr, &len, premapped))
goto unmap_release;
flags = cpu_to_le16(vq->packed.avail_used_flags |
@@ -1478,12 +1527,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].flags = flags;
desc[i].addr = cpu_to_le64(addr);
- desc[i].len = cpu_to_le32(sg->length);
+ desc[i].len = cpu_to_le32(len);
desc[i].id = cpu_to_le16(id);
if (unlikely(vq->use_dma_api)) {
- vq->packed.desc_extra[curr].addr = addr;
- vq->packed.desc_extra[curr].len = sg->length;
+ vq->packed.desc_extra[curr].addr = premapped ?
+ DMA_MAPPING_ERROR : addr;
+ vq->packed.desc_extra[curr].len = len;
vq->packed.desc_extra[curr].flags =
le16_to_cpu(flags);
}
@@ -1625,18 +1675,22 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
}
if (vq->indirect) {
- u32 len;
+ struct vring_desc_extra *extra;
+ u32 len, num;
/* Free the indirect table, if any, now that it's unmapped. */
desc = state->indir_desc;
if (!desc)
return;
- if (vq->do_unmap) {
+ if (vq->use_dma_api) {
len = vq->packed.desc_extra[id].len;
- for (i = 0; i < len / sizeof(struct vring_packed_desc);
- i++)
- vring_unmap_desc_packed(vq, &desc[i]);
+ num = len / sizeof(struct vring_packed_desc);
+
+ extra = (struct vring_desc_extra *)&desc[num];
+
+ for (i = 0; i < num; i++)
+ vring_unmap_extra_packed(vq, &extra[i]);
}
kfree(desc);
state->indir_desc = NULL;
@@ -2050,36 +2104,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev)
+static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
+ struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name,
+ struct device *dma_dev)
{
- struct vring_virtqueue_packed vring_packed = {};
struct vring_virtqueue *vq;
int err;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
- goto err_ring;
-
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
- goto err_vq;
+ return NULL;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->vq.index = index;
vq->vq.reset = false;
- vq->we_own_ring = true;
+ vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
@@ -2090,8 +2137,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->packed_ring = true;
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
- vq->premapped = false;
- vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2100,26 +2145,52 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- err = vring_alloc_state_extra_packed(&vring_packed);
- if (err)
- goto err_state_extra;
+ err = vring_alloc_state_extra_packed(vring_packed);
+ if (err) {
+ kfree(vq);
+ return NULL;
+ }
- virtqueue_vring_init_packed(&vring_packed, !!callback);
+ virtqueue_vring_init_packed(vring_packed, !!callback);
- virtqueue_init(vq, num);
- virtqueue_vring_attach_packed(vq, &vring_packed);
+ virtqueue_init(vq, vring_packed->vring.num);
+ virtqueue_vring_attach_packed(vq, vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
+}
-err_state_extra:
- kfree(vq);
-err_vq:
- vring_free_packed(&vring_packed, vdev, dma_dev);
-err_ring:
- return NULL;
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name,
+ struct device *dma_dev)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct virtqueue *vq;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
+ return NULL;
+
+ vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
+ context, notify, callback, name, dma_dev);
+ if (!vq) {
+ vring_free_packed(&vring_packed, vdev, dma_dev);
+ return NULL;
+ }
+
+ to_vvq(vq)->we_own_ring = true;
+
+ return vq;
}
static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
@@ -2201,14 +2272,15 @@ static inline int virtqueue_add(struct virtqueue *_vq,
unsigned int in_sgs,
void *data,
void *ctx,
+ bool premapped,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
- out_sgs, in_sgs, data, ctx, gfp) :
+ out_sgs, in_sgs, data, ctx, premapped, gfp) :
virtqueue_add_split(_vq, sgs, total_sg,
- out_sgs, in_sgs, data, ctx, gfp);
+ out_sgs, in_sgs, data, ctx, premapped, gfp);
}
/**
@@ -2242,7 +2314,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
total_sg++;
}
return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
- data, NULL, gfp);
+ data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
@@ -2264,11 +2336,34 @@ int virtqueue_add_outbuf(struct virtqueue *vq,
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
+ return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
+ * virtqueue_add_outbuf_premapped - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * The scatterlist entries must already be DMA-mapped by the driver; the
+ * core reads their dma_address/dma_len directly and will not map or unmap
+ * them. Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Return:
+ * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
+ */
+int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
+
+/**
* virtqueue_add_inbuf - expose input buffers to other end
* @vq: the struct virtqueue we're talking about.
* @sg: scatterlist (must be well-formed and terminated!)
@@ -2286,7 +2381,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
void *data,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
@@ -2310,11 +2405,36 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
void *ctx,
gfp_t gfp)
{
- return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
+ return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
+ * virtqueue_add_inbuf_premapped - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @ctx: extra context for the token
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * The scatterlist entries must already be DMA-mapped by the driver; the
+ * core reads their dma_address/dma_len directly and will not map or unmap
+ * them. Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Return:
+ * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
+ */
+int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ void *ctx,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);
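
With the per-virtqueue premapped mode gone (virtqueue_set_dma_premapped() is removed further below), premapping is now a per-call property. A minimal sketch of the expected receive-side flow; my_post_rx_buf is a hypothetical driver function:

static int my_post_rx_buf(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;
	dma_addr_t addr;

	/* The driver maps the buffer itself and must also unmap it when
	 * the buffer comes back; the core only reads the dma address/len.
	 */
	addr = virtqueue_dma_map_single_attrs(vq, buf, len,
					      DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return -ENOMEM;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = addr;
	sg_dma_len(&sg) = len;

	return virtqueue_add_inbuf_premapped(vq, &sg, 1, buf, NULL,
					     GFP_ATOMIC);
}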
+
+/**
* virtqueue_dma_dev - get the dma dev
* @_vq: the struct virtqueue we're talking about.
*
@@ -2530,7 +2650,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->event_triggered)
- vq->event_triggered = false;
+ data_race(vq->event_triggered = false);
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
@@ -2588,7 +2708,7 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
/* Just a hint for performance: so it's ok that this can be racy! */
if (vq->event)
- vq->event_triggered = true;
+ data_race(vq->event_triggered = true);
pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
if (vq->vq.callback)
@@ -2598,70 +2718,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-/* Only available for split ring */
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev)
-{
- struct vring_virtqueue *vq;
- int err;
-
- if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
- return NULL;
-
- vq = kmalloc(sizeof(*vq), GFP_KERNEL);
- if (!vq)
- return NULL;
-
- vq->packed_ring = false;
- vq->vq.callback = callback;
- vq->vq.vdev = vdev;
- vq->vq.name = name;
- vq->vq.index = index;
- vq->vq.reset = false;
- vq->we_own_ring = false;
- vq->notify = notify;
- vq->weak_barriers = weak_barriers;
-#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
- vq->broken = true;
-#else
- vq->broken = false;
-#endif
- vq->dma_dev = dma_dev;
- vq->use_dma_api = vring_use_dma_api(vdev);
- vq->premapped = false;
- vq->do_unmap = vq->use_dma_api;
-
- vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
- !context;
- vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
-
- if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
- vq->weak_barriers = false;
-
- err = vring_alloc_state_extra_split(vring_split);
- if (err) {
- kfree(vq);
- return NULL;
- }
-
- virtqueue_vring_init_split(vring_split, vq);
-
- virtqueue_init(vq, vring_split->vring.num);
- virtqueue_vring_attach_split(vq, vring_split);
-
- spin_lock(&vdev->vqs_list_lock);
- list_add_tail(&vq->vq.list, &vdev->vqs);
- spin_unlock(&vdev->vqs_list_lock);
- return &vq->vq;
-}
-
struct virtqueue *vring_create_virtqueue(
unsigned int index,
unsigned int num,
@@ -2716,6 +2772,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
* @_vq: the struct virtqueue we're talking about.
* @num: new ring num
* @recycle: callback to recycle unused buffers
+ * @recycle_done: callback invoked once all unused buffers have been recycled
*
* When it is really necessary to create a new vring, it will set the current vq
* into the reset state. Then call the passed callback to recycle the buffer
@@ -2736,7 +2793,8 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
*
*/
int virtqueue_resize(struct virtqueue *_vq, u32 num,
- void (*recycle)(struct virtqueue *vq, void *buf))
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq))
{
struct vring_virtqueue *vq = to_vvq(_vq);
int err;
@@ -2753,6 +2811,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
err = virtqueue_disable_and_recycle(_vq, recycle);
if (err)
return err;
+ if (recycle_done)
+ recycle_done(_vq);
if (vq->packed_ring)
err = virtqueue_resize_packed(_vq, num);
@@ -2764,58 +2824,10 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
EXPORT_SYMBOL_GPL(virtqueue_resize);
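
A sketch of how a driver would use the new hook; my_recycle and my_recycle_done are hypothetical callbacks:

static void my_recycle(struct virtqueue *vq, void *buf)
{
	/* called once per unused buffer: free it or return it to a pool */
}

static void my_recycle_done(struct virtqueue *vq)
{
	/* called once, after the last unused buffer has been recycled,
	 * e.g. to tear down per-ring DMA or page-pool state
	 */
}

static int my_change_ring_size(struct virtqueue *vq, u32 num)
{
	return virtqueue_resize(vq, num, my_recycle, my_recycle_done);
}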
/**
- * virtqueue_set_dma_premapped - set the vring premapped mode
- * @_vq: the struct virtqueue we're talking about.
- *
- * Enable the premapped mode of the vq.
- *
- * The vring in premapped mode does not do dma internally, so the driver must
- * do dma mapping in advance. The driver must pass the dma_address through
- * dma_address of scatterlist. When the driver got a used buffer from
- * the vring, it has to unmap the dma address.
- *
- * This function must be called immediately after creating the vq, or after vq
- * reset, and before adding any buffers to it.
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error.
- * 0: success.
- * -EINVAL: vring does not use the dma api, so we can not enable premapped mode.
- */
-int virtqueue_set_dma_premapped(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- u32 num;
-
- START_USE(vq);
-
- num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
-
- if (num != vq->vq.num_free) {
- END_USE(vq);
- return -EINVAL;
- }
-
- if (!vq->use_dma_api) {
- END_USE(vq);
- return -EINVAL;
- }
-
- vq->premapped = true;
- vq->do_unmap = false;
-
- END_USE(vq);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
-
-/**
* virtqueue_reset - detach and recycle all unused buffers
* @_vq: the struct virtqueue we're talking about.
* @recycle: callback to recycle unused buffers
+ * @recycle_done: callback invoked once all unused buffers have been recycled
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
@@ -2827,7 +2839,8 @@ EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
* -EPERM: Operation not permitted
*/
int virtqueue_reset(struct virtqueue *_vq,
- void (*recycle)(struct virtqueue *vq, void *buf))
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq))
{
struct vring_virtqueue *vq = to_vvq(_vq);
int err;
@@ -2835,6 +2848,8 @@ int virtqueue_reset(struct virtqueue *_vq,
err = virtqueue_disable_and_recycle(_vq, recycle);
if (err)
return err;
+ if (recycle_done)
+ recycle_done(_vq);
if (vq->packed_ring)
virtqueue_reinit_packed(vq);
@@ -2845,7 +2860,6 @@ int virtqueue_reset(struct virtqueue *_vq,
}
EXPORT_SYMBOL_GPL(virtqueue_reset);
-/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -2859,11 +2873,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
{
struct vring_virtqueue_split vring_split = {};
- if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
- return NULL;
+ if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+ struct vring_virtqueue_packed vring_packed = {};
+
+ vring_packed.vring.num = num;
+ vring_packed.vring.desc = pages;
+ return __vring_new_virtqueue_packed(index, &vring_packed,
+ vdev, weak_barriers,
+ context, notify, callback,
+ name, vdev->dev.parent);
+ }
vring_init(&vring_split.vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
context, notify, callback, name,
vdev->dev.parent);
}
@@ -3126,8 +3148,10 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
{
struct vring_virtqueue *vq = to_vvq(_vq);
- if (!vq->use_dma_api)
+ if (!vq->use_dma_api) {
+ kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
return (dma_addr_t)virt_to_phys(ptr);
+ }
return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
}
@@ -3249,4 +3273,5 @@ void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
+MODULE_DESCRIPTION("Virtio ring implementation");
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_rtc_arm.c b/drivers/virtio/virtio_rtc_arm.c
new file mode 100644
index 000000000000..211299d72870
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_arm.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Provides cross-timestamp params for Arm.
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clocksource_ids.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/* see header for doc */
+
+int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
+{
+ *hw_counter = VIRTIO_RTC_COUNTER_ARM_VCT;
+ *cs_id = CSID_ARM_ARCH_COUNTER;
+
+ return 0;
+}
diff --git a/drivers/virtio/virtio_rtc_class.c b/drivers/virtio/virtio_rtc_class.c
new file mode 100644
index 000000000000..05d6d28255cf
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_class.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio_rtc RTC class driver
+ *
+ * Copyright (C) 2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/math64.h>
+#include <linux/overflow.h>
+#include <linux/rtc.h>
+#include <linux/time64.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/**
+ * struct viortc_class - RTC class wrapper
+ * @viortc: virtio_rtc device data
+ * @rtc: RTC device
+ * @vio_clk_id: virtio_rtc clock id
+ * @stopped: Whether RTC ops are disallowed. Access protected by rtc_lock().
+ */
+struct viortc_class {
+ struct viortc_dev *viortc;
+ struct rtc_device *rtc;
+ u16 vio_clk_id;
+ bool stopped;
+};
+
+/**
+ * viortc_class_get_locked() - get RTC class wrapper, if ops allowed
+ * @dev: virtio device
+ *
+ * Gets the RTC class wrapper from the virtio device, if it is available and
+ * ops are allowed.
+ *
+ * Context: Caller must hold rtc_lock().
+ * Return: RTC class wrapper if available and ops allowed, ERR_PTR otherwise.
+ */
+static struct viortc_class *viortc_class_get_locked(struct device *dev)
+{
+ struct viortc_class *viortc_class;
+
+ viortc_class = viortc_class_from_dev(dev);
+ if (IS_ERR(viortc_class))
+ return viortc_class;
+
+ if (viortc_class->stopped)
+ return ERR_PTR(-EBUSY);
+
+ return viortc_class;
+}
+
+/**
+ * viortc_class_read_time() - RTC class op read_time
+ * @dev: virtio device
+ * @tm: read time
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct viortc_class *viortc_class;
+ time64_t sec;
+ int ret;
+ u64 ns;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ ret = viortc_read(viortc_class->viortc, viortc_class->vio_clk_id, &ns);
+ if (ret)
+ return ret;
+
+ sec = div_u64(ns, NSEC_PER_SEC);
+
+ rtc_time64_to_tm(sec, tm);
+
+ return 0;
+}
+
+/**
+ * viortc_class_read_alarm() - RTC class op read_alarm
+ * @dev: virtio device
+ * @alrm: alarm read out
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct viortc_class *viortc_class;
+ time64_t alarm_time_sec;
+ u64 alarm_time_ns;
+ bool enabled;
+ int ret;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ ret = viortc_read_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
+ &alarm_time_ns, &enabled);
+ if (ret)
+ return ret;
+
+ alarm_time_sec = div_u64(alarm_time_ns, NSEC_PER_SEC);
+ rtc_time64_to_tm(alarm_time_sec, &alrm->time);
+
+ alrm->enabled = enabled;
+
+ return 0;
+}
+
+/**
+ * viortc_class_set_alarm() - RTC class op set_alarm
+ * @dev: virtio device
+ * @alrm: alarm to set
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct viortc_class *viortc_class;
+ time64_t alarm_time_sec;
+ u64 alarm_time_ns;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ alarm_time_sec = rtc_tm_to_time64(&alrm->time);
+
+ if (alarm_time_sec < 0)
+ return -EINVAL;
+
+ if (check_mul_overflow((u64)alarm_time_sec, (u64)NSEC_PER_SEC,
+ &alarm_time_ns))
+ return -EINVAL;
+
+ return viortc_set_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
+ alarm_time_ns, alrm->enabled);
+}
+
+/**
+ * viortc_class_alarm_irq_enable() - RTC class op alarm_irq_enable
+ * @dev: virtio device
+ * @enabled: enable or disable alarm IRQ
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct viortc_class *viortc_class;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ return viortc_set_alarm_enabled(viortc_class->viortc,
+ viortc_class->vio_clk_id, enabled);
+}
+
+static const struct rtc_class_ops viortc_class_ops = {
+ .read_time = viortc_class_read_time,
+ .read_alarm = viortc_class_read_alarm,
+ .set_alarm = viortc_class_set_alarm,
+ .alarm_irq_enable = viortc_class_alarm_irq_enable,
+};
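
A minimal userspace sketch (not part of this patch) of how the read_time op
above is reached: opening the RTC character device and issuing RTC_RD_TIME
lands in viortc_class_read_time(). /dev/rtc0 assumes this is the first
registered RTC on the system.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
		return 1;
	printf("%04d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900,
	       tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}
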
+
+/**
+ * viortc_class_alarm() - propagate alarm notification as alarm interrupt
+ * @viortc_class: RTC class wrapper
+ * @vio_clk_id: virtio_rtc clock id
+ *
+ * Context: Any context.
+ */
+void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id)
+{
+ if (vio_clk_id != viortc_class->vio_clk_id) {
+ dev_warn_ratelimited(&viortc_class->rtc->dev,
+ "ignoring alarm for clock id %d, expected id %d\n",
+ vio_clk_id, viortc_class->vio_clk_id);
+ return;
+ }
+
+ rtc_update_irq(viortc_class->rtc, 1, RTC_AF | RTC_IRQF);
+}
+
+/**
+ * viortc_class_stop() - disallow RTC class ops
+ * @viortc_class: RTC class wrapper
+ *
+ * Context: Process context. Caller must NOT hold rtc_lock().
+ */
+void viortc_class_stop(struct viortc_class *viortc_class)
+{
+ rtc_lock(viortc_class->rtc);
+
+ viortc_class->stopped = true;
+
+ rtc_unlock(viortc_class->rtc);
+}
+
+/**
+ * viortc_class_register() - register RTC class device
+ * @viortc_class: RTC class wrapper
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_class_register(struct viortc_class *viortc_class)
+{
+ return devm_rtc_register_device(viortc_class->rtc);
+}
+
+/**
+ * viortc_class_init() - init RTC class wrapper and device
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @have_alarm: have alarm feature
+ * @parent_dev: virtio device
+ *
+ * Context: Process context.
+ * Return: RTC class wrapper on success, ERR_PTR otherwise.
+ */
+struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id, bool have_alarm,
+ struct device *parent_dev)
+{
+ struct viortc_class *viortc_class;
+ struct rtc_device *rtc;
+
+ viortc_class =
+ devm_kzalloc(parent_dev, sizeof(*viortc_class), GFP_KERNEL);
+ if (!viortc_class)
+ return ERR_PTR(-ENOMEM);
+
+ rtc = devm_rtc_allocate_device(parent_dev);
+ if (IS_ERR(rtc))
+ return ERR_CAST(rtc);
+
+ viortc_class->viortc = viortc;
+ viortc_class->rtc = rtc;
+ viortc_class->vio_clk_id = vio_clk_id;
+
+ if (!have_alarm)
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+
+ rtc->ops = &viortc_class_ops;
+ rtc->range_max = div_u64(U64_MAX, NSEC_PER_SEC);
+
+ return viortc_class;
+}
diff --git a/drivers/virtio/virtio_rtc_driver.c b/drivers/virtio/virtio_rtc_driver.c
new file mode 100644
index 000000000000..a57d5e06e19d
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_driver.c
@@ -0,0 +1,1407 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio_rtc driver core
+ *
+ * Copyright (C) 2022-2024 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+#define VIORTC_ALARMQ_BUF_CAP sizeof(union virtio_rtc_notif_alarmq)
+
+/* virtqueue order */
+enum {
+ VIORTC_REQUESTQ,
+ VIORTC_ALARMQ,
+ VIORTC_MAX_NR_QUEUES,
+};
+
+/**
+ * struct viortc_vq - virtqueue abstraction
+ * @vq: virtqueue
+ * @lock: protects access to vq
+ */
+struct viortc_vq {
+ struct virtqueue *vq;
+ spinlock_t lock;
+};
+
+/**
+ * struct viortc_dev - virtio_rtc device data
+ * @vdev: virtio device
+ * @viortc_class: RTC class wrapper for UTC-like clock, NULL if not available
+ * @vqs: virtqueues
+ * @clocks_to_unregister: Clock references, which are only used during device
+ * removal.
+ * For other uses, there would be a race between device
+ * creation and setting the pointers here.
+ * @alarmq_bufs: alarmq buffers list
+ * @num_alarmq_bufs: # of alarmq buffers
+ * @num_clocks: # of virtio_rtc clocks
+ */
+struct viortc_dev {
+ struct virtio_device *vdev;
+ struct viortc_class *viortc_class;
+ struct viortc_vq vqs[VIORTC_MAX_NR_QUEUES];
+ struct viortc_ptp_clock **clocks_to_unregister;
+ void **alarmq_bufs;
+ unsigned int num_alarmq_bufs;
+ u16 num_clocks;
+};
+
+/**
+ * struct viortc_msg - Message requested by driver, responded by device.
+ * @viortc: device data
+ * @req: request buffer
+ * @resp: response buffer
+ * @responded: virtqueue callback signals response reception
+ * @refcnt: Message reference count, message and buffers will be deallocated
+ *	    once 0. refcnt is decremented in the virtqueue callback and in the
+ *	    thread waiting on the responded completion.
+ * If a message response wait function times out, the message will be
+ * freed upon late reception (refcnt will reach 0 in the callback), or
+ * device removal.
+ * @req_size: size of request in bytes
+ * @resp_cap: maximum size of response in bytes
+ * @resp_actual_size: actual size of response
+ */
+struct viortc_msg {
+ struct viortc_dev *viortc;
+ void *req;
+ void *resp;
+ struct completion responded;
+ refcount_t refcnt;
+ unsigned int req_size;
+ unsigned int resp_cap;
+ unsigned int resp_actual_size;
+};
+
+/**
+ * viortc_class_from_dev() - Get RTC class object from virtio device.
+ * @dev: virtio device
+ *
+ * Context: Any context.
+ * Return: RTC class object if available, ERR_PTR otherwise.
+ */
+struct viortc_class *viortc_class_from_dev(struct device *dev)
+{
+ struct virtio_device *vdev;
+ struct viortc_dev *viortc;
+
+ vdev = container_of(dev, typeof(*vdev), dev);
+ viortc = vdev->priv;
+
+ return viortc->viortc_class ?: ERR_PTR(-ENODEV);
+}
+
+/**
+ * viortc_alarms_supported() - Whether device and driver support alarms.
+ * @vdev: virtio device
+ *
+ * NB: Device and driver may not both support alarms for the same clocks.
+ *
+ * Context: Any context.
+ * Return: True if both device and driver can support alarms.
+ */
+static bool viortc_alarms_supported(struct virtio_device *vdev)
+{
+ return IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
+ virtio_has_feature(vdev, VIRTIO_RTC_F_ALARM);
+}
+
+/**
+ * viortc_feed_vq() - Make a device write-only buffer available.
+ * @viortc: device data
+ * @vq: notification virtqueue
+ * @buf: buffer
+ * @buf_len: buffer capacity in bytes
+ * @data: token, identifying buffer
+ *
+ * Context: Caller must prevent concurrent access to vq.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_feed_vq(struct viortc_dev *viortc, struct virtqueue *vq,
+ void *buf, unsigned int buf_len, void *data)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, buf, buf_len);
+
+ return virtqueue_add_inbuf(vq, &sg, 1, data, GFP_ATOMIC);
+}
+
+/**
+ * viortc_msg_init() - Allocate and initialize requestq message.
+ * @viortc: device data
+ * @msg_type: virtio_rtc message type
+ * @req_size: size of request buffer to be allocated
+ * @resp_cap: size of response buffer to be allocated
+ *
+ * Initializes the message refcnt to 2. The refcnt will be decremented once in
+ * the virtqueue callback, and once in the thread waiting on the message (on
+ * completion or timeout).
+ *
+ * Context: Process context.
+ * Return: Message pointer on success, NULL on allocation failure.
+ */
+static struct viortc_msg *viortc_msg_init(struct viortc_dev *viortc,
+ u16 msg_type, unsigned int req_size,
+ unsigned int resp_cap)
+{
+ struct device *dev = &viortc->vdev->dev;
+ struct virtio_rtc_req_head *req_head;
+ struct viortc_msg *msg;
+
+ msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ init_completion(&msg->responded);
+
+ msg->req = devm_kzalloc(dev, req_size, GFP_KERNEL);
+ if (!msg->req)
+ goto err_free_msg;
+
+ req_head = msg->req;
+
+ msg->resp = devm_kzalloc(dev, resp_cap, GFP_KERNEL);
+ if (!msg->resp)
+ goto err_free_msg_req;
+
+ msg->viortc = viortc;
+ msg->req_size = req_size;
+ msg->resp_cap = resp_cap;
+
+ refcount_set(&msg->refcnt, 2);
+
+ req_head->msg_type = virtio_cpu_to_le(msg_type, req_head->msg_type);
+
+ return msg;
+
+err_free_msg_req:
+ devm_kfree(dev, msg->req);
+
+err_free_msg:
+ devm_kfree(dev, msg);
+
+ return NULL;
+}
+
+/**
+ * viortc_msg_release() - Decrement message refcnt, potentially free message.
+ * @msg: message requested by driver
+ *
+ * Context: Any context.
+ */
+static void viortc_msg_release(struct viortc_msg *msg)
+{
+ struct device *dev;
+
+ if (refcount_dec_and_test(&msg->refcnt)) {
+ dev = &msg->viortc->vdev->dev;
+
+ devm_kfree(dev, msg->req);
+ devm_kfree(dev, msg->resp);
+ devm_kfree(dev, msg);
+ }
+}
+
+/**
+ * viortc_do_cb() - generic virtqueue callback logic
+ * @vq: virtqueue
+ * @handle_buf: function to process a used buffer
+ *
+ * Context: virtqueue callback, typically interrupt. Takes and releases vq lock.
+ */
+static void viortc_do_cb(struct virtqueue *vq,
+ void (*handle_buf)(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc))
+{
+ struct viortc_dev *viortc = vq->vdev->priv;
+ struct viortc_vq *viortc_vq;
+ bool cb_enabled = true;
+ unsigned long flags;
+ unsigned int len;
+ void *token;
+
+ viortc_vq = &viortc->vqs[vq->index];
+
+ for (;;) {
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ if (cb_enabled) {
+ virtqueue_disable_cb(vq);
+ cb_enabled = false;
+ }
+
+ token = virtqueue_get_buf(vq, &len);
+ if (!token) {
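+			/*
+			 * Ring drained: re-enable callbacks. If a buffer
+			 * raced in meanwhile, virtqueue_enable_cb() returns
+			 * false and the next loop pass will reap it.
+			 */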
+ if (virtqueue_enable_cb(vq)) {
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+ return;
+ }
+ cb_enabled = true;
+ }
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+
+ if (token)
+ handle_buf(token, len, vq, viortc_vq, viortc);
+ }
+}
+
+/**
+ * viortc_requestq_hdlr() - process a requestq used buffer
+ * @token: token identifying the buffer
+ * @len: bytes written by device
+ * @vq: virtqueue
+ * @viortc_vq: device specific data for virtqueue
+ * @viortc: device data
+ *
+ * Signals completion for each received message.
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_requestq_hdlr(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc)
+{
+ struct viortc_msg *msg = token;
+
+ msg->resp_actual_size = len;
+
+ complete(&msg->responded);
+ viortc_msg_release(msg);
+}
+
+/**
+ * viortc_cb_requestq() - callback for requestq
+ * @vq: virtqueue
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_cb_requestq(struct virtqueue *vq)
+{
+ viortc_do_cb(vq, viortc_requestq_hdlr);
+}
+
+/**
+ * viortc_alarmq_hdlr() - process an alarmq used buffer
+ * @token: token identifying the buffer
+ * @len: bytes written by device
+ * @vq: virtqueue
+ * @viortc_vq: device specific data for virtqueue
+ * @viortc: device data
+ *
+ * Processes a VIRTIO_RTC_NOTIF_ALARM notification by calling the RTC class
+ * driver. Makes the buffer available again.
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_alarmq_hdlr(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc)
+{
+ struct virtio_rtc_notif_alarm *notif = token;
+ struct virtio_rtc_notif_head *head = token;
+ unsigned long flags;
+ u16 clock_id;
+ bool notify;
+
+ if (len < sizeof(*head)) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring notification with short header\n",
+ __func__);
+ goto feed_vq;
+ }
+
+ if (virtio_le_to_cpu(head->msg_type) != VIRTIO_RTC_NOTIF_ALARM) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring unknown notification type 0x%x\n",
+ __func__, virtio_le_to_cpu(head->msg_type));
+ goto feed_vq;
+ }
+
+ if (len < sizeof(*notif)) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring too small alarm notification\n",
+ __func__);
+ goto feed_vq;
+ }
+
+ clock_id = virtio_le_to_cpu(notif->clock_id);
+
+ if (!viortc->viortc_class)
+ dev_warn_ratelimited(&viortc->vdev->dev,
+ "ignoring alarm, no RTC class device available\n");
+ else
+ viortc_class_alarm(viortc->viortc_class, clock_id);
+
+feed_vq:
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ if (viortc_feed_vq(viortc, vq, notif, VIORTC_ALARMQ_BUF_CAP, token))
+ dev_warn(&viortc->vdev->dev,
+ "%s: failed to re-expose input buffer\n", __func__);
+
+ notify = virtqueue_kick_prepare(vq);
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+
+ if (notify)
+ virtqueue_notify(vq);
+}
+
+/**
+ * viortc_cb_alarmq() - callback for alarmq
+ * @vq: virtqueue
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_cb_alarmq(struct virtqueue *vq)
+{
+ viortc_do_cb(vq, viortc_alarmq_hdlr);
+}
+
+/**
+ * viortc_get_resp_errno() - convert virtio_rtc response status to system errno
+ * @resp_head: message response header
+ *
+ * Return: negative system errno, or 0
+ */
+static int viortc_get_resp_errno(struct virtio_rtc_resp_head *resp_head)
+{
+ switch (virtio_le_to_cpu(resp_head->status)) {
+ case VIRTIO_RTC_S_OK:
+ return 0;
+ case VIRTIO_RTC_S_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case VIRTIO_RTC_S_EINVAL:
+ return -EINVAL;
+ case VIRTIO_RTC_S_ENODEV:
+ return -ENODEV;
+ case VIRTIO_RTC_S_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+/**
+ * viortc_msg_xfer() - send message request, wait until message response
+ * @vq: virtqueue
+ * @msg: message with driver request
+ * @timeout_jiffies: message response timeout, 0 for no timeout
+ *
+ * Context: Process context. Takes and releases vq.lock. May sleep.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_msg_xfer(struct viortc_vq *vq, struct viortc_msg *msg,
+ unsigned long timeout_jiffies)
+{
+ struct scatterlist out_sg[1];
+ struct scatterlist in_sg[1];
+ struct scatterlist *sgs[2];
+ unsigned long flags;
+ long timeout_ret;
+ bool notify;
+ int ret;
+
+ sgs[0] = out_sg;
+ sgs[1] = in_sg;
+
+ sg_init_one(out_sg, msg->req, msg->req_size);
+ sg_init_one(in_sg, msg->resp, msg->resp_cap);
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ ret = virtqueue_add_sgs(vq->vq, sgs, 1, 1, msg, GFP_ATOMIC);
+ if (ret) {
+ spin_unlock_irqrestore(&vq->lock, flags);
+ /*
+ * Release in place of the response callback, which will never
+ * come.
+ */
+ viortc_msg_release(msg);
+ return ret;
+ }
+
+ notify = virtqueue_kick_prepare(vq->vq);
+
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ if (notify)
+ virtqueue_notify(vq->vq);
+
+ if (timeout_jiffies) {
+ timeout_ret = wait_for_completion_interruptible_timeout(
+ &msg->responded, timeout_jiffies);
+
+ if (!timeout_ret)
+ return -ETIMEDOUT;
+ else if (timeout_ret < 0)
+ return (int)timeout_ret;
+ } else {
+ ret = wait_for_completion_interruptible(&msg->responded);
+ if (ret)
+ return ret;
+ }
+
+ if (msg->resp_actual_size < sizeof(struct virtio_rtc_resp_head))
+ return -EINVAL;
+
+ ret = viortc_get_resp_errno(msg->resp);
+ if (ret)
+ return ret;
+
+ /*
+ * There is not yet a case where returning a short message would make
+ * sense, so consider any deviation an error.
+ */
+ if (msg->resp_actual_size != msg->resp_cap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * common message handle macros for messages of different types
+ */
+
+/**
+ * VIORTC_DECLARE_MSG_HDL_ONSTACK() - declare message handle on stack
+ * @hdl: message handle name
+ * @msg_id: message type id
+ * @msg_req: message request type
+ * @msg_resp: message response type
+ */
+#define VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, msg_id, msg_req, msg_resp) \
+ struct { \
+ struct viortc_msg *msg; \
+ msg_req *req; \
+ msg_resp *resp; \
+ unsigned int req_size; \
+ unsigned int resp_cap; \
+ u16 msg_type; \
+ } hdl = { \
+ NULL, NULL, NULL, sizeof(msg_req), sizeof(msg_resp), (msg_id), \
+ }
+
+/**
+ * VIORTC_MSG() - extract message from message handle
+ * @hdl: message handle
+ *
+ * Return: pointer to the underlying struct viortc_msg
+ */
+#define VIORTC_MSG(hdl) ((hdl).msg)
+
+/**
+ * VIORTC_MSG_INIT() - initialize message handle
+ * @hdl: message handle
+ * @viortc: device data (struct viortc_dev *)
+ *
+ * Context: Process context.
+ * Return: 0 on success, -ENOMEM otherwise.
+ */
+#define VIORTC_MSG_INIT(hdl, viortc) \
+ ({ \
+ typeof(hdl) *_hdl = &(hdl); \
+ \
+ _hdl->msg = viortc_msg_init((viortc), _hdl->msg_type, \
+ _hdl->req_size, _hdl->resp_cap); \
+ if (_hdl->msg) { \
+ _hdl->req = _hdl->msg->req; \
+ _hdl->resp = _hdl->msg->resp; \
+ } \
+ _hdl->msg ? 0 : -ENOMEM; \
+ })
+
+/**
+ * VIORTC_MSG_WRITE() - write a request message field
+ * @hdl: message handle
+ * @dest_member: request message field name
+ * @src_ptr: pointer to data of compatible type
+ *
+ * Writes the field in little-endian format.
+ */
+#define VIORTC_MSG_WRITE(hdl, dest_member, src_ptr) \
+ do { \
+ typeof(hdl) _hdl = (hdl); \
+ typeof(src_ptr) _src_ptr = (src_ptr); \
+ \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(_hdl.req->dest_member)), \
+ *_src_ptr); \
+ \
+ _hdl.req->dest_member = \
+ virtio_cpu_to_le(*_src_ptr, _hdl.req->dest_member); \
+ } while (0)
+
+/**
+ * VIORTC_MSG_READ() - read from a response message field
+ * @hdl: message handle
+ * @src_member: response message field name
+ * @dest_ptr: pointer to data of compatible type
+ *
+ * Converts from little-endian format and writes to dest_ptr.
+ */
+#define VIORTC_MSG_READ(hdl, src_member, dest_ptr) \
+ do { \
+ typeof(dest_ptr) _dest_ptr = (dest_ptr); \
+ \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu((hdl).resp->src_member)), \
+ *_dest_ptr); \
+ \
+ *_dest_ptr = virtio_le_to_cpu((hdl).resp->src_member); \
+ } while (0)
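
For reference, a rough expansion sketch (not from the patch) of
VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id) as used in viortc_read() below,
assuming the request's clock_id member is an __le16, so that
virtio_cpu_to_le() reduces to cpu_to_le16():

	do {
		typeof(hdl) _hdl = (hdl);
		typeof(&vio_clk_id) _src_ptr = (&vio_clk_id);

		/* compile-time check: source must be a plain u16 */
		typecheck(u16, *_src_ptr);
		_hdl.req->clock_id = cpu_to_le16(*_src_ptr);
	} while (0);
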
+
+/*
+ * read requests
+ */
+
+/* timeout for clock readings, where timeouts are considered non-fatal */
+#define VIORTC_MSG_READ_TIMEOUT secs_to_jiffies(60)
+
+/**
+ * viortc_read() - VIRTIO_RTC_REQ_READ wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @reading: clock reading [ns]
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ,
+ struct virtio_rtc_req_read,
+ struct virtio_rtc_resp_read);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ VIORTC_MSG_READ_TIMEOUT);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, clock_reading, reading);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_read_cross() - VIRTIO_RTC_REQ_READ_CROSS wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @hw_counter: virtio_rtc HW counter type
+ * @reading: clock reading [ns]
+ * @cycles: HW counter cycles during clock reading
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ u64 *reading, u64 *cycles)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_CROSS,
+ struct virtio_rtc_req_read_cross,
+ struct virtio_rtc_resp_read_cross);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ VIORTC_MSG_READ_TIMEOUT);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, clock_reading, reading);
+ VIORTC_MSG_READ(hdl, counter_cycles, cycles);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/*
+ * control requests
+ */
+
+/**
+ * viortc_cfg() - VIRTIO_RTC_REQ_CFG wrapper
+ * @viortc: device data
+ * @num_clocks: # of virtio_rtc clocks
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_cfg(struct viortc_dev *viortc, u16 *num_clocks)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CFG,
+ struct virtio_rtc_req_cfg,
+ struct virtio_rtc_resp_cfg);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, num_clocks, num_clocks);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_clock_cap() - VIRTIO_RTC_REQ_CLOCK_CAP wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @type: virtio_rtc clock type
+ * @leap_second_smearing: virtio_rtc smearing variant
+ * @flags: struct virtio_rtc_resp_clock_cap.flags
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_clock_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 *type,
+ u8 *leap_second_smearing, u8 *flags)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CLOCK_CAP,
+ struct virtio_rtc_req_clock_cap,
+ struct virtio_rtc_resp_clock_cap);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, type, type);
+ VIORTC_MSG_READ(hdl, leap_second_smearing, leap_second_smearing);
+ VIORTC_MSG_READ(hdl, flags, flags);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_cross_cap() - VIRTIO_RTC_REQ_CROSS_CAP wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @hw_counter: virtio_rtc HW counter type
+ * @supported: whether cross-timestamping is supported for the clock/counter pair
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ bool *supported)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CROSS_CAP,
+ struct virtio_rtc_req_cross_cap,
+ struct virtio_rtc_resp_cross_cap);
+ u8 flags;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, flags, &flags);
+ *supported = !!(flags & VIRTIO_RTC_FLAG_CROSS_CAP);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_read_alarm() - VIRTIO_RTC_REQ_READ_ALARM wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_time: alarm time in ns
+ * @enabled: whether alarm is enabled
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
+ u64 *alarm_time, bool *enabled)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_ALARM,
+ struct virtio_rtc_req_read_alarm,
+ struct virtio_rtc_resp_read_alarm);
+ u8 flags;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, alarm_time, alarm_time);
+ VIORTC_MSG_READ(hdl, flags, &flags);
+
+ *enabled = !!(flags & VIRTIO_RTC_FLAG_ALARM_ENABLED);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_set_alarm() - VIRTIO_RTC_REQ_SET_ALARM wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_time: alarm time in ns
+ * @alarm_enable: enable or disable alarm
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
+ bool alarm_enable)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM,
+ struct virtio_rtc_req_set_alarm,
+ struct virtio_rtc_resp_set_alarm);
+ u8 flags = 0;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ if (alarm_enable)
+ flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, alarm_time, &alarm_time);
+ VIORTC_MSG_WRITE(hdl, flags, &flags);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+	if (ret)
+		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+			ret);
+
+	viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_set_alarm_enabled() - VIRTIO_RTC_REQ_SET_ALARM_ENABLED wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_enable: enable or disable alarm
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
+ bool alarm_enable)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM_ENABLED,
+ struct virtio_rtc_req_set_alarm_enabled,
+ struct virtio_rtc_resp_set_alarm_enabled);
+ u8 flags = 0;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ if (alarm_enable)
+ flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, flags, &flags);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+	if (ret)
+		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+			ret);
+
+	viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/*
+ * init, deinit
+ */
+
+/**
+ * viortc_init_rtc_class_clock() - init and register an RTC class device
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @clock_type: virtio_rtc clock type
+ * @flags: struct virtio_rtc_resp_clock_cap.flags
+ *
+ * The clock must be a UTC-like clock.
+ *
+ * Context: Process context.
+ * Return: Positive if registered, zero if not supported by configuration,
+ * negative error code otherwise.
+ */
+static int viortc_init_rtc_class_clock(struct viortc_dev *viortc,
+ u16 vio_clk_id, u8 clock_type, u8 flags)
+{
+ struct virtio_device *vdev = viortc->vdev;
+ struct viortc_class *viortc_class;
+ struct device *dev = &vdev->dev;
+ bool have_alarm;
+
+ if (clock_type != VIRTIO_RTC_CLOCK_UTC_SMEARED) {
+ dev_info(dev,
+ "not creating RTC class device for clock %d, which may step on leap seconds\n",
+ vio_clk_id);
+ return 0;
+ }
+
+ if (viortc->viortc_class) {
+ dev_warn_once(dev,
+ "multiple UTC-like clocks are present, but creating only one RTC class device\n");
+ return 0;
+ }
+
+ have_alarm = viortc_alarms_supported(vdev) &&
+ !!(flags & VIRTIO_RTC_FLAG_ALARM_CAP);
+
+ viortc_class = viortc_class_init(viortc, vio_clk_id, have_alarm, dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ viortc->viortc_class = viortc_class;
+
+ if (have_alarm)
+ devm_device_init_wakeup(dev);
+
+ return viortc_class_register(viortc_class) ?: 1;
+}
+
+/**
+ * viortc_init_ptp_clock() - init and register PTP clock
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @clock_type: virtio_rtc clock type
+ * @leap_second_smearing: virtio_rtc leap second smearing
+ *
+ * Context: Process context.
+ * Return: Positive if registered, zero if not supported by configuration,
+ * negative error code otherwise.
+ */
+static int viortc_init_ptp_clock(struct viortc_dev *viortc, u16 vio_clk_id,
+ u8 clock_type, u8 leap_second_smearing)
+{
+ struct device *dev = &viortc->vdev->dev;
+ char ptp_clock_name[PTP_CLOCK_NAME_LEN];
+ struct viortc_ptp_clock *vio_ptp;
+
+ snprintf(ptp_clock_name, PTP_CLOCK_NAME_LEN,
+ "Virtio PTP type %hhu/variant %hhu", clock_type,
+ leap_second_smearing);
+
+ vio_ptp = viortc_ptp_register(viortc, dev, vio_clk_id, ptp_clock_name);
+ if (IS_ERR(vio_ptp)) {
+ dev_err(dev, "failed to register PTP clock '%s'\n",
+ ptp_clock_name);
+ return PTR_ERR(vio_ptp);
+ }
+
+ viortc->clocks_to_unregister[vio_clk_id] = vio_ptp;
+
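+	/* A NULL from the !CONFIG_VIRTIO_RTC_PTP stub reports "not supported". */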
+ return !!vio_ptp;
+}
+
+/**
+ * viortc_init_clock() - init local representation of virtio_rtc clock
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ *
+ * Initializes PHC and/or RTC class device to represent virtio_rtc clock.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_init_clock(struct viortc_dev *viortc, u16 vio_clk_id)
+{
+ u8 clock_type, leap_second_smearing, flags;
+ bool is_exposed = false;
+ int ret;
+
+ ret = viortc_clock_cap(viortc, vio_clk_id, &clock_type,
+ &leap_second_smearing, &flags);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
+ (clock_type == VIRTIO_RTC_CLOCK_UTC ||
+ clock_type == VIRTIO_RTC_CLOCK_UTC_SMEARED ||
+ clock_type == VIRTIO_RTC_CLOCK_UTC_MAYBE_SMEARED)) {
+ ret = viortc_init_rtc_class_clock(viortc, vio_clk_id,
+ clock_type, flags);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ is_exposed = true;
+ }
+
+ if (IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)) {
+ ret = viortc_init_ptp_clock(viortc, vio_clk_id, clock_type,
+ leap_second_smearing);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ is_exposed = true;
+ }
+
+ if (!is_exposed)
+ dev_warn(&viortc->vdev->dev,
+ "cannot expose clock %d (type %d, variant %d) to userspace\n",
+ vio_clk_id, clock_type, leap_second_smearing);
+
+ return 0;
+}
+
+/**
+ * viortc_clocks_deinit() - unregister PHCs, stop RTC ops
+ * @viortc: device data
+ */
+static void viortc_clocks_deinit(struct viortc_dev *viortc)
+{
+ struct viortc_ptp_clock *vio_ptp;
+ unsigned int i;
+
+ for (i = 0; i < viortc->num_clocks; i++) {
+ vio_ptp = viortc->clocks_to_unregister[i];
+
+ if (!vio_ptp)
+ continue;
+
+ viortc->clocks_to_unregister[i] = NULL;
+
+ WARN_ON(viortc_ptp_unregister(vio_ptp, &viortc->vdev->dev));
+ }
+
+ if (viortc->viortc_class)
+ viortc_class_stop(viortc->viortc_class);
+}
+
+/**
+ * viortc_clocks_init() - init local representations of virtio_rtc clocks
+ * @viortc: device data
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_clocks_init(struct viortc_dev *viortc)
+{
+ u16 num_clocks;
+ unsigned int i;
+ int ret;
+
+ ret = viortc_cfg(viortc, &num_clocks);
+ if (ret)
+ return ret;
+
+ if (num_clocks < 1) {
+ dev_err(&viortc->vdev->dev, "device reported 0 clocks\n");
+ return -ENODEV;
+ }
+
+ viortc->num_clocks = num_clocks;
+
+ viortc->clocks_to_unregister =
+ devm_kcalloc(&viortc->vdev->dev, num_clocks,
+ sizeof(*viortc->clocks_to_unregister), GFP_KERNEL);
+ if (!viortc->clocks_to_unregister)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clocks; i++) {
+ ret = viortc_init_clock(viortc, i);
+ if (ret)
+ goto err_deinit_clocks;
+ }
+
+ return 0;
+
+err_deinit_clocks:
+ viortc_clocks_deinit(viortc);
+
+ return ret;
+}
+
+/**
+ * viortc_populate_vq() - populate alarmq with device-writable buffers
+ * @viortc: device data
+ * @viortc_vq: device specific data for virtqueue
+ * @buf_cap: device-writable buffer size in bytes
+ * @lock: lock queue during accesses
+ *
+ * Populates the alarmq with pre-allocated buffers.
+ *
+ * The caller is responsible for kicking the device.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_populate_vq(struct viortc_dev *viortc,
+ struct viortc_vq *viortc_vq, u32 buf_cap,
+ bool lock)
+{
+ unsigned int num_elems, i;
+ struct virtqueue *vq;
+ unsigned long flags;
+ void *buf;
+ int ret;
+
+ num_elems = viortc->num_alarmq_bufs;
+ vq = viortc_vq->vq;
+
+ for (i = 0; i < num_elems; i++) {
+ buf = viortc->alarmq_bufs[i];
+
+ if (lock) {
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+ } else {
+ ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_alloc_vq_bufs() - allocate alarmq buffers
+ * @viortc: device data
+ * @num_elems: # of buffers
+ * @buf_cap: per-buffer device-writable bytes
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_alloc_vq_bufs(struct viortc_dev *viortc,
+ unsigned int num_elems, u32 buf_cap)
+{
+ struct device *dev = &viortc->vdev->dev;
+ void **buf_list;
+ unsigned int i;
+ void *buf;
+
+ buf_list = devm_kcalloc(dev, num_elems, sizeof(*buf_list), GFP_KERNEL);
+ if (!buf_list)
+ return -ENOMEM;
+
+ viortc->alarmq_bufs = buf_list;
+ viortc->num_alarmq_bufs = num_elems;
+
+ for (i = 0; i < num_elems; i++) {
+ buf = devm_kzalloc(dev, buf_cap, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_list[i] = buf;
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_init_vqs() - init virtqueues
+ * @viortc: device data
+ *
+ * Inits virtqueues and associated data.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_init_vqs(struct viortc_dev *viortc)
+{
+ struct virtqueue *vqs[VIORTC_MAX_NR_QUEUES];
+ struct virtqueue_info vqs_info[] = {
+ { "requestq", viortc_cb_requestq },
+ { "alarmq", viortc_cb_alarmq },
+ };
+ struct virtio_device *vdev = viortc->vdev;
+ unsigned int num_elems;
+ int nr_queues, ret;
+ bool have_alarms;
+
+ have_alarms = viortc_alarms_supported(vdev);
+
+ if (have_alarms)
+ nr_queues = VIORTC_ALARMQ + 1;
+ else
+ nr_queues = VIORTC_REQUESTQ + 1;
+
+ ret = virtio_find_vqs(vdev, nr_queues, vqs, vqs_info, NULL);
+ if (ret)
+ return ret;
+
+ viortc->vqs[VIORTC_REQUESTQ].vq = vqs[VIORTC_REQUESTQ];
+ spin_lock_init(&viortc->vqs[VIORTC_REQUESTQ].lock);
+
+ if (have_alarms) {
+ viortc->vqs[VIORTC_ALARMQ].vq = vqs[VIORTC_ALARMQ];
+ spin_lock_init(&viortc->vqs[VIORTC_ALARMQ].lock);
+
+ num_elems = virtqueue_get_vring_size(vqs[VIORTC_ALARMQ]);
+ if (num_elems == 0)
+ return -ENOSPC;
+
+ if (!viortc->alarmq_bufs) {
+ ret = viortc_alloc_vq_bufs(viortc, num_elems,
+ VIORTC_ALARMQ_BUF_CAP);
+ if (ret)
+ return ret;
+ } else {
+ viortc->num_alarmq_bufs =
+ min(num_elems, viortc->num_alarmq_bufs);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_probe() - probe a virtio_rtc virtio device
+ * @vdev: virtio device
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_probe(struct virtio_device *vdev)
+{
+ struct viortc_vq *alarm_viortc_vq;
+ struct virtqueue *alarm_vq;
+ struct viortc_dev *viortc;
+ unsigned long flags;
+ bool notify;
+ int ret;
+
+ viortc = devm_kzalloc(&vdev->dev, sizeof(*viortc), GFP_KERNEL);
+ if (!viortc)
+ return -ENOMEM;
+
+ vdev->priv = viortc;
+ viortc->vdev = vdev;
+
+ ret = viortc_init_vqs(viortc);
+ if (ret)
+ return ret;
+
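+	/* The device must be ready before viortc_clocks_init() sends requests. */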
+ virtio_device_ready(vdev);
+
+ ret = viortc_clocks_init(viortc);
+ if (ret)
+ goto err_reset_vdev;
+
+ if (viortc_alarms_supported(vdev)) {
+ alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
+ alarm_vq = alarm_viortc_vq->vq;
+
+ ret = viortc_populate_vq(viortc, alarm_viortc_vq,
+ VIORTC_ALARMQ_BUF_CAP, true);
+ if (ret)
+ goto err_deinit_clocks;
+
+ spin_lock_irqsave(&alarm_viortc_vq->lock, flags);
+ notify = virtqueue_kick_prepare(alarm_vq);
+ spin_unlock_irqrestore(&alarm_viortc_vq->lock, flags);
+
+ if (notify && !virtqueue_notify(alarm_vq)) {
+ ret = -EIO;
+ goto err_deinit_clocks;
+ }
+ }
+
+ return 0;
+
+err_deinit_clocks:
+ viortc_clocks_deinit(viortc);
+
+err_reset_vdev:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+
+ return ret;
+}
+
+/**
+ * viortc_remove() - remove a virtio_rtc virtio device
+ * @vdev: virtio device
+ */
+static void viortc_remove(struct virtio_device *vdev)
+{
+ struct viortc_dev *viortc = vdev->priv;
+
+ viortc_clocks_deinit(viortc);
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+}
+
+static int viortc_freeze(struct virtio_device *dev)
+{
+ /*
+ * Do not reset the device, so that the device may still wake up the
+ * system through an alarmq notification.
+ */
+
+ return 0;
+}
+
+static int viortc_restore(struct virtio_device *dev)
+{
+ struct viortc_dev *viortc = dev->priv;
+ struct viortc_vq *alarm_viortc_vq;
+ struct virtqueue *alarm_vq;
+ bool notify = false;
+ int ret;
+
+ ret = viortc_init_vqs(viortc);
+ if (ret)
+ return ret;
+
+ alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
+ alarm_vq = alarm_viortc_vq->vq;
+
+ if (viortc_alarms_supported(dev)) {
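+		/*
+		 * No vq locking needed here: the device is not ready yet, so
+		 * the alarmq callback cannot run concurrently.
+		 */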
+ ret = viortc_populate_vq(viortc, alarm_viortc_vq,
+ VIORTC_ALARMQ_BUF_CAP, false);
+ if (ret)
+ return ret;
+
+ notify = virtqueue_kick_prepare(alarm_vq);
+ }
+
+ virtio_device_ready(dev);
+
+ if (notify && !virtqueue_notify(alarm_vq))
+ ret = -EIO;
+
+ return ret;
+}
+
+static unsigned int features[] = {
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
+ VIRTIO_RTC_F_ALARM,
+#endif
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_rtc_drv = {
+ .driver.name = KBUILD_MODNAME,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = viortc_probe,
+ .remove = viortc_remove,
+ .freeze = pm_sleep_ptr(viortc_freeze),
+ .restore = pm_sleep_ptr(viortc_restore),
+};
+
+module_virtio_driver(virtio_rtc_drv);
+
+MODULE_DESCRIPTION("Virtio RTC driver");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_rtc_internal.h b/drivers/virtio/virtio_rtc_internal.h
new file mode 100644
index 000000000000..296afee6719b
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_internal.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * virtio_rtc internal interfaces
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _VIRTIO_RTC_INTERNAL_H_
+#define _VIRTIO_RTC_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+
+/* driver core IFs */
+
+struct viortc_dev;
+
+int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading);
+int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ u64 *reading, u64 *cycles);
+int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ bool *supported);
+int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
+ u64 *alarm_time, bool *enabled);
+int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
+ bool alarm_enable);
+int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
+ bool alarm_enable);
+
+struct viortc_class;
+
+struct viortc_class *viortc_class_from_dev(struct device *dev);
+
+/* PTP IFs */
+
+struct viortc_ptp_clock;
+
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)
+
+struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
+ struct device *parent_dev,
+ u16 vio_clk_id,
+ const char *ptp_clock_name);
+int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev);
+
+#else
+
+static inline struct viortc_ptp_clock *
+viortc_ptp_register(struct viortc_dev *viortc, struct device *parent_dev,
+ u16 vio_clk_id, const char *ptp_clock_name)
+{
+ return NULL;
+}
+
+static inline int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev)
+{
+ return -ENODEV;
+}
+
+#endif
+
+/* HW counter IFs */
+
+/**
+ * viortc_hw_xtstamp_params() - get HW-specific xtstamp params
+ * @hw_counter: virtio_rtc HW counter type
+ * @cs_id: clocksource id corresponding to hw_counter
+ *
+ * Gets the HW-specific xtstamp params. Returns an error if the driver cannot
+ * support xtstamp.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id);
+
+/* RTC class IFs */
+
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
+
+void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id);
+
+void viortc_class_stop(struct viortc_class *viortc_class);
+
+int viortc_class_register(struct viortc_class *viortc_class);
+
+struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id, bool have_alarm,
+ struct device *parent_dev);
+
+#else /* CONFIG_VIRTIO_RTC_CLASS */
+
+static inline void viortc_class_alarm(struct viortc_class *viortc_class,
+ u16 vio_clk_id)
+{
+}
+
+static inline void viortc_class_stop(struct viortc_class *viortc_class)
+{
+}
+
+static inline int viortc_class_register(struct viortc_class *viortc_class)
+{
+ return -ENODEV;
+}
+
+static inline struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id,
+ bool have_alarm,
+ struct device *parent_dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* CONFIG_VIRTIO_RTC_CLASS */
+
+#endif /* _VIRTIO_RTC_INTERNAL_H_ */
diff --git a/drivers/virtio/virtio_rtc_ptp.c b/drivers/virtio/virtio_rtc_ptp.c
new file mode 100644
index 000000000000..f84599950cd4
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_ptp.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Expose virtio_rtc clocks as PTP clocks.
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Derived from ptp_kvm_common.c, virtual PTP 1588 clock for use with KVM
+ * guests.
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/**
+ * struct viortc_ptp_clock - PTP clock abstraction
+ * @ptp_clock: PTP clock handle for unregistering
+ * @viortc: virtio_rtc device data
+ * @ptp_info: PTP clock description
+ * @vio_clk_id: virtio_rtc clock id
+ * @have_cross: device supports crosststamp with available HW counter
+ */
+struct viortc_ptp_clock {
+ struct ptp_clock *ptp_clock;
+ struct viortc_dev *viortc;
+ struct ptp_clock_info ptp_info;
+ u16 vio_clk_id;
+ bool have_cross;
+};
+
+/**
+ * struct viortc_ptp_cross_ctx - context for get_device_system_crosststamp()
+ * @device_time: device clock reading
+ * @system_counterval: HW counter value at device_time
+ *
+ * Provides the already obtained crosststamp to get_device_system_crosststamp().
+ */
+struct viortc_ptp_cross_ctx {
+ ktime_t device_time;
+ struct system_counterval_t system_counterval;
+};
+
+/* Weak fallback in case the arch does not provide cross-timestamping params */
+int __weak viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * viortc_ptp_get_time_fn() - callback for get_device_system_crosststamp()
+ * @device_time: device clock reading
+ * @system_counterval: HW counter value at device_time
+ * @ctx: context with already obtained crosststamp
+ *
+ * Return: zero (success).
+ */
+static int viortc_ptp_get_time_fn(ktime_t *device_time,
+ struct system_counterval_t *system_counterval,
+ void *ctx)
+{
+ struct viortc_ptp_cross_ctx *vio_ctx = ctx;
+
+ *device_time = vio_ctx->device_time;
+ *system_counterval = vio_ctx->system_counterval;
+
+ return 0;
+}
+
+/**
+ * viortc_ptp_do_xtstamp() - get crosststamp from device
+ * @vio_ptp: virtio_rtc PTP clock
+ * @hw_counter: virtio_rtc HW counter type
+ * @cs_id: clocksource id corresponding to hw_counter
+ * @ctx: context for get_device_system_crosststamp()
+ *
+ * Reads HW-specific crosststamp from device.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_do_xtstamp(struct viortc_ptp_clock *vio_ptp,
+ u8 hw_counter, enum clocksource_ids cs_id,
+ struct viortc_ptp_cross_ctx *ctx)
+{
+ u64 max_ns, ns;
+ int ret;
+
+ ctx->system_counterval.cs_id = cs_id;
+
+ ret = viortc_read_cross(vio_ptp->viortc, vio_ptp->vio_clk_id,
+ hw_counter, &ns,
+ &ctx->system_counterval.cycles);
+ if (ret)
+ return ret;
+
+ max_ns = (u64)ktime_to_ns(KTIME_MAX);
+ if (ns > max_ns)
+ return -EINVAL;
+
+ ctx->device_time = ns_to_ktime(ns);
+
+ return 0;
+}
+
+/*
+ * PTP clock operations
+ */
+
+/**
+ * viortc_ptp_getcrosststamp() - PTP clock getcrosststamp op
+ * @ptp: PTP clock info
+ * @xtstamp: crosststamp
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct viortc_ptp_clock *vio_ptp =
+ container_of(ptp, struct viortc_ptp_clock, ptp_info);
+ struct system_time_snapshot history_begin;
+ struct viortc_ptp_cross_ctx ctx;
+ enum clocksource_ids cs_id;
+ u8 hw_counter;
+ int ret;
+
+ if (!vio_ptp->have_cross)
+ return -EOPNOTSUPP;
+
+ ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
+ if (ret)
+ return ret;
+
+ ktime_get_snapshot(&history_begin);
+ if (history_begin.cs_id != cs_id)
+ return -EOPNOTSUPP;
+
+ /*
+ * Getting the timestamp can take many milliseconds with a slow Virtio
+ * device. This is too long for viortc_ptp_get_time_fn() passed to
+	 * get_device_system_crosststamp(), which usually has to return before
+ * the timekeeper seqcount increases (every tick or so).
+ *
+ * So, get the actual cross-timestamp first.
+ */
+ ret = viortc_ptp_do_xtstamp(vio_ptp, hw_counter, cs_id, &ctx);
+ if (ret)
+ return ret;
+
+ ret = get_device_system_crosststamp(viortc_ptp_get_time_fn, &ctx,
+ &history_begin, xtstamp);
+ if (ret)
+ pr_debug("%s: get_device_system_crosststamp() returned %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/* viortc_ptp_adjfine() - unsupported PTP clock adjfine op */
+static int viortc_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+/* viortc_ptp_adjtime() - unsupported PTP clock adjtime op */
+static int viortc_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+/* viortc_ptp_settime64() - unsupported PTP clock settime64 op */
+static int viortc_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * viortc_ptp_gettimex64() - PTP clock gettimex64 op
+ *
+ * Context: Process context.
+ */
+static int viortc_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct viortc_ptp_clock *vio_ptp =
+ container_of(ptp, struct viortc_ptp_clock, ptp_info);
+ int ret;
+ u64 ns;
+
+ ptp_read_system_prets(sts);
+ ret = viortc_read(vio_ptp->viortc, vio_ptp->vio_clk_id, &ns);
+ ptp_read_system_postts(sts);
+
+ if (ret)
+ return ret;
+
+ if (ns > (u64)S64_MAX)
+ return -EINVAL;
+
+ *ts = ns_to_timespec64((s64)ns);
+
+ return 0;
+}
+
+/* viortc_ptp_enable() - unsupported PTP clock enable op */
+static int viortc_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * viortc_ptp_info_template - ptp_clock_info template
+ *
+ * The .name member will be set for individual virtio_rtc PTP clocks.
+ *
+ * The .getcrosststamp member will be cleared for PTP clocks not supporting
+ * crosststamp.
+ */
+static const struct ptp_clock_info viortc_ptp_info_template = {
+ .owner = THIS_MODULE,
+ /* .name is set according to clock type */
+ .adjfine = viortc_ptp_adjfine,
+ .adjtime = viortc_ptp_adjtime,
+ .gettimex64 = viortc_ptp_gettimex64,
+ .settime64 = viortc_ptp_settime64,
+ .enable = viortc_ptp_enable,
+ .getcrosststamp = viortc_ptp_getcrosststamp,
+};
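
A minimal userspace sketch (not part of this patch): the PHC registered below
is read through the dynamic POSIX clock interface, which ends up in
viortc_ptp_gettimex64() above. /dev/ptp0 is an assumption; match the right
instance via the "clock_name" sysfs attribute.

#include <fcntl.h>
#include <stdio.h>
#include <time.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
		return 1;
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
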
+
+/**
+ * viortc_ptp_unregister() - PTP clock unregistering wrapper
+ * @vio_ptp: virtio_rtc PTP clock
+ * @parent_dev: parent device of PTP clock
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev)
+{
+ int ret = ptp_clock_unregister(vio_ptp->ptp_clock);
+
+ if (!ret)
+ devm_kfree(parent_dev, vio_ptp);
+
+ return ret;
+}
+
+/**
+ * viortc_ptp_get_cross_cap() - get xtstamp support info from device
+ * @viortc: virtio_rtc device data
+ * @vio_ptp: virtio_rtc PTP clock abstraction
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_get_cross_cap(struct viortc_dev *viortc,
+ struct viortc_ptp_clock *vio_ptp)
+{
+ enum clocksource_ids cs_id;
+ bool xtstamp_supported;
+ u8 hw_counter;
+ int ret;
+
+ ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
+ if (ret) {
+ vio_ptp->have_cross = false;
+ return 0;
+ }
+
+ ret = viortc_cross_cap(viortc, vio_ptp->vio_clk_id, hw_counter,
+ &xtstamp_supported);
+ if (ret)
+ return ret;
+
+ vio_ptp->have_cross = xtstamp_supported;
+
+ return 0;
+}
+
+/**
+ * viortc_ptp_register() - prepare and register PTP clock
+ * @viortc: virtio_rtc device data
+ * @parent_dev: parent device for PTP clock
+ * @vio_clk_id: id of virtio_rtc clock which backs PTP clock
+ * @ptp_clock_name: PTP clock name
+ *
+ * Context: Process context.
+ * Return: Pointer on success, ERR_PTR() otherwise; NULL if PTP clock support
+ *	   is not available.
+ */
+struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
+ struct device *parent_dev,
+ u16 vio_clk_id,
+ const char *ptp_clock_name)
+{
+ struct viortc_ptp_clock *vio_ptp;
+ struct ptp_clock *ptp_clock;
+ ssize_t len;
+ int ret;
+
+ vio_ptp = devm_kzalloc(parent_dev, sizeof(*vio_ptp), GFP_KERNEL);
+ if (!vio_ptp)
+ return ERR_PTR(-ENOMEM);
+
+ vio_ptp->viortc = viortc;
+ vio_ptp->vio_clk_id = vio_clk_id;
+ vio_ptp->ptp_info = viortc_ptp_info_template;
+ len = strscpy(vio_ptp->ptp_info.name, ptp_clock_name,
+ sizeof(vio_ptp->ptp_info.name));
+ if (len < 0) {
+ ret = len;
+ goto err_free_dev;
+ }
+
+ ret = viortc_ptp_get_cross_cap(viortc, vio_ptp);
+ if (ret)
+ goto err_free_dev;
+
+ if (!vio_ptp->have_cross)
+ vio_ptp->ptp_info.getcrosststamp = NULL;
+
+ ptp_clock = ptp_clock_register(&vio_ptp->ptp_info, parent_dev);
+ if (IS_ERR(ptp_clock))
+ goto err_on_register;
+
+ vio_ptp->ptp_clock = ptp_clock;
+
+ return vio_ptp;
+
+err_on_register:
+ ret = PTR_ERR(ptp_clock);
+
+err_free_dev:
+ devm_kfree(parent_dev, vio_ptp);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index e803db0da307..1f60c9d5cb18 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -358,35 +358,33 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
- struct irq_affinity default_affd = { 0 };
struct cpumask *masks;
struct vdpa_callback cb;
bool has_affinity = desc && ops->set_vq_affinity;
int i, err, queue_idx = 0;
if (has_affinity) {
- masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
+ masks = create_affinity_masks(nvqs, desc);
if (!masks)
return -ENOMEM;
}
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
- callbacks[i], names[i], ctx ?
- ctx[i] : false);
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto err_setup_vq;