Diffstat (limited to 'drivers/pci/controller/pci-hyperv.c')
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 2481
1 file changed, 1968 insertions(+), 513 deletions(-)
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 9ba4d12c179c..1e237d3538f9 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -40,15 +40,18 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
-#include <linux/irqdomain.h>
-#include <asm/irqdomain.h>
-#include <asm/apic.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
+#include <linux/irqdomain.h>
+#include <linux/acpi.h>
+#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -63,6 +66,8 @@
enum pci_protocol_version_t {
PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
+ PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
+ PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), /* WS2022 */
};
#define CPU_AFFINITY_ALL -1ULL
@@ -72,15 +77,12 @@ enum pci_protocol_version_t {
* first.
*/
static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_4,
+ PCI_PROTOCOL_VERSION_1_3,
PCI_PROTOCOL_VERSION_1_2,
PCI_PROTOCOL_VERSION_1_1,
};
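A minimal sketch (using a hypothetical host_accepts() helper) of the newest-first walk this table drives; the real logic lives in hv_pci_protocol_negotiation(), which proposes each entry to the host in order and keeps the first one the host accepts:

static int negotiate_sketch(struct hv_pcibus_device *hbus)
{
	int i;

	/* Try the newest protocol first; fall back to older versions. */
	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
		if (host_accepts(hbus, pci_protocol_versions[i])) { /* hypothetical */
			hbus->protocol_version = pci_protocol_versions[i];
			return 0;
		}
	}

	return -EPROTO;
}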
-/*
- * Protocol version negotiated by hv_pci_protocol_negotiation().
- */
-static enum pci_protocol_version_t pci_protocol_version;
-
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -93,6 +95,13 @@ static enum pci_protocol_version_t pci_protocol_version;
#define SLOT_NAME_SIZE 11
/*
+ * Size of requestor for VMbus; the value is based on the observation
+ * that having more than one request outstanding is 'rare', and so 64
+ * should be generous in ensuring that we don't ever run out.
+ */
+#define HV_PCI_RQSTOR_SIZE 64
+
+/*
* Message Types
*/
@@ -124,6 +133,9 @@ enum pci_message_type {
PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
+ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
+ PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A,
+ PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B,
PCI_MESSAGE_MAXIMUM
};
@@ -169,6 +181,26 @@ struct pci_function_description {
u32 ser; /* serial number */
} __packed;
+enum pci_device_description_flags {
+ HV_PCI_DEVICE_FLAG_NONE = 0x0,
+ HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
+};
+
+struct pci_function_description2 {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+ u16 reserved;
+} __packed;
+
/**
* struct hv_msi_desc
* @vector: IDT entry
@@ -217,6 +249,21 @@ struct hv_msi_desc2 {
u16 processor_array[32];
} __packed;
+/*
+ * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
+ * Everything is the same as in 'hv_msi_desc2' except that the size of the
+ * 'vector' field is larger to support bigger vector values, for example
+ * LPI vectors on ARM.
+ */
+struct hv_msi_desc3 {
+ u32 vector;
+ u8 delivery_mode;
+ u8 reserved;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
+
/**
* struct tran_int_desc
* @reserved: unused, padding
@@ -264,8 +311,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[0];
};
/*
@@ -301,13 +346,19 @@ struct pci_bus_d0_entry {
struct pci_bus_relations {
struct pci_incoming_message incoming;
u32 device_count;
- struct pci_function_description func[0];
+ struct pci_function_description func[];
+} __packed;
+
+struct pci_bus_relations2 {
+ struct pci_incoming_message incoming;
+ u32 device_count;
+ struct pci_function_description2 func[];
} __packed;
struct pci_q_res_req_response {
struct vmpacket_descriptor hdr;
s32 status; /* negative values are failures */
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;
struct pci_set_power {
@@ -359,69 +410,64 @@ struct pci_create_interrupt2 {
struct hv_msi_desc2 int_desc;
} __packed;
-struct pci_delete_interrupt {
+struct pci_create_interrupt3 {
struct pci_message message_type;
union win_slot_encoding wslot;
- struct tran_int_desc int_desc;
+ struct hv_msi_desc3 int_desc;
} __packed;
-struct pci_dev_incoming {
- struct pci_incoming_message incoming;
+struct pci_delete_interrupt {
+ struct pci_message message_type;
union win_slot_encoding wslot;
+ struct tran_int_desc int_desc;
} __packed;
-struct pci_eject_response {
+/*
+ * Note: the VM must pass a valid block id, wslot and bytes_requested.
+ */
+struct pci_read_block {
struct pci_message message_type;
+ u32 block_id;
union win_slot_encoding wslot;
- u32 status;
+ u32 bytes_requested;
} __packed;
-static int pci_ring_size = (4 * PAGE_SIZE);
+struct pci_read_block_response {
+ struct vmpacket_descriptor hdr;
+ u32 status;
+ u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
/*
- * Definitions or interrupt steering hypercall.
+ * Note: the VM must pass a valid block id, wslot and byte_count.
*/
-#define HV_PARTITION_ID_SELF ((u64)-1)
-#define HVCALL_RETARGET_INTERRUPT 0x7e
-
-struct hv_interrupt_entry {
- u32 source; /* 1 for MSI(-X) */
- u32 reserved1;
- u32 address;
- u32 data;
-};
-
-#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */
-
-struct hv_vp_set {
- u64 format; /* 0 (HvGenericSetSparse4k) */
- u64 valid_banks;
- u64 masks[HV_VP_SET_BANK_COUNT_MAX];
-};
+struct pci_write_block {
+ struct pci_message message_type;
+ u32 block_id;
+ union win_slot_encoding wslot;
+ u32 byte_count;
+ u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
-/*
- * flags for hv_device_interrupt_target.flags
- */
-#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
-#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+struct pci_dev_inval_block {
+ struct pci_incoming_message incoming;
+ union win_slot_encoding wslot;
+ u64 block_mask;
+} __packed;
-struct hv_device_interrupt_target {
- u32 vector;
- u32 flags;
- union {
- u64 vp_mask;
- struct hv_vp_set vp_set;
- };
-};
+struct pci_dev_incoming {
+ struct pci_incoming_message incoming;
+ union win_slot_encoding wslot;
+} __packed;
-struct retarget_msi_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
- struct hv_interrupt_entry int_entry;
- u64 reserved2;
- struct hv_device_interrupt_target int_target;
+struct pci_eject_response {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u32 status;
} __packed;
+static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
+
/*
* Driver specific state.
*/
@@ -430,14 +476,24 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
- hv_pcibus_removed,
+ hv_pcibus_removing,
hv_pcibus_maximum
};
struct hv_pcibus_device {
+#ifdef CONFIG_X86
struct pci_sysdata sysdata;
+#elif defined(CONFIG_ARM64)
+ struct pci_config_window sysdata;
+#endif
+ struct pci_host_bridge *bridge;
+ struct fwnode_handle *fwnode;
+ /* Protocol version negotiated with the host */
+ enum pci_protocol_version_t protocol_version;
+
+ struct mutex state_lock;
enum hv_pcibus_state state;
- refcount_t remove_lock;
+
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -445,27 +501,21 @@ struct hv_pcibus_device {
struct resource *low_mmio_res;
struct resource *high_mmio_res;
struct completion *survey_event;
- struct completion remove_event;
struct pci_bus *pci_bus;
spinlock_t config_lock; /* Avoid two threads writing index page */
spinlock_t device_list_lock; /* Protect lists below */
void __iomem *cfg_addr;
- struct list_head resources_for_children;
-
struct list_head children;
struct list_head dr_list;
- struct msi_domain_info msi_info;
- struct msi_controller msi_chip;
struct irq_domain *irq_domain;
- /* hypercall arg, must not cross page boundary */
- struct retarget_msi_interrupt retarget_msi_interrupt_params;
-
- spinlock_t retarget_msi_interrupt_lock;
-
struct workqueue_struct *wq;
+
+ /* Highest slot of child device with resources allocated */
+ int wslot_res_allocated;
+ bool use_calls; /* Use hypercalls to access mmio cfg space */
};
/*
@@ -478,36 +528,44 @@ struct hv_dr_work {
struct hv_pcibus_device *bus;
};
+struct hv_pcidev_description {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+};
+
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct pci_function_description func[0];
-};
-
-enum hv_pcichild_state {
- hv_pcichild_init = 0,
- hv_pcichild_requirements,
- hv_pcichild_resourced,
- hv_pcichild_ejecting,
- hv_pcichild_maximum
+ struct hv_pcidev_description func[] __counted_by(device_count);
};
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
refcount_t refs;
- enum hv_pcichild_state state;
struct pci_slot *pci_slot;
- struct pci_function_description desc;
+ struct hv_pcidev_description desc;
bool reported_missing;
struct hv_pcibus_device *hbus;
struct work_struct wrk;
+ void (*block_invalidate)(void *context, u64 block_mask);
+ void *invalidate_context;
+
/*
* What would be observed if one wrote 0xFFFFFFFF to a BAR and then
* read it back, for each of the BAR offsets within config space.
*/
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
};
struct hv_pci_compl {
@@ -517,6 +575,437 @@ struct hv_pci_compl {
static void hv_pci_onchannelcallback(void *context);
+#ifdef CONFIG_X86
+#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
+#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_ACK
+
+static int hv_pci_irqchip_init(void)
+{
+ return 0;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return x86_vector_domain;
+}
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ return cfg->vector;
+}
+
+#define hv_msi_prepare pci_msi_prepare
+
+/**
+ * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data: Describes the IRQ
+ *
+ * Build a new destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+static void hv_irq_retarget_interrupt(struct irq_data *data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+ struct hv_retarget_device_interrupt *params;
+ struct tran_int_desc *int_desc;
+ struct hv_pcibus_device *hbus;
+ const struct cpumask *dest;
+ cpumask_var_t tmp;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ u32 var_size = 0;
+ int cpu, nr_bank;
+ u64 res;
+
+ dest = irq_data_get_effective_affinity_mask(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ int_desc = data->chip_data;
+ if (!int_desc) {
+ dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
+ __func__, data->irq);
+ return;
+ }
+
+ local_irq_save(flags);
+
+ params = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
+ params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
+ params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ (hbus->hdev->dev_instance.b[4] << 16) |
+ (hbus->hdev->dev_instance.b[7] << 8) |
+ (hbus->hdev->dev_instance.b[6] & 0xf8) |
+ PCI_FUNC(pdev->devfn);
+ params->int_target.vector = hv_msi_get_int_vector(data);
+
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ /*
+ * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+ * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+ * with >64 VP support.
+ * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+ * is not sufficient for this hypercall.
+ */
+ params->int_target.flags |=
+ HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+ if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+ res = 1;
+ goto out;
+ }
+
+ cpumask_and(tmp, dest, cpu_online_mask);
+ nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+ free_cpumask_var(tmp);
+
+ if (nr_bank <= 0) {
+ res = 1;
+ goto out;
+ }
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_bank_mask
+ * does).
+ */
+ var_size = 1 + nr_bank;
+ } else {
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ params->int_target.vp_mask |=
+ (1ULL << hv_cpu_number_to_vp_number(cpu));
+ }
+ }
+
+ res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+ params, NULL);
+
+out:
+ local_irq_restore(flags);
+
+ /*
+ * During hibernation, when a CPU is offlined, the kernel tries
+ * to move the interrupt to the remaining CPUs that haven't
+ * been offlined yet. In this case, the below hv_do_hypercall()
+ * always fails since the vmbus channel has been closed:
+ * refer to cpu_disable_common() -> fixup_irqs() ->
+ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
+ *
+ * Suppress the error message for hibernation because the failure
+ * during hibernation does not matter (at this time all the devices
+ * have been frozen). Note: the correct affinity info is still updated
+ * into the irqdata data structure in migrate_one_irq() ->
+ * irq_do_set_affinity(), so later when the VM resumes,
+ * hv_pci_restore_msi_state() is able to correctly restore the
+ * interrupt with the correct affinity.
+ */
+ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
+ dev_err(&hbus->hdev->device,
+ "%s() failed: %#llx", __func__, res);
+}
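As background (not part of this patch): the var_size << 17 above follows the hypercall input-value layout from the Hyper-V TLFS, where bits 0-15 carry the call code and bits 17-26 the variable header size in 8-byte units. A one-line sketch of the control word being composed:

	/* Call code in bits 0-15, variable header size in bits 17-26. */
	u64 control = HVCALL_RETARGET_INTERRUPT | ((u64)var_size << 17);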
+
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ if (hv_root_partition())
+ /*
+ * In case of the nested root partition, the nested hypervisor
+ * is taking care of interrupt remapping and thus the
+ * MAP_DEVICE_INTERRUPT hypercall is required instead of
+ * RETARGET_INTERRUPT.
+ */
+ (void)hv_map_msi_interrupt(data, NULL);
+ else
+ hv_irq_retarget_interrupt(data);
+}
+#elif defined(CONFIG_ARM64)
+/*
+ * SPI vectors to use for vPCI; the architectural SPI range is [32, 1019].
+ * Leave a bit of room at the start so that SPIs can still be specified
+ * through ACPI, and start at a power of two to satisfy the power-of-2
+ * multi-MSI requirement.
+ */
+#define HV_PCI_MSI_SPI_START 64
+#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
+#define DELIVERY_MODE 0
+#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_EOI
+#define hv_msi_prepare NULL
+
+struct hv_pci_chip_data {
+ DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
+ struct mutex map_lock;
+};
+
+/* Hyper-V vPCI MSI GIC IRQ domain */
+static struct irq_domain *hv_msi_gic_irq_domain;
+
+/* Hyper-V PCI MSI IRQ chip */
+static struct irq_chip hv_arm64_msi_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent
+};
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
+{
+ return irqd->parent_data->hwirq;
+}
+
+/*
+ * @nr_bm_irqs: Indicates the number of IRQs that were allocated from
+ * the bitmap.
+ * @nr_dom_irqs: Indicates the number of IRQs that were allocated from
+ * the parent domain.
+ */
+static void hv_pci_vec_irq_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_bm_irqs,
+ unsigned int nr_dom_irqs)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ int first = d->hwirq - HV_PCI_MSI_SPI_START;
+ int i;
+
+ mutex_lock(&chip_data->map_lock);
+ bitmap_release_region(chip_data->spi_map,
+ first,
+ get_count_order(nr_bm_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ for (i = 0; i < nr_dom_irqs; i++) {
+ if (i)
+ d = irq_domain_get_irq_data(domain, virq + i);
+ irq_domain_reset_irq_data(d);
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
+}
+
+static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
+}
+
+static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
+ unsigned int nr_irqs,
+ irq_hw_number_t *hwirq)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ int index;
+
+ /* Find and allocate region from the SPI bitmap */
+ mutex_lock(&chip_data->map_lock);
+ index = bitmap_find_free_region(chip_data->spi_map,
+ HV_PCI_MSI_SPI_NR,
+ get_count_order(nr_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ if (index < 0)
+ return -ENOSPC;
+
+ *hwirq = index + HV_PCI_MSI_SPI_START;
+
+ return 0;
+}
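A note on the region size above: bitmap_find_free_region() hands out naturally aligned power-of-2 regions, so get_count_order() rounds the request up. A small worked example (values easy to verify; not from this patch):

	int order = get_count_order(3);	/* == 2 for a 3-MSI request */
	int reserved = 1 << order;	/* 4 contiguous, aligned SPIs */

This is what keeps multi-MSI blocks power-of-2 aligned, matching the HV_PCI_MSI_SPI_START choice noted earlier.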
+
+static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ /*
+ * Since the interrupt specifier is not coming from ACPI or DT, the
+ * trigger type will need to be set explicitly. Otherwise, it will be
+ * set to whatever is in the GIC configuration.
+ */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ irq_hw_number_t hwirq;
+ unsigned int i;
+ int ret;
+
+ ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
+ hwirq + i);
+ if (ret) {
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq + i,
+ &hv_arm64_msi_irq_chip,
+ domain->host_data);
+ pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * Pick the first cpu as the irq affinity that can be temporarily used for
+ * composing MSI from the hypervisor. GIC will eventually set the right
+ * affinity for the irq and the 'unmask' will retarget the interrupt to that
+ * cpu.
+ */
+static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ int cpu = cpumask_first(cpu_present_mask);
+
+ irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
+
+ return 0;
+}
+
+static const struct irq_domain_ops hv_pci_domain_ops = {
+ .alloc = hv_pci_vec_irq_domain_alloc,
+ .free = hv_pci_vec_irq_domain_free,
+ .activate = hv_pci_vec_irq_domain_activate,
+};
+
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
+static int hv_pci_irqchip_init(void)
+{
+ static struct hv_pci_chip_data *chip_data;
+ struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
+ int ret = -ENOMEM;
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return ret;
+
+ mutex_init(&chip_data->map_lock);
+ fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
+ if (!fn)
+ goto free_chip;
+
+ /*
+ * Once enabled, the IRQ domain should not be removed, since there is no
+ * way to ensure that all the corresponding devices are also gone and
+ * that no further interrupts will be generated.
+ */
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
+
+ if (!hv_msi_gic_irq_domain) {
+ pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
+ goto free_chip;
+ }
+
+ return 0;
+
+free_chip:
+ kfree(chip_data);
+ if (fn)
+ irq_domain_free_fwnode(fn);
+
+ return ret;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return hv_msi_gic_irq_domain;
+}
+
+/*
+ * SPIs are used for the interrupts of PCI devices and are managed via the
+ * GICD registers, which Hyper-V already supports, so no hypercall is needed.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data) { }
+#endif /* CONFIG_ARM64 */
+
/**
* hv_pci_generic_compl() - Invoked for a completion packet
* @context: Set up by the sender of the packet.
@@ -532,11 +1021,7 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
{
struct hv_pci_compl *comp_pkt = context;
- if (resp_packet_size >= offsetofend(struct pci_response, status))
- comp_pkt->completion_status = resp->status;
- else
- comp_pkt->completion_status = -1;
-
+ comp_pkt->completion_status = resp->status;
complete(&comp_pkt->host_event);
}
@@ -554,9 +1039,6 @@ static void put_pcichild(struct hv_pci_dev *hpdev)
kfree(hpdev);
}
-static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
-static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
-
/*
* There is no good way to get notified from vmbus_onoffer_rescind(),
* so let's use polling here, since this is not a hot path.
@@ -612,6 +1094,70 @@ static int wslot_to_devfn(u32 wslot)
return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}
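A tiny round-trip sketch of the slot encoding; devfn_to_wslot() is the counterpart helper defined alongside this one in the file:

	u32 wslot = devfn_to_wslot(PCI_DEVFN(0, 2));	/* device 0, function 2 */
	WARN_ON(wslot_to_devfn(wslot) != PCI_DEVFN(0, 2));	/* round-trips */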
+static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
+{
+ struct hv_mmio_read_input *in;
+ struct hv_mmio_read_output *out;
+ u64 ret;
+
+ /*
+ * Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument page. Use it for
+ * both input and output.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
+ in->gpa = gpa;
+ in->size = size;
+
+ ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
+ if (hv_result_success(ret)) {
+ switch (size) {
+ case 1:
+ *val = *(u8 *)(out->data);
+ break;
+ case 2:
+ *val = *(u16 *)(out->data);
+ break;
+ default:
+ *val = *(u32 *)(out->data);
+ break;
+ }
+ } else
+ dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
+static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
+{
+ struct hv_mmio_write_input *in;
+ u64 ret;
+
+ /*
+ * Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument memory.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ in->gpa = gpa;
+ in->size = size;
+ switch (size) {
+ case 1:
+ *(u8 *)(in->data) = val;
+ break;
+ case 2:
+ *(u16 *)(in->data) = val;
+ break;
+ default:
+ *(u32 *)(in->data) = val;
+ break;
+ }
+
+ ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
+ if (!hv_result_success(ret))
+ dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
/*
* PCI Configuration Space for these root PCI buses is implemented as a pair
* of pages in memory-mapped I/O space. Writing to the first page chooses
@@ -630,8 +1176,10 @@ static int wslot_to_devfn(u32 wslot)
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
int size, u32 *val)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ int offset = where + CFG_PAGE_OFFSET;
unsigned long flags;
- void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
/*
* If the attempt is to read the IDs or the ROM BAR, simulate that.
@@ -650,8 +1198,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
@@ -659,56 +1207,79 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
*/
*val = 0;
} else if (where + size <= CFG_PAGE_SIZE) {
- spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
- /* Choose the function to be read. (See comment above) */
- writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
- /* Make sure the function was chosen before we start reading. */
- mb();
- /* Read from that function's config space. */
- switch (size) {
- case 1:
- *val = readb(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- default:
- *val = readl(addr);
- break;
+
+ spin_lock_irqsave(&hbus->config_lock, flags);
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start + offset;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_read_mmio(dev, addr, size, val);
+ } else {
+ void __iomem *addr = hbus->cfg_addr + offset;
+
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before reading. */
+ mb();
+ /* Read from that function's config space. */
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ break;
+ }
+ /*
+ * Make sure the read was done before we release the
+ * spinlock allowing consecutive reads/writes.
+ */
+ mb();
}
- /*
- * Make sure the read was done before we release the spinlock
- * allowing consecutive reads/writes.
- */
- mb();
- spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
} else {
- dev_err(&hpdev->hbus->hdev->device,
- "Attempt to read beyond a function's config space.\n");
+ dev_err(dev, "Attempt to read beyond a function's config space.\n");
}
}
static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ u32 val;
u16 ret;
unsigned long flags;
- void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
- PCI_VENDOR_ID;
- spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+ spin_lock_irqsave(&hbus->config_lock, flags);
- /* Choose the function to be read. (See comment above) */
- writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
- /* Make sure the function was chosen before we start reading. */
- mb();
- /* Read from that function's config space. */
- ret = readw(addr);
- /*
- * mb() is not required here, because the spin_unlock_irqrestore()
- * is a barrier.
- */
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start +
+ CFG_PAGE_OFFSET + PCI_VENDOR_ID;
- spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_read_mmio(dev, addr, 2, &val);
+ ret = val; /* Truncates to 16 bits */
+ } else {
+ void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
+ PCI_VENDOR_ID;
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before we start reading. */
+ mb();
+ /* Read from that function's config space. */
+ ret = readw(addr);
+ /*
+ * mb() is not required here, because the
+ * spin_unlock_irqrestore() is a barrier.
+ */
+ }
+
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
return ret;
}
@@ -723,39 +1294,51 @@ static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
int size, u32 val)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ int offset = where + CFG_PAGE_OFFSET;
unsigned long flags;
- void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
where + size <= PCI_CAPABILITY_LIST) {
/* SSIDs and ROM BARs are read-only */
} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
- spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
- /* Choose the function to be written. (See comment above) */
- writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
- /* Make sure the function was chosen before we start writing. */
- wmb();
- /* Write to that function's config space. */
- switch (size) {
- case 1:
- writeb(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- default:
- writel(val, addr);
- break;
+ spin_lock_irqsave(&hbus->config_lock, flags);
+
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start + offset;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_write_mmio(dev, addr, size, val);
+ } else {
+ void __iomem *addr = hbus->cfg_addr + offset;
+
+ /* Choose the function to write. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before writing. */
+ wmb();
+ /* Write to that function's config space. */
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ break;
+ }
+ /*
+ * Make sure the write was done before we release the
+ * spinlock allowing consecutive reads/writes.
+ */
+ mb();
}
- /*
- * Make sure the write was done before we release the spinlock
- * allowing consecutive reads/writes.
- */
- mb();
- spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
} else {
- dev_err(&hpdev->hbus->hdev->device,
- "Attempt to write beyond a function's config space.\n");
+ dev_err(dev, "Attempt to write beyond a function's config space.\n");
}
}
@@ -821,6 +1404,254 @@ static struct pci_ops hv_pcifront_ops = {
.write = hv_pcifront_write_config,
};
+/*
+ * Paravirtual backchannel
+ *
+ * Hyper-V SR-IOV provides a backchannel mechanism in software for
+ * communication between a VF driver and a PF driver. These
+ * "configuration blocks" are similar in concept to PCI configuration space,
+ * but instead of doing reads and writes in 32-bit chunks through a very slow
+ * path, packets of up to 128 bytes can be sent or received asynchronously.
+ *
+ * Nearly every SR-IOV device contains just such a communications channel in
+ * hardware, so using this one in software is usually optional. Using the
+ * software channel, however, allows driver implementers to leverage software
+ * tools that fuzz the communications channel looking for vulnerabilities.
+ *
+ * The usage model for these packets puts the responsibility for reading or
+ * writing on the VF driver. The VF driver sends a read or a write packet,
+ * indicating which "block" is being referred to by number.
+ *
+ * If the PF driver wishes to initiate communication, it can "invalidate" one or
+ * more of the first 64 blocks. This invalidation is delivered via a callback
+ * supplied to the VF driver by this driver.
+ *
+ * No protocol is implied, except that supplied by the PF and VF drivers.
+ */
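A minimal VF-side sketch of the usage model just described, calling the helpers defined below with an assumed struct pci_dev *pdev (real consumers reach these helpers through an interface layer rather than calling them directly, and the block contents mean whatever the PF and VF drivers agree on):

	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
	unsigned int returned;

	if (hv_read_config_block(pdev, buf, sizeof(buf), 0, &returned) == 0) {
		pr_info("block 0: host returned %u bytes\n", returned);
		/* Echo the data back into (hypothetical) block 1. */
		hv_write_config_block(pdev, buf, returned, 1);
	}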
+
+struct hv_read_config_compl {
+ struct hv_pci_compl comp_pkt;
+ void *buf;
+ unsigned int len;
+ unsigned int bytes_returned;
+};
+
+/**
+ * hv_pci_read_config_compl() - Invoked when a response packet
+ * for a read config block operation arrives.
+ * @context: Identifies the read config operation
+ * @resp: The response packet itself
+ * @resp_packet_size: Size in bytes of the response packet
+ */
+static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_read_config_compl *comp = context;
+ struct pci_read_block_response *read_resp =
+ (struct pci_read_block_response *)resp;
+ unsigned int data_len, hdr_len;
+
+ hdr_len = offsetof(struct pci_read_block_response, bytes);
+ if (resp_packet_size < hdr_len) {
+ comp->comp_pkt.completion_status = -1;
+ goto out;
+ }
+
+ data_len = resp_packet_size - hdr_len;
+ if (data_len > 0 && read_resp->status == 0) {
+ comp->bytes_returned = min(comp->len, data_len);
+ memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
+ } else {
+ comp->bytes_returned = 0;
+ }
+
+ comp->comp_pkt.completion_status = read_resp->status;
+out:
+ complete(&comp->comp_pkt.host_event);
+}
+
+/**
+ * hv_read_config_block() - Sends a read config block request to
+ * the back-end driver running in the Hyper-V parent partition.
+ * @pdev: The PCI driver's representation for this device.
+ * @buf: Buffer into which the config block will be copied.
+ * @len: Size in bytes of buf.
+ * @block_id: Identifies the config block which has been requested.
+ * @bytes_returned: Size which came back from the back-end driver.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_read_config_block(struct pci_dev *pdev, void *buf,
+ unsigned int len, unsigned int block_id,
+ unsigned int *bytes_returned)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct {
+ struct pci_packet pkt;
+ char buf[sizeof(struct pci_read_block)];
+ } pkt;
+ struct hv_read_config_compl comp_pkt;
+ struct pci_read_block *read_blk;
+ int ret;
+
+ if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ init_completion(&comp_pkt.comp_pkt.host_event);
+ comp_pkt.buf = buf;
+ comp_pkt.len = len;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.pkt.completion_func = hv_pci_read_config_compl;
+ pkt.pkt.compl_ctxt = &comp_pkt;
+ read_blk = (struct pci_read_block *)pkt.buf;
+ read_blk->message_type.type = PCI_READ_BLOCK;
+ read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+ read_blk->block_id = block_id;
+ read_blk->bytes_requested = len;
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
+ sizeof(*read_blk), (unsigned long)&pkt.pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
+ if (ret)
+ return ret;
+
+ if (comp_pkt.comp_pkt.completion_status != 0 ||
+ comp_pkt.bytes_returned == 0) {
+ dev_err(&hbus->hdev->device,
+ "Read Config Block failed: 0x%x, bytes_returned=%d\n",
+ comp_pkt.comp_pkt.completion_status,
+ comp_pkt.bytes_returned);
+ return -EIO;
+ }
+
+ *bytes_returned = comp_pkt.bytes_returned;
+ return 0;
+}
+
+/**
+ * hv_pci_write_config_compl() - Invoked when a response packet for a write
+ * config block operation arrives.
+ * @context: Identifies the write config operation
+ * @resp: The response packet itself
+ * @resp_packet_size: Size in bytes of the response packet
+ */
+static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_pci_compl *comp_pkt = context;
+
+ comp_pkt->completion_status = resp->status;
+ complete(&comp_pkt->host_event);
+}
+
+/**
+ * hv_write_config_block() - Sends a write config block request to the
+ * back-end driver running in the Hyper-V parent partition.
+ * @pdev: The PCI driver's representation for this device.
+ * @buf: Buffer from which the config block will be copied.
+ * @len: Size in bytes of buf.
+ * @block_id: Identifies the config block which is being written.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_write_config_block(struct pci_dev *pdev, void *buf,
+ unsigned int len, unsigned int block_id)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct {
+ struct pci_packet pkt;
+ char buf[sizeof(struct pci_write_block)];
+ u32 reserved;
+ } pkt;
+ struct hv_pci_compl comp_pkt;
+ struct pci_write_block *write_blk;
+ u32 pkt_size;
+ int ret;
+
+ if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ init_completion(&comp_pkt.host_event);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.pkt.completion_func = hv_pci_write_config_compl;
+ pkt.pkt.compl_ctxt = &comp_pkt;
+ write_blk = (struct pci_write_block *)pkt.buf;
+ write_blk->message_type.type = PCI_WRITE_BLOCK;
+ write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+ write_blk->block_id = block_id;
+ write_blk->byte_count = len;
+ memcpy(write_blk->bytes, buf, len);
+ pkt_size = offsetof(struct pci_write_block, bytes) + len;
+ /*
+ * This quirk is required on some hosts shipped around 2018, because
+ * these hosts don't check the pkt_size correctly (new hosts have been
+ * fixed since early 2019). The quirk is also safe on very old hosts
+ * and new hosts, because, on them, what really matters is the length
+ * specified in write_blk->byte_count.
+ */
+ pkt_size += sizeof(pkt.reserved);
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
+ (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
+ if (ret)
+ return ret;
+
+ if (comp_pkt.completion_status != 0) {
+ dev_err(&hbus->hdev->device,
+ "Write Config Block failed: 0x%x\n",
+ comp_pkt.completion_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hv_register_block_invalidate() - Invoked when a config block invalidation
+ * arrives from the back-end driver.
+ * @pdev: The PCI driver's representation for this device.
+ * @context: Identifies the device.
+ * @block_invalidate: Identifies all of the blocks being invalidated.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev)
+ return -ENODEV;
+
+ hpdev->block_invalidate = block_invalidate;
+ hpdev->invalidate_context = context;
+
+ put_pcichild(hpdev);
+ return 0;
+
+}
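And a sketch of the receiving end of the PF-initiated invalidation described earlier, with a hypothetical VF-driver callback; bit n of block_mask being set means config block n was invalidated:

	static void my_vf_block_invalidate(void *context, u64 block_mask)
	{
		/* E.g. queue work to re-read block 0 if it was invalidated. */
		if (block_mask & BIT_ULL(0))
			schedule_work(context);
	}

	/* At probe time, with my_work a hypothetical work_struct: */
	hv_register_block_invalidate(pdev, &my_work, my_vf_block_invalidate);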
+
/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
struct tran_int_desc *int_desc)
@@ -831,21 +1662,24 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
+ if (!int_desc->vector_count) {
+ kfree(int_desc);
+ return;
+ }
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc = *int_desc;
vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
- (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
+ 0, VM_PKT_DATA_INBAND, 0);
kfree(int_desc);
}
/**
* hv_msi_free() - Free the MSI.
* @domain: The interrupt domain pointer
- * @info: Extra MSI-related context
* @irq: Identifies the IRQ.
*
* The Hyper-V parent partition and hypervisor are tracking the
@@ -853,8 +1687,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
* table up to date. This callback sends a message that frees
* the IRT entry and related tracking nonsense.
*/
-static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int irq)
+static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
{
struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
@@ -864,7 +1697,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
pdev = msi_desc_to_pci_dev(msi);
- hbus = info->data;
+ hbus = domain->host_data;
int_desc = irq_data_get_irq_chip_data(irq_data);
if (!int_desc)
return;
@@ -880,123 +1713,18 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
put_pcichild(hpdev);
}
-static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
- bool force)
-{
- struct irq_data *parent = data->parent_data;
-
- return parent->chip->irq_set_affinity(parent, dest, force);
-}
-
static void hv_irq_mask(struct irq_data *data)
{
- pci_msi_mask_irq(data);
+ if (data->parent_data->chip->irq_mask)
+ irq_chip_mask_parent(data);
}
-/**
- * hv_irq_unmask() - "Unmask" the IRQ by setting its current
- * affinity.
- * @data: Describes the IRQ
- *
- * Build new a destination for the MSI and make a hypercall to
- * update the Interrupt Redirection Table. "Device Logical ID"
- * is built out of this PCI bus's instance GUID and the function
- * number of the device.
- */
static void hv_irq_unmask(struct irq_data *data)
{
- struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
- struct irq_cfg *cfg = irqd_cfg(data);
- struct retarget_msi_interrupt *params;
- struct hv_pcibus_device *hbus;
- struct cpumask *dest;
- struct pci_bus *pbus;
- struct pci_dev *pdev;
- unsigned long flags;
- u32 var_size = 0;
- int cpu_vmbus;
- int cpu;
- u64 res;
-
- dest = irq_data_get_effective_affinity_mask(data);
- pdev = msi_desc_to_pci_dev(msi_desc);
- pbus = pdev->bus;
- hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
-
- spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
-
- params = &hbus->retarget_msi_interrupt_params;
- memset(params, 0, sizeof(*params));
- params->partition_id = HV_PARTITION_ID_SELF;
- params->int_entry.source = 1; /* MSI(-X) */
- params->int_entry.address = msi_desc->msg.address_lo;
- params->int_entry.data = msi_desc->msg.data;
- params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
- (hbus->hdev->dev_instance.b[4] << 16) |
- (hbus->hdev->dev_instance.b[7] << 8) |
- (hbus->hdev->dev_instance.b[6] & 0xf8) |
- PCI_FUNC(pdev->devfn);
- params->int_target.vector = cfg->vector;
-
- /*
- * Honoring apic->irq_delivery_mode set to dest_Fixed by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
- if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
- /*
- * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
- * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
- * with >64 VP support.
- * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
- * is not sufficient for this hypercall.
- */
- params->int_target.flags |=
- HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
- params->int_target.vp_set.valid_banks =
- (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
-
- /*
- * var-sized hypercall, var-size starts after vp_mask (thus
- * vp_set.format does not count, but vp_set.valid_banks does).
- */
- var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
-
- for_each_cpu_and(cpu, dest, cpu_online_mask) {
- cpu_vmbus = hv_cpu_number_to_vp_number(cpu);
-
- if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
- dev_err(&hbus->hdev->device,
- "too high CPU %d", cpu_vmbus);
- res = 1;
- goto exit_unlock;
- }
+ hv_arch_irq_unmask(data);
- params->int_target.vp_set.masks[cpu_vmbus / 64] |=
- (1ULL << (cpu_vmbus & 63));
- }
- } else {
- for_each_cpu_and(cpu, dest, cpu_online_mask) {
- params->int_target.vp_mask |=
- (1ULL << hv_cpu_number_to_vp_number(cpu));
- }
- }
-
- res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
- params, NULL);
-
-exit_unlock:
- spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
-
- if (res) {
- dev_err(&hbus->hdev->device,
- "%s() failed: %#llx", __func__, res);
- return;
- }
-
- pci_msi_unmask_irq(data);
+ if (data->parent_data->chip->irq_unmask)
+ irq_chip_unmask_parent(data);
}
struct compose_comp_ctxt {
@@ -1011,20 +1739,25 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
struct pci_create_int_response *int_resp =
(struct pci_create_int_response *)resp;
+ if (resp_packet_size < sizeof(*int_resp)) {
+ comp_pkt->comp_pkt.completion_status = -1;
+ goto out;
+ }
comp_pkt->comp_pkt.completion_status = resp->status;
comp_pkt->int_desc = int_resp->int_desc;
+out:
complete(&comp_pkt->comp_pkt.host_event);
}
static u32 hv_compose_msi_req_v1(
- struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ struct pci_create_interrupt *int_pkt,
+ u32 slot, u8 vector, u16 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = dest_Fixed;
+ int_pkt->int_desc.vector_count = vector_count;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
/*
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1035,23 +1768,93 @@ static u32 hv_compose_msi_req_v1(
return sizeof(*int_pkt);
}
-static u32 hv_compose_msi_req_v2(
- struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+/*
+ * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
+ * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
+ * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
+ * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
+ * not irrelevant because Hyper-V chooses the physical CPU to handle the
+ * interrupts based on the vCPU specified in message sent to the vPCI VSP in
+ * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
+ * but assigning too many vPCI device interrupts to the same pCPU can cause a
+ * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
+ * to spread out the pCPUs that it selects.
+ *
+ * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
+ * to always return the same dummy vCPU, because a second call to
+ * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
+ * new pCPU for the interrupt. But for the multi-MSI case, the second call to
+ * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
+ * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
+ * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
+ * the same pCPU, even though the vCPUs will be spread out by later calls
+ * to hv_irq_unmask(), but that is the best we can do now.
+ *
+ * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
+ * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
+ * enhancement is planned for a future version. With that enhancement, the
+ * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
+ * device will be spread across multiple pCPUs.
+ */
+
+/*
+ * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+ * by subsequent retarget in hv_irq_unmask().
+ */
+static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{
+ return cpumask_first_and(affinity, cpu_online_mask);
+}
+
+/*
+ * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
+ */
+static int hv_compose_multi_msi_req_get_cpu(void)
+{
+ static DEFINE_SPINLOCK(multi_msi_cpu_lock);
+
+ /* -1 means starting with CPU 0 */
+ static int cpu_next = -1;
+
+ unsigned long flags;
int cpu;
+ spin_lock_irqsave(&multi_msi_cpu_lock, flags);
+
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
+ cpu = cpu_next;
+
+ spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
+
+ return cpu;
+}
+
+static u32 hv_compose_msi_req_v2(
+ struct pci_create_interrupt2 *int_pkt, int cpu,
+ u32 slot, u8 vector, u16 vector_count)
+{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = dest_Fixed;
+ int_pkt->int_desc.vector_count = vector_count;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
+ int_pkt->int_desc.processor_array[0] =
+ hv_cpu_number_to_vp_number(cpu);
+ int_pkt->int_desc.processor_count = 1;
- /*
- * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
- * by subsequent retarget in hv_irq_unmask().
- */
- cpu = cpumask_first_and(affinity, cpu_online_mask);
+ return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v3(
+ struct pci_create_interrupt3 *int_pkt, int cpu,
+ u32 slot, u32 vector, u16 vector_count)
+{
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.reserved = 0;
+ int_pkt->int_desc.vector_count = vector_count;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1072,36 +1875,59 @@ static u32 hv_compose_msi_req_v2(
*/
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct irq_cfg *cfg = irqd_cfg(data);
struct hv_pcibus_device *hbus;
+ struct vmbus_channel *channel;
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
- struct cpumask *dest;
- unsigned long flags;
+ const struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
+ struct msi_desc *msi_desc;
+ /*
+ * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
+ * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
+ */
+ u16 vector_count;
+ u32 vector;
struct {
struct pci_packet pci_pkt;
union {
struct pci_create_interrupt v1;
struct pci_create_interrupt2 v2;
+ struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
-
+ bool multi_msi;
+ u64 trans_id;
u32 size;
int ret;
+ int cpu;
+
+ msi_desc = irq_data_get_msi_desc(data);
+ multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
+ msi_desc->nvec_used > 1;
+
+ /* Reuse the previous allocation */
+ if (data->chip_data && multi_msi) {
+ int_desc = data->chip_data;
+ msg->address_hi = int_desc->address >> 32;
+ msg->address_lo = int_desc->address & 0xffffffff;
+ msg->data = int_desc->data;
+ return;
+ }
- pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ channel = hbus->hdev->channel;
hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
if (!hpdev)
goto return_null_message;
/* Free any previous message that might have already been composed. */
- if (data->chip_data) {
+ if (data->chip_data && !multi_msi) {
int_desc = data->chip_data;
data->chip_data = NULL;
hv_int_desc_free(hpdev, int_desc);
@@ -1111,24 +1937,71 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (!int_desc)
goto drop_reference;
+ if (multi_msi) {
+ /*
+ * If this is not the first MSI of Multi MSI, we already have
+ * a mapping. Can exit early.
+ */
+ if (msi_desc->irq != data->irq) {
+ data->chip_data = int_desc;
+ int_desc->address = msi_desc->msg.address_lo |
+ (u64)msi_desc->msg.address_hi << 32;
+ int_desc->data = msi_desc->msg.data +
+ (data->irq - msi_desc->irq);
+ msg->address_hi = msi_desc->msg.address_hi;
+ msg->address_lo = msi_desc->msg.address_lo;
+ msg->data = int_desc->data;
+ put_pcichild(hpdev);
+ return;
+ }
+ /*
+ * The vector we select here is a dummy value. The correct
+ * value gets sent to the hypervisor in unmask(). This needs
+ * to be aligned with the count, and also not zero. Multi-MSI
+ * counts are powers of 2 up to 32, so 32 will always work here.
+ */
+ vector = 32;
+ vector_count = msi_desc->nvec_used;
+ cpu = hv_compose_multi_msi_req_get_cpu();
+ } else {
+ vector = hv_msi_get_int_vector(data);
+ vector_count = 1;
+ cpu = hv_compose_msi_req_get_cpu(dest);
+ }
+
+ /*
+ * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
+ * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
+ * for better readability.
+ */
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
ctxt.pci_pkt.compl_ctxt = &comp;
- switch (pci_protocol_version) {
+ switch (hbus->protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
- dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ (u8)vector,
+ vector_count);
break;
case PCI_PROTOCOL_VERSION_1_2:
+ case PCI_PROTOCOL_VERSION_1_3:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
- dest,
+ cpu,
+ hpdev->desc.win_slot.slot,
+ (u8)vector,
+ vector_count);
+ break;
+
+ case PCI_PROTOCOL_VERSION_1_4:
+ size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
+ cpu,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ vector,
+ vector_count);
break;
default:
@@ -1141,10 +2014,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
goto free_int_desc;
}
- ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
- size, (unsigned long)&ctxt.pci_pkt,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+ size, (unsigned long)&ctxt.pci_pkt,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret) {
dev_err(&hbus->hdev->device,
"Sending request for interrupt failed: 0x%x",
@@ -1153,42 +2026,45 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
/*
+ * Prevents hv_pci_onchannelcallback() from running concurrently
+ * in the tasklet.
+ */
+ tasklet_disable_in_atomic(&channel->callback_event);
+
+ /*
* Since this function is called with IRQ locks held, can't
* do normal wait for completion; instead poll.
*/
while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
+ unsigned long flags;
+
/* 0xFFFF means an invalid PCI VENDOR ID. */
if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
dev_err_once(&hbus->hdev->device,
"the device has gone\n");
- goto free_int_desc;
+ goto enable_tasklet;
}
/*
- * When the higher level interrupt code calls us with
- * interrupt disabled, we must poll the channel by calling
- * the channel callback directly when channel->target_cpu is
- * the current CPU. When the higher level interrupt code
- * calls us with interrupt enabled, let's add the
- * local_irq_save()/restore() to avoid race:
- * hv_pci_onchannelcallback() can also run in tasklet.
+ * Make sure that the ring buffer data structure doesn't get
+ * freed while we dereference the ring buffer pointer. Test
+ * for the channel's onchannel_callback being NULL within a
+ * sched_lock critical section. See also the inline comments
+ * in vmbus_reset_channel_cb().
*/
- local_irq_save(flags);
-
- if (hbus->hdev->channel->target_cpu == smp_processor_id())
- hv_pci_onchannelcallback(hbus);
-
- local_irq_restore(flags);
-
- if (hpdev->state == hv_pcichild_ejecting) {
- dev_err_once(&hbus->hdev->device,
- "the device is being ejected\n");
- goto free_int_desc;
+ spin_lock_irqsave(&channel->sched_lock, flags);
+ if (unlikely(channel->onchannel_callback == NULL)) {
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
+ goto enable_tasklet;
}
+ hv_pci_onchannelcallback(hbus);
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
udelay(100);
}
+ tasklet_enable(&channel->callback_event);
+
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device,
"Request for interrupt failed: 0x%x",
@@ -1212,6 +2088,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
put_pcichild(hpdev);
return;
+enable_tasklet:
+ tasklet_enable(&channel->callback_event);
+ /*
+ * The completion packet on the stack becomes invalid after 'return';
+ * remove the ID from the VMbus requestor if the identifier is still
+ * mapped to/associated with the packet. (The identifier could have
+ * been 're-used', i.e., already removed and (re-)mapped.)
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
free_int_desc:
kfree(int_desc);
drop_reference:
@@ -1222,27 +2109,85 @@ return_null_message:
msg->data = 0;
}
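
The error path above shows the VMbus requestor discipline: a request ID, not a raw pointer, travels to the host, and an aborted wait must unmap the ID before the on-stack packet dies. A minimal single-threaded userspace analogue (hypothetical helpers modeled on vmbus_sendpacket_getid()/vmbus_request_addr_match(), not driver code):

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace analogue of the VMbus requestor: a send maps an on-stack
 * context to a small ID, a completion translates the ID back, and an
 * aborted wait must unmap the ID so that a late host completion can
 * never resolve to dead stack memory.
 */
#define RQSTOR_SIZE 64
static void *rqstor[RQSTOR_SIZE];

static int64_t send_getid(void *ctx)
{
	for (int64_t id = 0; id < RQSTOR_SIZE; id++)
		if (!rqstor[id]) {
			rqstor[id] = ctx;	/* like vmbus_sendpacket_getid() */
			return id;
		}
	return -1;
}

static void *addr_match(int64_t id, void *expected)
{
	/* Like vmbus_request_addr_match(): unmap only if still ours. */
	if (id >= 0 && id < RQSTOR_SIZE && rqstor[id] == expected) {
		rqstor[id] = NULL;
		return expected;
	}
	return NULL;
}

int main(void)
{
	int ctx;	/* stands in for the on-stack completion packet */
	int64_t id = send_getid(&ctx);

	/* Wait timed out or the device vanished: clean up the ID. */
	if (addr_match(id, &ctx))
		printf("request %lld cancelled safely\n", (long long)id);
	return 0;
}
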
+static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ info->ops->msi_prepare = hv_msi_prepare;
+
+ chip->irq_set_affinity = irq_chip_set_affinity_parent;
+
+ if (IS_ENABLED(CONFIG_X86))
+ chip->flags |= IRQCHIP_MOVE_DEFERRED;
+
+ return true;
+}
+
+#define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
+ .required_flags = HV_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = HV_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = HV_MSI_CHIP_FLAGS,
+ .prefix = "HV-",
+ .init_dev_msi_info = hv_pcie_init_dev_msi_info,
+};
+
/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
.name = "Hyper-V PCIe MSI",
.irq_compose_msi_msg = hv_compose_msi_msg,
- .irq_set_affinity = hv_set_affinity,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
.irq_ack = irq_chip_ack_parent,
+ .irq_eoi = irq_chip_eoi_parent,
.irq_mask = hv_irq_mask,
.irq_unmask = hv_irq_unmask,
};
-static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
+static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
+ void *arg)
{
- return arg->msi_hwirq;
+ /*
+ * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
+ * should be moved here.
+ */
+ int ret;
+
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
+ if (IS_ENABLED(CONFIG_X86))
+ __irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
+ }
+
+ return 0;
}
-static struct msi_domain_ops hv_msi_ops = {
- .get_hwirq = hv_msi_domain_ops_get_hwirq,
- .msi_prepare = pci_msi_prepare,
- .set_desc = pci_msi_set_desc,
- .msi_free = hv_msi_free,
+static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
+{
+ for (int i = 0; i < nr_irqs; i++)
+ hv_msi_free(d, virq + i);
+
+ irq_domain_free_irqs_top(d, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops hv_pcie_domain_ops = {
+ .alloc = hv_pcie_domain_alloc,
+ .free = hv_pcie_domain_free,
};
/**
@@ -1260,23 +2205,22 @@ static struct msi_domain_ops hv_msi_ops = {
*/
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
- hbus->msi_info.chip = &hv_msi_irq_chip;
- hbus->msi_info.ops = &hv_msi_ops;
- hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_MSIX);
- hbus->msi_info.handler = handle_edge_irq;
- hbus->msi_info.handler_name = "edge";
- hbus->msi_info.data = hbus;
- hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
- &hbus->msi_info,
- x86_vector_domain);
+ struct irq_domain_info info = {
+ .fwnode = hbus->fwnode,
+ .ops = &hv_pcie_domain_ops,
+ .host_data = hbus,
+ .parent = hv_pci_get_root_domain(),
+ };
+
+ hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
if (!hbus->irq_domain) {
dev_err(&hbus->hdev->device,
"Failed to build an MSI IRQ domain\n");
return -ENODEV;
}
+ dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
+
return 0;
}
@@ -1332,7 +2276,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* so it's sufficient to just add them up without tracking alignment.
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
"There's an I/O BAR in this list!\n");
@@ -1403,10 +2347,27 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_lock_irqsave(&hbus->device_list_lock, flags);
+ /*
+ * Clear the memory enable bit, in case it's already set. This occurs
+ * in the suspend path of hibernation, where the device is suspended,
+ * resumed and suspended again: see hibernation_snapshot() and
+ * hibernation_platform_enter().
+ *
+ * If the memory enable bit is already set, Hyper-V silently ignores
+	 * the BAR updates below, and the related PCI device driver cannot
+	 * work, because reading from the device register(s) always returns
+	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
+ */
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
+ }
+
/* Pick addresses for the BARs. */
do {
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
continue;
@@ -1444,12 +2405,17 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
}
}
if (high_size <= 1 && low_size <= 1) {
- /* Set the memory enable bit. */
- _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
- &command);
- command |= PCI_COMMAND_MEMORY;
- _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
- command);
+ /*
+				/*
+				 * No need to set the PCI_COMMAND_MEMORY bit as
+				 * the core PCI driver doesn't require the bit
+				 * to be pre-set. In fact, we intentionally keep
+				 * the bit off so that the PCI BAR probing in
+				 * the core PCI driver doesn't cause Hyper-V to
+				 * unnecessarily unmap/map the virtual BARs
+				 * from/to the physical BARs multiple times.
+				 * This reduces the VM boot time significantly
+				 * if the BAR sizes are huge.
+ */
break;
}
}
@@ -1482,7 +2448,7 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
- hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+ hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
name, NULL);
if (IS_ERR(hpdev->pci_slot)) {
pr_warn("pci_create slot %s failed\n", name);
@@ -1491,6 +2457,51 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
}
}
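
The slot_nr above comes from the Windows "wslot" encoding, which this driver converts with wslot_to_devfn()/devfn_to_wslot(). A standalone sketch of the conversion, assuming the union layout defined earlier in this file (bit-field ordering is compiler-dependent, so treat it as illustrative only):

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed layout of union win_slot_encoding from this driver:
 * device in the low 5 bits, function in the next 3.
 */
union win_slot_encoding {
	uint32_t slot;
	struct {
		uint32_t dev:5;
		uint32_t func:3;
		uint32_t reserved:24;
	} bits;
};

/* Mirrors devfn_to_wslot(): a Linux devfn packs dev:5 << 3 | func:3. */
static uint32_t devfn_to_wslot(unsigned int devfn)
{
	union win_slot_encoding wslot = { .slot = 0 };

	wslot.bits.dev = (devfn >> 3) & 0x1f;	/* PCI_SLOT(devfn) */
	wslot.bits.func = devfn & 0x07;		/* PCI_FUNC(devfn) */
	return wslot.slot;
}

int main(void)
{
	/* devfn 0x0b = device 1, function 3 */
	printf("wslot = 0x%08x\n", devfn_to_wslot(0x0b));
	return 0;
}
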
+/*
+ * Remove entries in sysfs pci slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (!hpdev->pci_slot)
+ continue;
+ pci_destroy_slot(hpdev->pci_slot);
+ hpdev->pci_slot = NULL;
+ }
+}
+
+/*
+ * Set NUMA node for the devices on the bus
+ */
+static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus = hbus->bridge->bus;
+ struct hv_pci_dev *hv_dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
+ if (!hv_dev)
+ continue;
+
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+			 * (e.g. in a kdump kernel) or with NUMA disabled via
+			 * "numa=off". In those cases, adjust the host-provided
+			 * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
+
+ put_pcichild(hv_dev);
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1499,23 +2510,22 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
*/
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
- /* Register the device */
- hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
- 0, /* bus number is always zero */
- &hv_pcifront_ops,
- &hbus->sysdata,
- &hbus->resources_for_children);
- if (!hbus->pci_bus)
- return -ENODEV;
+ int error;
+ struct pci_host_bridge *bridge = hbus->bridge;
- hbus->pci_bus->msi = &hbus->msi_chip;
- hbus->pci_bus->msi->dev = &hbus->hdev->device;
+ bridge->dev.parent = &hbus->hdev->device;
+ bridge->sysdata = &hbus->sysdata;
+ bridge->ops = &hv_pcifront_ops;
+
+ error = pci_scan_root_bus_bridge(bridge);
+ if (error)
+ return error;
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
- pci_bus_assign_resources(hbus->pci_bus);
+ hv_pci_assign_numa_node(hbus);
+ pci_bus_assign_resources(bridge->bus);
hv_pci_assign_slots(hbus);
- pci_bus_add_devices(hbus->pci_bus);
+ pci_bus_add_devices(bridge->bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
@@ -1541,14 +2551,16 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
struct q_res_req_compl *completion = context;
struct pci_q_res_req_response *q_res_req =
(struct pci_q_res_req_response *)resp;
+ s32 status;
int i;
- if (resp->status < 0) {
+ status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
+ if (status < 0) {
dev_err(&completion->hpdev->hbus->hdev->device,
"query resource requirements failed: %x\n",
- resp->status);
+ status);
} else {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
completion->hpdev->probed_bar[i] =
q_res_req->probed_bar[i];
}
@@ -1569,7 +2581,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
* Return: Pointer to the new tracking struct
*/
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
- struct pci_function_description *desc)
+ struct hv_pcidev_description *desc)
{
struct hv_pci_dev *hpdev;
struct pci_child_message *res_req;
@@ -1592,7 +2604,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -1680,7 +2692,7 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct pci_function_description *new_desc;
+ struct hv_pcidev_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
struct list_head removed;
@@ -1709,10 +2721,10 @@ static void pci_devices_present_work(struct work_struct *work)
}
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
- if (!dr) {
- put_hvpcibus(hbus);
+ if (!dr)
return;
- }
+
+ mutex_lock(&hbus->state_lock);
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
@@ -1766,6 +2778,10 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
put_pcichild(hpdev);
}
@@ -1776,7 +2792,8 @@ static void pci_devices_present_work(struct work_struct *work)
* because there may have been changes.
*/
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
+ pci_scan_child_bus(hbus->bridge->bus);
+ hv_pci_assign_numa_node(hbus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
break;
@@ -1790,46 +2807,37 @@ static void pci_devices_present_work(struct work_struct *work)
break;
}
- put_hvpcibus(hbus);
+ mutex_unlock(&hbus->state_lock);
+
kfree(dr);
}
/**
- * hv_pci_devices_present() - Handles list of new children
+ * hv_pci_start_relations_work() - Queue work to start device discovery
* @hbus: Root PCI bus, as understood by this driver
- * @relations: Packet from host listing children
+ * @dr: The list of children returned from the host
*
- * This function is invoked whenever a new list of devices for
- * this bus appears.
+ * Return: 0 on success, -errno on failure
*/
-static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
- struct pci_bus_relations *relations)
+static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
+ struct hv_dr_state *dr)
{
- struct hv_dr_state *dr;
struct hv_dr_work *dr_wrk;
unsigned long flags;
bool pending_dr;
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hbus->hdev->device,
+ "PCI VMBus BUS_RELATIONS: ignored\n");
+ return -ENOENT;
+ }
+
dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
if (!dr_wrk)
- return;
-
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct pci_function_description) *
- (relations->device_count)), GFP_NOWAIT);
- if (!dr) {
- kfree(dr_wrk);
- return;
- }
+ return -ENOMEM;
INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
dr_wrk->bus = hbus;
- dr->device_count = relations->device_count;
- if (dr->device_count != 0) {
- memcpy(dr->func, relations->func,
- sizeof(struct pci_function_description) *
- dr->device_count);
- }
spin_lock_irqsave(&hbus->device_list_lock, flags);
/*
@@ -1841,12 +2849,87 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
list_add_tail(&dr->list_entry, &hbus->dr_list);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
- if (pending_dr) {
+ if (pending_dr)
kfree(dr_wrk);
- } else {
- get_hvpcibus(hbus);
+ else
queue_work(hbus->wq, &dr_wrk->wrk);
+
+ return 0;
+}
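
hv_pci_start_relations_work() coalesces bursts of relations messages: a new state is always appended to dr_list, but a work item is queued only if the list was empty, since an already-queued worker drains every entry it finds. A hedged, single-threaded model of that discipline (hypothetical names, no locking; the driver does this under device_list_lock):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DR 8
static int dr_list[MAX_DR];
static int dr_count;
static bool work_queued;

static void start_relations_work(int dr)
{
	bool pending_dr = (dr_count > 0);

	dr_list[dr_count++] = dr;
	if (!pending_dr)
		work_queued = true;	/* stands in for queue_work() */
	/* else: the already-queued worker will pick this entry up too */
}

static void present_work(void)
{
	/* Drain everything queued so far, newest state last. */
	for (int i = 0; i < dr_count; i++)
		printf("processing state %d\n", dr_list[i]);
	dr_count = 0;
	work_queued = false;
}

int main(void)
{
	start_relations_work(1);
	start_relations_work(2);	/* coalesced: no second queue_work() */
	if (work_queued)
		present_work();
	return 0;
}
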
+
+/**
+ * hv_pci_devices_present() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * Process a new list of devices on the bus. The list of devices is
+ * discovered by the VSP and sent to us via the VSP message
+ * PCI_BUS_RELATIONS whenever a new list of devices for this bus appears.
+ */
+static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
}
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
+}
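
struct_size() above replaces the old open-coded offsetof() arithmetic for the flexible array in hv_dr_state. A standalone sketch of the equivalent computation, with the state reduced to a hypothetical two-field record:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct hv_dr_state and its flexible array. */
struct dr_state {
	uint32_t device_count;
	struct {
		uint16_t v_id;
		uint16_t d_id;
	} func[];
};

int main(void)
{
	uint32_t n = 3;

	/*
	 * Equivalent of struct_size(dr, func, n): header plus n array
	 * elements. The kernel macro additionally saturates to SIZE_MAX
	 * if the multiplication overflows, which the open-coded
	 * offsetof() arithmetic did not.
	 */
	size_t sz = sizeof(struct dr_state) +
		    (size_t)n * sizeof(((struct dr_state *)0)->func[0]);
	struct dr_state *dr = calloc(1, sz);

	if (!dr)
		return 1;
	dr->device_count = n;
	printf("allocated %zu bytes for %u entries\n", sz, n);
	free(dr);
	return 0;
}
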
+
+/**
+ * hv_pci_devices_present2() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * This function is the v2 version of hv_pci_devices_present()
+ */
+static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations2 *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ dr->func[i].flags = relations->func[i].flags;
+ dr->func[i].virtual_numa_node =
+ relations->func[i].virtual_numa_node;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
}
/**
@@ -1861,6 +2944,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
static void hv_eject_device_work(struct work_struct *work)
{
struct pci_eject_response *ejct_pkt;
+ struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
struct pci_dev *pdev;
unsigned long flags;
@@ -1871,18 +2955,18 @@ static void hv_eject_device_work(struct work_struct *work)
} ctxt;
hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;
- WARN_ON(hpdev->state != hv_pcichild_ejecting);
+ mutex_lock(&hbus->state_lock);
/*
* Ejection can come before or after the PCI bus has been set up, so
* attempt to find it and tear down the bus state, if it exists. This
- * must be done without constructs like pci_domain_nr(hbus->pci_bus)
- * because hbus->pci_bus may not exist yet.
+ * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
+ * because hbus->bridge->bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
- wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -1890,24 +2974,29 @@ static void hv_eject_device_work(struct work_struct *work)
pci_unlock_rescan_remove();
}
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
if (hpdev->pci_slot)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
- vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
- sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
+ sizeof(*ejct_pkt), 0,
VM_PKT_DATA_INBAND, 0);
+ /* For the get_pcichild() in hv_pci_eject_device() */
put_pcichild(hpdev);
+ /* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
- put_hvpcibus(hpdev->hbus);
+ put_pcichild(hpdev);
+ /* hpdev has been freed. Do not use it any more. */
+
+ mutex_unlock(&hbus->state_lock);
}
/**
@@ -1920,11 +3009,17 @@ static void hv_eject_device_work(struct work_struct *work)
*/
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
- hpdev->state = hv_pcichild_ejecting;
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct hv_device *hdev = hbus->hdev;
+
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
+ return;
+ }
+
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
- get_hvpcibus(hpdev->hbus);
- queue_work(hpdev->hbus->wq, &hpdev->wrk);
+ queue_work(hbus->wq, &hpdev->wrk);
}
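
The eject path pins the device with get_pcichild() before queuing, and the work handler drops that reference (plus the two creation references) when it finishes. A hypothetical, single-threaded model of this handoff (the kernel uses atomic refcount_t, not a plain int):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
};

static void get_ref(struct obj *o)
{
	o->refs++;
}

static void put_ref(struct obj *o)
{
	if (--o->refs == 0) {
		printf("object freed\n");
		free(o);
	}
}

static void eject_work(struct obj *o)
{
	/* ... tear the device down ... */
	put_ref(o);	/* drop the reference taken when queuing */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refs = 1;	/* creation reference */
	get_ref(o);	/* pinned for the queued work, as in hv_pci_eject_device() */
	eject_work(o);	/* the work item runs asynchronously in reality */
	put_ref(o);	/* creation reference dropped elsewhere */
	return 0;
}
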
/**
@@ -1939,8 +3034,9 @@ static void hv_pci_onchannelcallback(void *context)
const int packet_size = 0x100;
int ret;
struct hv_pcibus_device *hbus = context;
+ struct vmbus_channel *chan = hbus->hdev->channel;
u32 bytes_recvd;
- u64 req_id;
+ u64 req_id, req_addr;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = packet_size;
@@ -1948,16 +3044,19 @@ static void hv_pci_onchannelcallback(void *context)
struct pci_response *response;
struct pci_incoming_message *new_message;
struct pci_bus_relations *bus_rel;
+ struct pci_bus_relations2 *bus_rel2;
+ struct pci_dev_inval_block *inval;
struct pci_dev_incoming *dev_message;
struct hv_pci_dev *hpdev;
+ unsigned long flags;
buffer = kmalloc(bufferlen, GFP_ATOMIC);
if (!buffer)
return;
while (1) {
- ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
- bufferlen, &bytes_recvd, &req_id);
+ ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
+ &bytes_recvd, &req_id);
if (ret == -ENOBUFS) {
kfree(buffer);
@@ -1984,15 +3083,29 @@ static void hv_pci_onchannelcallback(void *context)
switch (desc->type) {
case VM_PKT_COMP:
+ lock_requestor(chan, flags);
+ req_addr = __vmbus_request_addr_match(chan, req_id,
+ VMBUS_RQST_ADDR_ANY);
+ if (req_addr == VMBUS_RQST_ERROR) {
+ unlock_requestor(chan, flags);
+ dev_err(&hbus->hdev->device,
+ "Invalid transaction ID %llx\n",
+ req_id);
+ break;
+ }
+ comp_packet = (struct pci_packet *)req_addr;
+ response = (struct pci_response *)buffer;
/*
- * The host is trusted, and thus it's safe to interpret
- * this transaction ID as a pointer.
+ * Call ->completion_func() within the critical section to make
+ * sure that the packet pointer is still valid during the call:
+ * here 'valid' means that there's a task still waiting for the
+ * completion, and that the packet data is still on the waiting
+ * task's stack. Cf. hv_compose_msi_msg().
*/
- comp_packet = (struct pci_packet *)req_id;
- response = (struct pci_response *)buffer;
comp_packet->completion_func(comp_packet->compl_ctxt,
response,
bytes_recvd);
+ unlock_requestor(chan, flags);
break;
case VM_PKT_DATA_INBAND:
@@ -2002,10 +3115,10 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_BUS_RELATIONS:
bus_rel = (struct pci_bus_relations *)buffer;
- if (bytes_recvd <
- offsetof(struct pci_bus_relations, func) +
- (sizeof(struct pci_function_description) *
- (bus_rel->device_count))) {
+ if (bytes_recvd < sizeof(*bus_rel) ||
+ bytes_recvd <
+ struct_size(bus_rel, func,
+ bus_rel->device_count)) {
dev_err(&hbus->hdev->device,
"bus relations too small\n");
break;
@@ -2014,9 +3127,29 @@ static void hv_pci_onchannelcallback(void *context)
hv_pci_devices_present(hbus, bus_rel);
break;
+ case PCI_BUS_RELATIONS2:
+
+ bus_rel2 = (struct pci_bus_relations2 *)buffer;
+ if (bytes_recvd < sizeof(*bus_rel2) ||
+ bytes_recvd <
+ struct_size(bus_rel2, func,
+ bus_rel2->device_count)) {
+ dev_err(&hbus->hdev->device,
+ "bus relations v2 too small\n");
+ break;
+ }
+
+ hv_pci_devices_present2(hbus, bus_rel2);
+ break;
+
case PCI_EJECT:
dev_message = (struct pci_dev_incoming *)buffer;
+ if (bytes_recvd < sizeof(*dev_message)) {
+ dev_err(&hbus->hdev->device,
+ "eject message too small\n");
+ break;
+ }
hpdev = get_pcichild_wslot(hbus,
dev_message->wslot.slot);
if (hpdev) {
@@ -2025,6 +3158,26 @@ static void hv_pci_onchannelcallback(void *context)
}
break;
+ case PCI_INVALIDATE_BLOCK:
+
+ inval = (struct pci_dev_inval_block *)buffer;
+ if (bytes_recvd < sizeof(*inval)) {
+ dev_err(&hbus->hdev->device,
+ "invalidate message too small\n");
+ break;
+ }
+ hpdev = get_pcichild_wslot(hbus,
+ inval->wslot.slot);
+ if (hpdev) {
+ if (hpdev->block_invalidate) {
+ hpdev->block_invalidate(
+ hpdev->invalidate_context,
+ inval->block_mask);
+ }
+ put_pcichild(hpdev);
+ }
+ break;
+
default:
dev_warn(&hbus->hdev->device,
"Unimplemented protocol message %x\n",
@@ -2046,7 +3199,10 @@ static void hv_pci_onchannelcallback(void *context)
/**
* hv_pci_protocol_negotiation() - Set up protocol
- * @hdev: VMBus's tracking struct for this root PCI bus
+ * @hdev: VMBus's tracking struct for this root PCI bus.
+ * @version: Array of supported channel protocol versions in
+ *           the order of probing - highest goes first.
+ * @num_version: Number of elements in the version array.
*
* This driver is intended to support running on Windows 10
* (server) and later versions. It will not run on earlier
@@ -2060,8 +3216,11 @@ static void hv_pci_onchannelcallback(void *context)
* failing if the host doesn't support the necessary protocol
* level.
*/
-static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+static int hv_pci_protocol_negotiation(struct hv_device *hdev,
+ enum pci_protocol_version_t version[],
+ int num_version)
{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_version_request *version_req;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
@@ -2081,11 +3240,11 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
- for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
- version_req->protocol_version = pci_protocol_versions[i];
+ for (i = 0; i < num_version; i++) {
+ version_req->protocol_version = version[i];
ret = vmbus_sendpacket(hdev->channel, version_req,
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
@@ -2101,10 +3260,10 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
}
if (comp_pkt.completion_status >= 0) {
- pci_protocol_version = pci_protocol_versions[i];
+ hbus->protocol_version = version[i];
dev_info(&hdev->device,
"PCI VMBus probing: Using version %#x\n",
- pci_protocol_version);
+ hbus->protocol_version);
goto exit;
}
@@ -2199,8 +3358,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->low_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
}
if (hbus->high_mmio_space) {
@@ -2219,8 +3377,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->high_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
}
return 0;
@@ -2274,6 +3431,8 @@ static void hv_free_config_window(struct hv_pcibus_device *hbus)
vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
+
/**
* hv_pci_enter_d0() - Bring the "bus" into the D0 power state
* @hdev: VMBus's tracking struct for this root PCI bus
@@ -2286,8 +3445,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
struct pci_bus_d0_entry *d0_entry;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
+ bool retry = true;
int ret;
+enter_d0_retry:
/*
* Tell the host that the bus is ready to use, and moved into the
* powered-on state. This includes telling the host which region
@@ -2301,7 +3462,7 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -2314,6 +3475,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
if (ret)
goto exit;
+ /*
+	 * In certain cases (e.g. kdump) the PCI device of interest was
+	 * not cleanly shut down and its resources are still held on the
+	 * host side, so the host could return an invalid device status.
+	 * We need to explicitly request that the host release the
+	 * resources and then try to enter D0 again.
+ */
+ if (comp_pkt.completion_status < 0 && retry) {
+ retry = false;
+
+ dev_err(&hdev->device, "Retrying D0 Entry\n");
+
+ /*
+		 * hv_pci_bus_exit() calls hv_send_resources_released()
+		 * to free up the resources of its child devices.
+		 * In the kdump kernel we need to set
+		 * wslot_res_allocated to 255 so that it scans all child
+		 * devices to release the resources allocated in the
+		 * normal kernel before the panic happened.
+ */
+ hbus->wslot_res_allocated = 255;
+
+ ret = hv_pci_bus_exit(hdev, true);
+
+ if (ret == 0) {
+ kfree(pkt);
+ goto enter_d0_retry;
+ }
+ dev_err(&hdev->device,
+ "Retrying D0 failed with ret %d\n", ret);
+ }
+
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2356,6 +3549,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
if (!ret)
ret = wait_for_response(hdev, &comp);
+ /*
+ * In the case of fast device addition/removal, it's possible that
+ * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+ * already got a PCI_BUS_RELATIONS* message from the host and the
+ * channel callback already scheduled a work to hbus->wq, which can be
+ * running pci_devices_present_work() -> survey_child_resources() ->
+ * complete(&hbus->survey_event), even after hv_pci_query_relations()
+ * exits and the stack variable 'comp' is no longer valid; as a result,
+ * a hang or a page fault may happen when the complete() calls
+ * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+ * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+ * -ENODEV, there can't be any more work item scheduled to hbus->wq
+ * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+ * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+ * channel->rescind = true.
+ */
+ flush_workqueue(hbus->wq);
+
return ret;
}
@@ -2385,10 +3596,10 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
struct hv_pci_dev *hpdev;
struct pci_packet *pkt;
size_t size_res;
- u32 wslot;
+ int wslot;
int ret;
- size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+ size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
? sizeof(*res_assigned) : sizeof(*res_assigned2);
pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
@@ -2407,22 +3618,22 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -2438,6 +3649,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
comp_pkt.completion_status);
break;
}
+
+ hbus->wslot_res_allocated = wslot;
}
kfree(pkt);
@@ -2456,10 +3669,10 @@ static int hv_send_resources_released(struct hv_device *hdev)
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_child_message pkt;
struct hv_pci_dev *hpdev;
- u32 wslot;
+ int wslot;
int ret;
- for (wslot = 0; wslot < 256; wslot++) {
+ for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
hpdev = get_pcichild_wslot(hbus, wslot);
if (!hpdev)
continue;
@@ -2474,20 +3687,13 @@ static int hv_send_resources_released(struct hv_device *hdev)
VM_PKT_DATA_INBAND, 0);
if (ret)
return ret;
- }
- return 0;
-}
+ hbus->wslot_res_allocated = wslot - 1;
+ }
-static void get_hvpcibus(struct hv_pcibus_device *hbus)
-{
- refcount_inc(&hbus->remove_lock);
-}
+ hbus->wslot_res_allocated = -1;
-static void put_hvpcibus(struct hv_pcibus_device *hbus)
-{
- if (refcount_dec_and_test(&hbus->remove_lock))
- complete(&hbus->remove_event);
+ return 0;
}
/**
@@ -2500,51 +3706,88 @@ static void put_hvpcibus(struct hv_pcibus_device *hbus)
static int hv_pci_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
+ struct pci_host_bridge *bridge;
struct hv_pcibus_device *hbus;
- int ret;
+ int ret, dom;
+ u16 dom_req;
+ char *name;
- /*
- * hv_pcibus_device contains the hypercall arguments for retargeting in
- * hv_irq_unmask(). Those must not cross a page boundary.
- */
- BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+ bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
+ if (!bridge)
+ return -ENOMEM;
- hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
+ hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+
+ hbus->bridge = bridge;
+ mutex_init(&hbus->state_lock);
hbus->state = hv_pcibus_init;
+ hbus->wslot_res_allocated = -1;
+
+ /*
+ * The PCI bus "domain" is what is called "segment" in ACPI and other
+ * specs. Pull it from the instance ID, to get something usually
+ * unique. In rare cases of collision, we will find out another number
+ * not in use.
+ *
+ * Note that, since this code only runs in a Hyper-V VM, Hyper-V
+ * together with this guest driver can guarantee that (1) The only
+ * domain used by Gen1 VMs for something that looks like a physical
+ * PCI bus (which is actually emulated by the hypervisor) is domain 0.
+ * (2) There will be no overlap between domains (after fixing possible
+ * collisions) in the same VM.
+ *
+ * Because Gen1 VMs use domain 0, don't allow picking domain 0 here,
+ * even if bytes 4 and 5 of the instance GUID are both zero. For wider
+ * userspace compatibility, limit the domain ID to a 16-bit value.
+ */
+ dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
+ dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX);
+ if (dom < 0) {
+ dev_err(&hdev->device,
+ "Unable to use dom# 0x%x or other numbers", dom_req);
+ ret = -EINVAL;
+ goto free_bus;
+ }
+ if (dom != dom_req)
+ dev_info(&hdev->device,
+ "PCI dom# 0x%x has collision, using 0x%x",
+ dom_req, dom);
+
+ hbus->bridge->domain_nr = dom;
+#ifdef CONFIG_X86
+ hbus->sysdata.domain = dom;
+ hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
+#elif defined(CONFIG_ARM64)
/*
- * The PCI bus "domain" is what is called "segment" in ACPI and
- * other specs. Pull it from the instance ID, to get something
- * unique. Bytes 8 and 9 are what is used in Windows guests, so
- * do the same thing for consistency. Note that, since this code
- * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee
- * that (1) the only domain in use for something that looks like
- * a physical PCI bus (which is actually emulated by the
- * hypervisor) is domain 0 and (2) there will be no overlap
- * between domains derived from these instance IDs in the same
- * VM.
+ * Set the PCI bus parent to be the corresponding VMbus
+ * device. Then the VMbus device will be assigned as the
+ * ACPI companion in pcibios_root_bridge_prepare() and
+ * pci_dma_configure() will propagate device coherence
+ * information to devices created on the bus.
*/
- hbus->sysdata.domain = hdev->dev_instance.b[9] |
- hdev->dev_instance.b[8] << 8;
+ hbus->sysdata.parent = hdev->device.parent;
+ hbus->use_calls = false;
+#endif
hbus->hdev = hdev;
- refcount_set(&hbus->remove_lock, 1);
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
- INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
- spin_lock_init(&hbus->retarget_msi_interrupt_lock);
- init_completion(&hbus->remove_event);
hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
- hbus->sysdata.domain);
+ hbus->bridge->domain_nr);
if (!hbus->wq) {
ret = -ENOMEM;
- goto free_bus;
+ goto free_dom;
}
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
hv_pci_onchannelcallback, hbus);
if (ret)
@@ -2552,7 +3795,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hv_set_drvdata(hdev, hbus);
- ret = hv_pci_protocol_negotiation(hdev);
+ ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
+ ARRAY_SIZE(pci_protocol_versions));
if (ret)
goto close;
@@ -2569,8 +3813,15 @@ static int hv_pci_probe(struct hv_device *hdev,
goto free_config;
}
- hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus);
- if (!hbus->sysdata.fwnode) {
+ name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
+ if (!name) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ hbus->fwnode = irq_domain_alloc_named_fwnode(name);
+ kfree(name);
+ if (!hbus->fwnode) {
ret = -ENOMEM;
goto unmap;
}
@@ -2583,13 +3834,15 @@ static int hv_pci_probe(struct hv_device *hdev,
if (ret)
goto free_irq_domain;
+ mutex_lock(&hbus->state_lock);
+
ret = hv_pci_enter_d0(hdev);
if (ret)
- goto free_irq_domain;
+ goto release_state_lock;
ret = hv_pci_allocate_bridge_windows(hbus);
if (ret)
- goto free_irq_domain;
+ goto exit_d0;
ret = hv_send_resources_allocated(hdev);
if (ret)
@@ -2603,14 +3856,19 @@ static int hv_pci_probe(struct hv_device *hdev,
if (ret)
goto free_windows;
+ mutex_unlock(&hbus->state_lock);
return 0;
free_windows:
hv_pci_free_bridge_windows(hbus);
+exit_d0:
+ (void) hv_pci_bus_exit(hdev, true);
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
unmap:
iounmap(hbus->cfg_addr);
free_config:
@@ -2619,88 +3877,262 @@ close:
vmbus_close(hdev->channel);
destroy_wq:
destroy_workqueue(hbus->wq);
+free_dom:
+ pci_bus_release_emul_domain_nr(hbus->bridge->domain_nr);
free_bus:
- free_page((unsigned long)hbus);
+ kfree(hbus);
return ret;
}
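
For reference, the domain derivation in hv_pci_probe() above takes bytes 4 and 5 of the VMbus instance GUID as the requested 16-bit PCI domain (segment); collisions and the reserved domain 0 are then resolved by pci_bus_find_emul_domain_nr(). A standalone sketch of just the byte extraction:

#include <stdint.h>
#include <stdio.h>

static uint16_t instance_guid_to_dom_req(const uint8_t b[16])
{
	/* dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4] */
	return (uint16_t)(b[5] << 8 | b[4]);
}

int main(void)
{
	uint8_t b[16] = { [4] = 0x34, [5] = 0x12 };

	printf("dom_req = 0x%04x\n", instance_guid_to_dom_req(b));	/* 0x1234 */
	return 0;
}
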
-static void hv_pci_bus_exit(struct hv_device *hdev)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct vmbus_channel *chan = hdev->channel;
struct {
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
- struct pci_bus_relations relations;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
+ struct hv_pci_dev *hpdev, *tmp;
+ unsigned long flags;
+ u64 trans_id;
int ret;
/*
* After the host sends the RESCIND_CHANNEL message, it doesn't
* access the per-channel ringbuffer any longer.
*/
- if (hdev->channel->rescind)
- return;
+ if (chan->rescind)
+ return 0;
+
+ if (!keep_devs) {
+ struct list_head removed;
+
+ /* Move all present children to the list on stack */
+ INIT_LIST_HEAD(&removed);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
+ list_move_tail(&hpdev->list_entry, &removed);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
+ /* Remove all children in the list */
+ list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
+ list_del(&hpdev->list_entry);
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+ /* For the two refs got in new_pcichild_device() */
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
+ }
+ }
ret = hv_send_resources_released(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->device,
"Couldn't send resources released packet(s)\n");
+ return ret;
+ }
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
- (unsigned long)&pkt.teardown_packet,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (!ret)
- wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
+ (unsigned long)&pkt.teardown_packet,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
+ /*
+ * The completion packet on the stack becomes invalid after
+ * 'return'; remove the ID from the VMbus requestor if the
+ * identifier is still mapped to/associated with the packet.
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(chan, trans_id,
+ (unsigned long)&pkt.teardown_packet);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
/**
* hv_pci_remove() - Remove routine for this VMBus channel
* @hdev: VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
*/
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_remove(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus;
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
+ tasklet_disable(&hdev->channel->callback_event);
+ hbus->state = hv_pcibus_removing;
+ tasklet_enable(&hdev->channel->callback_event);
+ destroy_workqueue(hbus->wq);
+ hbus->wq = NULL;
+ /*
+		 * At this point, no work is running or can be scheduled
+		 * on hbus->wq. We can't race with hv_pci_devices_present()
+		 * or hv_pci_eject_device(), so it's safe to proceed.
+ */
+
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
- pci_stop_root_bus(hbus->pci_bus);
- pci_remove_root_bus(hbus->pci_bus);
+ pci_stop_root_bus(hbus->bridge->bus);
+ hv_pci_remove_slots(hbus);
+ pci_remove_root_bus(hbus->bridge->bus);
pci_unlock_rescan_remove();
- hbus->state = hv_pcibus_removed;
}
- hv_pci_bus_exit(hdev);
+ hv_pci_bus_exit(hdev, false);
vmbus_close(hdev->channel);
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
- pci_free_resource_list(&hbus->resources_for_children);
hv_pci_free_bridge_windows(hbus);
irq_domain_remove(hbus->irq_domain);
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
- put_hvpcibus(hbus);
- wait_for_completion(&hbus->remove_event);
- destroy_workqueue(hbus->wq);
- free_page((unsigned long)hbus);
+ irq_domain_free_fwnode(hbus->fwnode);
+
+ kfree(hbus);
+}
+
+static int hv_pci_suspend(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum hv_pcibus_state old_state;
+ int ret;
+
+ /*
+ * hv_pci_suspend() must make sure there are no pending work items
+ * before calling vmbus_close(), since it runs in a process context
+ * as a callback in dpm_suspend(). When it starts to run, the channel
+ * callback hv_pci_onchannelcallback(), which runs in a tasklet
+ * context, can be still running concurrently and scheduling new work
+ * items onto hbus->wq in hv_pci_devices_present() and
+ * hv_pci_eject_device(), and the work item handlers can access the
+ * vmbus channel, which can be being closed by hv_pci_suspend(), e.g.
+ * the work item handler pci_devices_present_work() ->
+ * new_pcichild_device() writes to the vmbus channel.
+ *
+ * To eliminate the race, hv_pci_suspend() disables the channel
+ * callback tasklet, sets hbus->state to hv_pcibus_removing, and
+ * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
+ * it knows that no new work item can be scheduled, and then it flushes
+ * hbus->wq and safely closes the vmbus channel.
+ */
+ tasklet_disable(&hdev->channel->callback_event);
+
+ /* Change the hbus state to prevent new work items. */
+ old_state = hbus->state;
+ if (hbus->state == hv_pcibus_installed)
+ hbus->state = hv_pcibus_removing;
+
+ tasklet_enable(&hdev->channel->callback_event);
+
+ if (old_state != hv_pcibus_installed)
+ return -EINVAL;
+
+ flush_workqueue(hbus->wq);
+
+ ret = hv_pci_bus_exit(hdev, true);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
+ return 0;
+}
+
+static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
+{
+ struct irq_data *irq_data;
+ struct msi_desc *entry;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return 0;
+
+ guard(msi_descs_lock)(&pdev->dev);
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
+ irq_data = irq_get_irq_data(entry->irq);
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
+ hv_compose_msi_msg(irq_data, &entry->msg);
+ }
+ return 0;
+}
+
+/*
+ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
+ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
+ * doesn't trap and emulate the MMIO accesses, hv_compose_msi_msg() must
+ * be used here to ask Hyper-V to re-create the IOMMU Interrupt Remapping
+ * Table entries.
+ */
+static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
+{
+ pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
+}
+
+static int hv_pci_resume(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum pci_protocol_version_t version[1];
+ int ret;
+
+ hbus->state = hv_pcibus_init;
+
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ return ret;
+
+ /* Only use the version that was in use before hibernation. */
+ version[0] = hbus->protocol_version;
+ ret = hv_pci_protocol_negotiation(hdev, version, 1);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto out;
+
+ mutex_lock(&hbus->state_lock);
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto release_state_lock;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto release_state_lock;
+
+ prepopulate_bars(hbus);
+
+ hv_pci_restore_msi_state(hbus);
+
+ hbus->state = hv_pcibus_installed;
+ mutex_unlock(&hbus->state_lock);
return 0;
+
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
+out:
+ vmbus_close(hdev->channel);
+ return ret;
}
static const struct hv_vmbus_device_id hv_pci_id_table[] = {
@@ -2717,15 +4149,38 @@ static struct hv_driver hv_pci_drv = {
.id_table = hv_pci_id_table,
.probe = hv_pci_probe,
.remove = hv_pci_remove,
+ .suspend = hv_pci_suspend,
+ .resume = hv_pci_resume,
};
static void __exit exit_hv_pci_drv(void)
{
vmbus_driver_unregister(&hv_pci_drv);
+
+ hvpci_block_ops.read_block = NULL;
+ hvpci_block_ops.write_block = NULL;
+ hvpci_block_ops.reg_blk_invalidate = NULL;
}
static int __init init_hv_pci_drv(void)
{
+ int ret;
+
+ if (!hv_is_hyperv_initialized())
+ return -ENODEV;
+
+ if (hv_root_partition() && !hv_nested)
+ return -ENODEV;
+
+ ret = hv_pci_irqchip_init();
+ if (ret)
+ return ret;
+
+ /* Initialize PCI block r/w interface */
+ hvpci_block_ops.read_block = hv_read_config_block;
+ hvpci_block_ops.write_block = hv_write_config_block;
+ hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
+
return vmbus_driver_register(&hv_pci_drv);
}