Diffstat (limited to 'drivers/dma/idxd')
-rw-r--r--  drivers/dma/idxd/Makefile    |   2
-rw-r--r--  drivers/dma/idxd/bus.c       |   6
-rw-r--r--  drivers/dma/idxd/cdev.c      | 122
-rw-r--r--  drivers/dma/idxd/compat.c    |   3
-rw-r--r--  drivers/dma/idxd/debugfs.c   |   4
-rw-r--r--  drivers/dma/idxd/device.c    |  22
-rw-r--r--  drivers/dma/idxd/dma.c       |   2
-rw-r--r--  drivers/dma/idxd/idxd.h      |  29
-rw-r--r--  drivers/dma/idxd/init.c      | 698
-rw-r--r--  drivers/dma/idxd/irq.c       |  93
-rw-r--r--  drivers/dma/idxd/perfmon.c   | 105
-rw-r--r--  drivers/dma/idxd/registers.h |   6
-rw-r--r--  drivers/dma/idxd/submit.c    |   8
-rw-r--r--  drivers/dma/idxd/sysfs.c     |  39
14 files changed, 809 insertions(+), 330 deletions(-)
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 2b4a0d406e1e..9ff9d7b87b64 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,4 +1,4 @@
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'

 obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
 idxd_bus-y := bus.o
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index b83b27e04f2a..e647a684485d 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -33,10 +33,10 @@ void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
 EXPORT_SYMBOL_GPL(idxd_driver_unregister);

 static int idxd_config_bus_match(struct device *dev,
-				 struct device_driver *drv)
+				 const struct device_driver *drv)
 {
-	struct idxd_device_driver *idxd_drv =
-		container_of(drv, struct idxd_device_driver, drv);
+	const struct idxd_device_driver *idxd_drv =
+		container_of_const(drv, struct idxd_device_driver, drv);
 	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
 	int i = 0;
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..7e4715f92773 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -28,7 +28,6 @@ struct idxd_cdev_context {
  * global to avoid conflict file names.
  */
 static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);

 /*
  * ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
 	struct idxd_device *idxd = wq->idxd;
 	int rc;

-	mutex_lock(&ida_lock);
 	ida_free(&file_ida, ctx->id);
-	mutex_unlock(&ida_lock);

 	/* Wait for in-flight operations to complete. */
 	if (wq_shared(wq)) {
@@ -225,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 	struct idxd_wq *wq;
 	struct device *dev, *fdev;
 	int rc = 0;
-	struct iommu_sva *sva;
+	struct iommu_sva *sva = NULL;
 	unsigned int pasid;
 	struct idxd_cdev *idxd_cdev;

@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 	}

 	idxd_cdev = wq->idxd_cdev;
-	mutex_lock(&ida_lock);
 	ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
-	mutex_unlock(&ida_lock);
 	if (ctx->id < 0) {
 		dev_warn(dev, "ida alloc failure\n");
 		goto failed_ida;
@@ -322,7 +317,7 @@ failed_set_pasid:
 	if (device_user_pasid_enabled(idxd))
 		idxd_xa_pasid_remove(ctx);
 failed_get_pasid:
-	if (device_user_pasid_enabled(idxd))
+	if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
 		iommu_sva_unbind_device(sva);
 failed:
 	mutex_unlock(&wq->wq_lock);
@@ -342,7 +337,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 	if (!evl)
 		return;

-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = status.tail;
 	h = status.head;
@@ -354,9 +349,10 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 			set_bit(h, evl->bmap);
 		h = (h + 1) % size;
 	}
-	spin_unlock(&evl->lock);
+	if (wq->wq)
+		drain_workqueue(wq->wq);

-	drain_workqueue(wq->wq);
+	mutex_unlock(&evl->lock);
 }

 static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -401,6 +397,21 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
 	int rc;

 	dev_dbg(&pdev->dev, "%s called\n", __func__);
+
+	/*
+	 * Due to an erratum in some of the devices supported by the driver,
+	 * direct user submission to the device can be unsafe.
+	 * (See the INTEL-SA-01084 security advisory)
+	 *
+	 * For the devices that exhibit this behavior, require that the user
+	 * has CAP_SYS_RAWIO capabilities.
+	 */
+	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	if (current->mm != ctx->mm)
+		return -EPERM;
+
 	rc = check_vma(wq, vma, __func__);
 	if (rc < 0)
 		return rc;
@@ -415,6 +426,75 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
 			vma->vm_page_prot);
 }

+static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
+				       struct dsa_hw_desc __user *udesc)
+{
+	struct idxd_wq *wq = ctx->wq;
+	struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
+	const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
+	void __iomem *portal = idxd_wq_portal_addr(wq);
+	struct dsa_hw_desc descriptor __aligned(64);
+	int rc;
+
+	rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
+	if (rc)
+		return -EFAULT;
+
+	/*
+	 * DSA devices are capable of indirect ("batch") command submission.
+	 * On devices where direct user submissions are not safe, we cannot
+	 * allow this since there is no good way for us to verify these
+	 * indirect commands. Narrow the restriction of operations with the
+	 * BATCH opcode to only DSA version 1 devices.
+	 */
+	if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
+	    wq->idxd->hw.version == DEVICE_VERSION_1 &&
+	    !wq->idxd->user_submission_safe)
+		return -EINVAL;
+	/*
+	 * As per the programming specification, the completion address must be
+	 * aligned to 32 or 64 bytes. If this is violated the hardware
+	 * engine can get very confused (security issue).
+	 */
+	if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
+		return -EINVAL;
+
+	if (wq_dedicated(wq))
+		iosubmit_cmds512(portal, &descriptor, 1);
+	else {
+		descriptor.priv = 0;
+		descriptor.pasid = ctx->pasid;
+		rc = idxd_enqcmds(wq, portal, &descriptor);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
+			       loff_t *unused)
+{
+	struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
+	struct idxd_user_context *ctx = filp->private_data;
+	ssize_t written = 0;
+	int i;
+
+	if (current->mm != ctx->mm)
+		return -EPERM;
+
+	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
+		int rc = idxd_submit_user_descriptor(ctx, udesc + i);
+
+		if (rc)
+			return written ? written : rc;
+
+		written += sizeof(struct dsa_hw_desc);
+	}
+
+	return written;
+}
+
 static __poll_t idxd_cdev_poll(struct file *filp,
 			       struct poll_table_struct *wait)
 {
@@ -423,6 +503,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
 	struct idxd_device *idxd = wq->idxd;
 	__poll_t out = 0;

+	if (current->mm != ctx->mm)
+		return POLLNVAL;
+
 	poll_wait(filp, &wq->err_queue, wait);
 	spin_lock(&idxd->dev_lock);
 	if (idxd->sw_err.valid)
@@ -437,6 +520,7 @@ static const struct file_operations idxd_cdev_fops = {
 	.open = idxd_cdev_open,
 	.release = idxd_cdev_release,
 	.mmap = idxd_cdev_mmap,
+	.write = idxd_cdev_write,
 	.poll = idxd_cdev_poll,
 };

@@ -501,7 +585,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
 	struct idxd_cdev *idxd_cdev;

 	idxd_cdev = wq->idxd_cdev;
-	ida_destroy(&file_ida);
 	wq->idxd_cdev = NULL;
 	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
 	put_device(cdev_dev(idxd_cdev));
@@ -517,6 +600,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
 	if (idxd->state != IDXD_DEV_ENABLED)
 		return -ENXIO;

+	mutex_lock(&wq->wq_lock);
+
+	if (!idxd_wq_driver_name_match(wq, dev)) {
+		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+		rc = -ENODEV;
+		goto wq_err;
+	}
+
 	/*
	 * User type WQ is enabled only when SVA is enabled for two reasons:
	 *   - If no IOMMU or IOMMU Passthrough without SVA, userspace
@@ -532,14 +623,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
 			dev_dbg(&idxd->pdev->dev,
 				"User type WQ cannot be enabled without SVA.\n");

-		return -EOPNOTSUPP;
-	}
-
-	mutex_lock(&wq->wq_lock);
-
-	if (!idxd_wq_driver_name_match(wq, dev)) {
-		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
-		rc = -ENODEV;
+		rc = -EOPNOTSUPP;
 		goto wq_err;
 	}
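[Editor's note] The write() handler added above gives user space a kernel-mediated path for submitting descriptors, as an alternative to writing them directly into the mmap'ed portal (which now requires CAP_SYS_RAWIO on affected devices). A minimal user-space sketch of that path, assuming an enabled user-type WQ exposed at /dev/dsa/wq0.0; the device path, buffer sizes, and busy-wait loop are illustrative, not part of the patch:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/idxd.h>	/* struct dsa_hw_desc, struct dsa_completion_record */

	int main(void)
	{
		/* DSA requires the completion record to be 32-byte aligned. */
		struct dsa_completion_record *comp = aligned_alloc(32, sizeof(*comp));
		struct dsa_hw_desc desc = { 0 };
		char src[64] = "hello", dst[64] = { 0 };
		int fd = open("/dev/dsa/wq0.0", O_RDWR);	/* hypothetical WQ node */

		if (fd < 0 || !comp)
			return 1;
		memset(comp, 0, sizeof(*comp));

		desc.opcode = DSA_OPCODE_MEMMOVE;
		/* Request a completion record so completion can be polled. */
		desc.flags = IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CRAV;
		desc.src_addr = (uintptr_t)src;
		desc.dst_addr = (uintptr_t)dst;
		desc.xfer_size = sizeof(src);
		desc.completion_addr = (uintptr_t)comp;

		/* One descriptor per sizeof(struct dsa_hw_desc) chunk; the
		 * driver validates opcode/alignment and fills the PASID on
		 * shared WQs, as idxd_submit_user_descriptor() above shows. */
		if (write(fd, &desc, sizeof(desc)) != sizeof(desc))
			return 1;

		while (__atomic_load_n(&comp->status, __ATOMIC_ACQUIRE) == 0)
			;	/* busy-wait; real code would bound this */

		printf("status %#x dst \"%s\"\n", comp->status, dst);
		close(fd);
		return 0;
	}

Fault handling and retry on a full shared WQ are elided; the driver-side checks in idxd_submit_user_descriptor() define what is actually enforced.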
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
index 5fd38d1b9d28..eff9943f1a42 100644
--- a/drivers/dma/idxd/compat.c
+++ b/drivers/dma/idxd/compat.c
@@ -7,7 +7,6 @@
 #include <linux/device/bus.h>
 #include "idxd.h"

-extern int device_driver_attach(struct device_driver *drv, struct device *dev);
 extern void device_driver_detach(struct device *dev);

 #define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)	\
@@ -104,4 +103,4 @@ struct idxd_device_driver dsa_drv = {
 };

 module_idxd_driver(dsa_drv);
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 	if (!evl || !evl->log)
 		return 0;

-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 		dump_event_entry(idxd, s, i, &count, processed);
 	}

-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);

 	return 0;
 }
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..5cf419fe6b46 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -161,7 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 	free_hw_descs(wq);
 	return rc;
 }
-EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, "IDXD");

 void idxd_wq_free_resources(struct idxd_wq *wq)
 {
@@ -175,7 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
 	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 	sbitmap_queue_free(&wq->sbq);
 }
-EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");

 int idxd_wq_enable(struct idxd_wq *wq)
 {
@@ -407,7 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
 	reinit_completion(&wq->wq_resurrect);
 	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, "IDXD");

 void __idxd_wq_quiesce(struct idxd_wq *wq)
 {
@@ -417,7 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
 	complete_all(&wq->wq_resurrect);
 	wait_for_completion(&wq->wq_dead);
 }
-EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
+EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, "IDXD");

 void idxd_wq_quiesce(struct idxd_wq *wq)
 {
@@ -425,7 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
 	__idxd_wq_quiesce(wq);
 	mutex_unlock(&wq->wq_lock);
 }
-EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, "IDXD");

 /* Device control bits */
 static inline bool idxd_is_enabled(struct idxd_device *idxd)
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
 		goto err_alloc;
 	}

-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	evl->log = addr;
 	evl->dma = dma_addr;
 	evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
 	gencfg.evl_en = 1;
 	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);
 	return 0;

 err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
 	if (!gencfg.evl_en)
 		return;

-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	gencfg.evl_en = 0;
 	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
 	evl_dma = evl->dma;
 	evl->log = NULL;
 	evl->size = IDXD_EVL_SIZE_MIN;
-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);

 	dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
 }
@@ -1494,7 +1494,7 @@ err_map_portal:
 err:
 	return rc;
 }
-EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, "IDXD");

 void idxd_drv_disable_wq(struct idxd_wq *wq)
 {
@@ -1516,7 +1516,7 @@ void idxd_drv_disable_wq(struct idxd_wq *wq)
 	wq->type = IDXD_WQT_NONE;
 	wq->client_count = 0;
 }
-EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");

 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
 {
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index cd835eabd31b..dbecd699237e 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -269,7 +269,7 @@ static int idxd_register_dma_channel(struct idxd_wq *wq)
 		desc->txd.tx_submit = idxd_dma_tx_submit;
 	}

-	rc = dma_async_device_channel_register(dma, chan);
+	rc = dma_async_device_channel_register(dma, chan, NULL);
 	if (rc < 0) {
 		kfree(idxd_chan);
 		return rc;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -19,7 +19,6 @@

 #define IDXD_DRIVER_VERSION	"1.00"

-extern struct kmem_cache *idxd_desc_pool;
 extern bool tc_override;

 struct idxd_wq;
@@ -124,7 +123,6 @@ struct idxd_pmu {
 	struct pmu pmu;
 	char name[IDXD_NAME_SIZE];
-	int cpu;

 	int n_counters;
 	int counter_width;
@@ -135,8 +133,6 @@ struct idxd_pmu {

 	unsigned long supported_filters;
 	int n_filters;
-
-	struct hlist_node cpuhp_node;
 };

 #define IDXD_MAX_PRIORITY	0xf
@@ -174,7 +170,6 @@ struct idxd_cdev {

 #define DRIVER_NAME_SIZE		128

-#define IDXD_ALLOCATED_BATCH_SIZE	128U
 #define WQ_NAME_SIZE   1024
 #define WQ_TYPE_SIZE   10

@@ -288,12 +283,13 @@ struct idxd_driver_data {
 	int evl_cr_off;
 	int cr_status_off;
 	int cr_result_off;
+	bool user_submission_safe;
 	load_device_defaults_fn_t load_device_defaults;
 };

 struct idxd_evl {
 	/* Lock to protect event log access. */
-	spinlock_t lock;
+	struct mutex lock;
 	void *log;
 	dma_addr_t dma;
 	/* Total size of event log = number of entries * entry size. */
@@ -374,6 +370,19 @@ struct idxd_device {

 	struct dentry *dbgfs_dir;
 	struct dentry *dbgfs_evl_file;
+
+	bool user_submission_safe;
+
+	struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+	struct idxd_device saved_idxd;
+	struct idxd_evl saved_evl;
+	struct idxd_engine **saved_engines;
+	struct idxd_wq **saved_wqs;
+	struct idxd_group **saved_groups;
+	unsigned long *saved_wq_enable_map;
 };

 static inline unsigned int evl_ent_size(struct idxd_device *idxd)
@@ -725,8 +734,6 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
 				      &desc->txd, &status);
 }

-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
 int idxd_register_devices(struct idxd_device *idxd);
 void idxd_unregister_devices(struct idxd_device *idxd);
 void idxd_wqs_quiesce(struct idxd_device *idxd);
@@ -742,6 +749,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);

 /* device control */
 int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+			 const struct pci_device_id *id);
 void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
 int idxd_drv_enable_wq(struct idxd_wq *wq);
 void idxd_drv_disable_wq(struct idxd_wq *wq);
@@ -800,14 +809,10 @@ void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
 int perfmon_pmu_init(struct idxd_device *idxd);
 void perfmon_pmu_remove(struct idxd_device *idxd);
 void perfmon_counter_overflow(struct idxd_device *idxd);
-void perfmon_init(void);
-void perfmon_exit(void);
 #else
 static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
 static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
 static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
-static inline void perfmon_init(void) {}
-static inline void perfmon_exit(void) {}
 #endif

 /* debugfs */
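[Editor's note] The MODULE_IMPORT_NS() and EXPORT_SYMBOL_NS_GPL() hunks above and below follow the tree-wide conversion of symbol namespaces from bare preprocessor tokens to string literals; the Makefile hunk quotes DEFAULT_SYMBOL_NAMESPACE the same way. A minimal sketch of a hypothetical consumer module (not part of this series) that uses the namespaced idxd exports:

	// SPDX-License-Identifier: GPL-2.0
	/* Hypothetical consumer of the "IDXD" symbol namespace, shown only
	 * to illustrate the string-literal form after the conversion. */
	#include <linux/module.h>

	MODULE_IMPORT_NS("IDXD");	/* was: MODULE_IMPORT_NS(IDXD) */

	static int __init idxd_consumer_init(void)
	{
		/* Namespaced exports such as idxd_submit_desc() only resolve
		 * for modules that import "IDXD"; otherwise modpost warns and
		 * the module fails to load. */
		return 0;
	}
	module_init(idxd_consumer_init);

	static void __exit idxd_consumer_exit(void) { }
	module_exit(idxd_consumer_exit);

	MODULE_LICENSE("GPL");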
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -22,9 +22,10 @@
 #include "perfmon.h"

 MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Intel Corporation");
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");

 static bool sva = true;
 module_param(sva, bool, 0644);
@@ -47,6 +48,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
 		.align = 32,
 		.dev_type = &dsa_device_type,
 		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
 		.cr_status_off = offsetof(struct dsa_completion_record, status),
 		.cr_result_off = offsetof(struct dsa_completion_record, result),
 	},
@@ -57,6 +59,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
 		.align = 64,
 		.dev_type = &iax_device_type,
 		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
+		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
 		.cr_status_off = offsetof(struct iax_completion_record, status),
 		.cr_result_off = offsetof(struct iax_completion_record, error_code),
 		.load_device_defaults = idxd_load_iaa_device_defaults,
@@ -66,9 +69,17 @@ static struct idxd_driver_data idxd_driver_data[] = {
 static struct pci_device_id idxd_pci_tbl[] = {
 	/* DSA ver 1.0 platforms */
 	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
+	/* DSA on GNR-D platforms */
+	{ PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
+	/* DSA on DMR platforms */
+	{ PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },
 	/* IAX ver 1.0 platforms */
 	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
+	/* IAA on DMR platforms */
+	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+	/* IAA PTL platforms */
+	{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -144,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
 	pci_free_irq_vectors(pdev);
 }

+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+	struct idxd_wq *wq;
+	struct device *conf_dev;
+	int i;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		wq = idxd->wqs[i];
+		if (idxd->hw.wq_cap.op_config)
+			bitmap_free(wq->opcap_bmap);
+		kfree(wq->wqcfg);
+		conf_dev = wq_confdev(wq);
+		put_device(conf_dev);
+		kfree(wq);
+	}
+	bitmap_free(idxd->wq_enable_map);
+	kfree(idxd->wqs);
+}
+
 static int idxd_setup_wqs(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -158,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)

 	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
 	if (!idxd->wq_enable_map) {
-		kfree(idxd->wqs);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_bitmap;
 	}

 	for (i = 0; i < idxd->max_wqs; i++) {
@@ -178,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		conf_dev->bus = &dsa_bus_type;
 		conf_dev->type = &idxd_wq_device_type;
 		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
-		if (rc < 0) {
-			put_device(conf_dev);
+		if (rc < 0)
 			goto err;
-		}

 		mutex_init(&wq->wq_lock);
 		init_waitqueue_head(&wq->err_queue);
@@ -192,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
 		if (!wq->wqcfg) {
-			put_device(conf_dev);
 			rc = -ENOMEM;
 			goto err;
 		}
@@ -200,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		if (idxd->hw.wq_cap.op_config) {
 			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
 			if (!wq->opcap_bmap) {
-				put_device(conf_dev);
 				rc = -ENOMEM;
-				goto err;
+				goto err_opcap_bmap;
 			}
 			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 		}
@@ -213,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)

 	return 0;

- err:
+err_opcap_bmap:
+	kfree(wq->wqcfg);
+
+err:
+	put_device(conf_dev);
+	kfree(wq);
+
 	while (--i >= 0) {
 		wq = idxd->wqs[i];
+		if (idxd->hw.wq_cap.op_config)
+			bitmap_free(wq->opcap_bmap);
+		kfree(wq->wqcfg);
 		conf_dev = wq_confdev(wq);
 		put_device(conf_dev);
+		kfree(wq);
 	}
+	bitmap_free(idxd->wq_enable_map);
+
+err_bitmap:
 	kfree(idxd->wqs);
+
 	return rc;
 }

+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+	struct idxd_engine *engine;
+	struct device *conf_dev;
+	int i;
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		engine = idxd->engines[i];
+		conf_dev = engine_confdev(engine);
+		put_device(conf_dev);
+		kfree(engine);
+	}
+	kfree(idxd->engines);
+}
+
 static int idxd_setup_engines(struct idxd_device *idxd)
 {
 	struct idxd_engine *engine;
@@ -252,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
 		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
 		if (rc < 0) {
 			put_device(conf_dev);
+			kfree(engine);
 			goto err;
 		}

@@ -265,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
 		engine = idxd->engines[i];
 		conf_dev = engine_confdev(engine);
 		put_device(conf_dev);
+		kfree(engine);
 	}
+	kfree(idxd->engines);
+
 	return rc;
 }

+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+	struct idxd_group *group;
+	int i;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		group = idxd->groups[i];
+		put_device(group_confdev(group));
+		kfree(group);
+	}
+	kfree(idxd->groups);
+}
+
 static int idxd_setup_groups(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -299,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
 		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
 		if (rc < 0) {
 			put_device(conf_dev);
+			kfree(group);
 			goto err;
 		}

@@ -323,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
 	while (--i >= 0) {
 		group = idxd->groups[i];
 		put_device(group_confdev(group));
+		kfree(group);
 	}
+	kfree(idxd->groups);
+
 	return rc;
 }

 static void idxd_cleanup_internals(struct idxd_device *idxd)
 {
-	int i;
-
-	for (i = 0; i < idxd->max_groups; i++)
-		put_device(group_confdev(idxd->groups[i]));
-	for (i = 0; i < idxd->max_engines; i++)
-		put_device(engine_confdev(idxd->engines[i]));
-	for (i = 0; i < idxd->max_wqs; i++)
-		put_device(wq_confdev(idxd->wqs[i]));
+	idxd_clean_groups(idxd);
+	idxd_clean_engines(idxd);
+	idxd_clean_wqs(idxd);
 	destroy_workqueue(idxd->wq);
 }

@@ -354,7 +427,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
 	if (!evl)
 		return -ENOMEM;

-	spin_lock_init(&evl->lock);
+	mutex_init(&evl->lock);
 	evl->size = IDXD_EVL_SIZE_MIN;

 	idxd_name = dev_name(idxd_confdev(idxd));
@@ -379,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
-	int rc, i;
+	int rc;

 	init_waitqueue_head(&idxd->cmd_waitq);

@@ -410,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 err_evl:
 	destroy_workqueue(idxd->wq);
 err_wkq_create:
-	for (i = 0; i < idxd->max_groups; i++)
-		put_device(group_confdev(idxd->groups[i]));
+	idxd_clean_groups(idxd);
 err_group:
-	for (i = 0; i < idxd->max_engines; i++)
-		put_device(engine_confdev(idxd->engines[i]));
+	idxd_clean_engines(idxd);
 err_engine:
-	for (i = 0; i < idxd->max_wqs; i++)
-		put_device(wq_confdev(idxd->wqs[i]));
+	idxd_clean_wqs(idxd);
 err_wqs:
 	return rc;
 }
@@ -517,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
 		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
 }

+static void idxd_free(struct idxd_device *idxd)
+{
+	if (!idxd)
+		return;
+
+	put_device(idxd_confdev(idxd));
+	bitmap_free(idxd->opcap_bmap);
+	ida_free(&idxd_ida, idxd->id);
+	kfree(idxd);
+}
+
 static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
 {
 	struct device *dev = &pdev->dev;
@@ -534,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
 	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
 	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
 	if (idxd->id < 0)
-		return NULL;
+		goto err_ida;

 	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
-	if (!idxd->opcap_bmap) {
-		ida_free(&idxd_ida, idxd->id);
-		return NULL;
-	}
+	if (!idxd->opcap_bmap)
+		goto err_opcap;

 	device_initialize(conf_dev);
 	conf_dev->parent = dev;
 	conf_dev->bus = &dsa_bus_type;
 	conf_dev->type = idxd->data->dev_type;
 	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
-	if (rc < 0) {
-		put_device(conf_dev);
-		return NULL;
-	}
+	if (rc < 0)
+		goto err_name;

 	spin_lock_init(&idxd->dev_lock);
 	spin_lock_init(&idxd->cmd_lock);

 	return idxd;
+
+err_name:
+	put_device(conf_dev);
+	bitmap_free(idxd->opcap_bmap);
+err_opcap:
+	ida_free(&idxd_ida, idxd->id);
+err_ida:
+	kfree(idxd);
+
+	return NULL;
 }

 static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -582,7 +669,7 @@ static int idxd_enable_system_pasid(struct idxd_device *idxd)
 	 * DMA domain is owned by the driver, it should support all valid
 	 * types such as DMA-FQ, identity, etc.
 	 */
-	ret = iommu_attach_device_pasid(domain, dev, pasid);
+	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
 	if (ret) {
 		dev_err(dev, "failed to attach device pasid %d, domain type %d",
 			pasid, domain->type);
@@ -615,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
 	idxd->pasid = IOMMU_PASID_INVALID;
 }

-static int idxd_enable_sva(struct pci_dev *pdev)
-{
-	int ret;
-
-	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-	if (ret)
-		return ret;
-
-	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
-	if (ret)
-		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
-	return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
-	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
-	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
 static int idxd_probe(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
@@ -650,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
 	dev_dbg(dev, "IDXD reset complete\n");

 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-		if (idxd_enable_sva(pdev)) {
-			dev_warn(dev, "Unable to turn on user SVA feature.\n");
-		} else {
-			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+		set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

-			rc = idxd_enable_system_pasid(idxd);
-			if (rc)
-				dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
-			else
-				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
-		}
+		rc = idxd_enable_system_pasid(idxd);
+		if (rc)
+			dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+		else
+			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
 	} else if (!sva) {
 		dev_warn(dev, "User forced SVA off via module param.\n");
 	}
@@ -698,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
 err:
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(pdev);
 	return rc;
 }

@@ -710,70 +770,466 @@ static void idxd_cleanup(struct idxd_device *idxd)
 	idxd_cleanup_internals(idxd);
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(idxd->pdev);
 }

-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+/*
+ * Attach IDXD device to IDXD driver.
+ */
+static int idxd_bind(struct device_driver *drv, const char *buf)
 {
-	struct device *dev = &pdev->dev;
-	struct idxd_device *idxd;
-	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
+	const struct bus_type *bus = drv->bus;
+	struct device *dev;
+	int err = -ENODEV;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (dev)
+		err = device_driver_attach(drv, dev);
+
+	put_device(dev);
+
+	return err;
+}
+
+/*
+ * Detach IDXD device from driver.
+ */
+static void idxd_unbind(struct device_driver *drv, const char *buf)
+{
+	const struct bus_type *bus = drv->bus;
+	struct device *dev;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (dev && dev->driver == drv)
+		device_release_driver(dev);
+
+	put_device(dev);
+}
+
+#define idxd_free_saved_configs(saved_configs, count)	\
+	do {						\
+		int i;					\
+							\
+		for (i = 0; i < (count); i++)		\
+			kfree(saved_configs[i]);	\
+	} while (0)
+
+static void idxd_free_saved(struct idxd_group **saved_groups,
+			    struct idxd_engine **saved_engines,
+			    struct idxd_wq **saved_wqs,
+			    struct idxd_device *idxd)
+{
+	if (saved_groups)
+		idxd_free_saved_configs(saved_groups, idxd->max_groups);
+	if (saved_engines)
+		idxd_free_saved_configs(saved_engines, idxd->max_engines);
+	if (saved_wqs)
+		idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations including engines, groups, wqs etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+				   struct idxd_saved_states *idxd_saved)
+{
+	struct device *dev = &idxd->pdev->dev;
+	int i;
+
+	memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+	if (idxd->evl) {
+		memcpy(&idxd_saved->saved_evl, idxd->evl,
+		       sizeof(struct idxd_evl));
+	}
+
+	struct idxd_group **saved_groups __free(kfree) =
+		kcalloc_node(idxd->max_groups,
+			     sizeof(struct idxd_group *),
+			     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_groups)
+		return -ENOMEM;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *saved_group __free(kfree) =
+			kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+				     dev_to_node(dev));
+
+		if (!saved_group) {
+			/* Free saved groups */
+			idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+			return -ENOMEM;
+		}
+
+		memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+		saved_groups[i] = no_free_ptr(saved_group);
+	}
+
+	struct idxd_engine **saved_engines =
+		kcalloc_node(idxd->max_engines,
+			     sizeof(struct idxd_engine *),
+			     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_engines) {
+		/* Free saved groups */
+		idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+		return -ENOMEM;
+	}
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *saved_engine __free(kfree) =
+			kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+				     dev_to_node(dev));
+		if (!saved_engine) {
+			/* Free saved groups and engines */
+			idxd_free_saved(saved_groups, saved_engines, NULL,
+					idxd);
+
+			return -ENOMEM;
+		}
+
+		memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+		saved_engines[i] = no_free_ptr(saved_engine);
+	}
+
+	unsigned long *saved_wq_enable_map __free(bitmap) =
+		bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+				   dev_to_node(dev));
+	if (!saved_wq_enable_map) {
+		/* Free saved groups and engines */
+		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+		return -ENOMEM;
+	}
+
+	bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+	struct idxd_wq **saved_wqs __free(kfree) =
+		kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+			     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_wqs) {
+		/* Free saved groups and engines */
+		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *saved_wq __free(kfree) =
+			kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+				     dev_to_node(dev));
+		struct idxd_wq *wq;
+
+		if (!saved_wq) {
+			/* Free saved groups, engines, and wqs */
+			idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+					idxd);
+
+			return -ENOMEM;
+		}
+
+		if (!test_bit(i, saved_wq_enable_map))
+			continue;
+
+		wq = idxd->wqs[i];
+		mutex_lock(&wq->wq_lock);
+		memcpy(saved_wq, wq, sizeof(*saved_wq));
+		saved_wqs[i] = no_free_ptr(saved_wq);
+		mutex_unlock(&wq->wq_lock);
+	}
+
+	/* Save configurations */
+	idxd_saved->saved_groups = no_free_ptr(saved_groups);
+	idxd_saved->saved_engines = no_free_ptr(saved_engines);
+	idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+	idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+	return 0;
+}
+
+/*
+ * Restore IDXD device configurations including engines, groups, wqs etc
+ * that were saved before.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+				       struct idxd_saved_states *idxd_saved)
+{
+	struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+	int i;
+
+	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+	idxd->evl->size = saved_evl->size;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *saved_group, *group;
+
+		saved_group = idxd_saved->saved_groups[i];
+		group = idxd->groups[i];
+
+		group->rdbufs_allowed = saved_group->rdbufs_allowed;
+		group->rdbufs_reserved = saved_group->rdbufs_reserved;
+		group->tc_a = saved_group->tc_a;
+		group->tc_b = saved_group->tc_b;
+		group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+		kfree(saved_group);
+	}
+	kfree(idxd_saved->saved_groups);
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *saved_engine, *engine;
+
+		saved_engine = idxd_saved->saved_engines[i];
+		engine = idxd->engines[i];
+
+		engine->group = saved_engine->group;
+
+		kfree(saved_engine);
+	}
+	kfree(idxd_saved->saved_engines);
+
+	bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+		    idxd->max_wqs);
+	bitmap_free(idxd_saved->saved_wq_enable_map);
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *saved_wq, *wq;
+		size_t len;
+
+		if (!test_bit(i, idxd->wq_enable_map))
+			continue;
+
+		saved_wq = idxd_saved->saved_wqs[i];
+		wq = idxd->wqs[i];
+
+		mutex_lock(&wq->wq_lock);
+
+		wq->group = saved_wq->group;
+		wq->flags = saved_wq->flags;
+		wq->threshold = saved_wq->threshold;
+		wq->size = saved_wq->size;
+		wq->priority = saved_wq->priority;
+		wq->type = saved_wq->type;
+		len = strlen(saved_wq->name) + 1;
+		strscpy(wq->name, saved_wq->name, len);
+		wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+		wq->max_batch_size = saved_wq->max_batch_size;
+		wq->enqcmds_retries = saved_wq->enqcmds_retries;
+		wq->descs = saved_wq->descs;
+		wq->idxd_chan = saved_wq->idxd_chan;
+		len = strlen(saved_wq->driver_name) + 1;
+		strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+		mutex_unlock(&wq->wq_lock);
+
+		kfree(saved_wq);
+	}
+
+	kfree(idxd_saved->saved_wqs);
+}
+
+static void idxd_reset_prepare(struct pci_dev *pdev)
+{
+	struct idxd_device *idxd = pci_get_drvdata(pdev);
+	struct device *dev = &idxd->pdev->dev;
+	const char *idxd_name;
 	int rc;

-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
+	dev = &idxd->pdev->dev;
+	idxd_name = dev_name(idxd_confdev(idxd));

-	dev_dbg(dev, "Alloc IDXD context\n");
-	idxd = idxd_alloc(pdev, data);
-	if (!idxd) {
-		rc = -ENOMEM;
-		goto err_idxd_alloc;
+	struct idxd_saved_states *idxd_saved __free(kfree) =
+		kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
+			     dev_to_node(&pdev->dev));
+	if (!idxd_saved) {
+		dev_err(dev, "HALT: no memory\n");
+
+		return;
 	}

-	dev_dbg(dev, "Mapping BARs\n");
-	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
-	if (!idxd->reg_base) {
-		rc = -ENOMEM;
-		goto err_iomap;
+	/* Save IDXD configurations. */
+	rc = idxd_device_config_save(idxd, idxd_saved);
+	if (rc < 0) {
+		dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
+
+		return;
+	}
+
+	idxd->idxd_saved = no_free_ptr(idxd_saved);
+
+	/* Save PCI device state. */
+	pci_save_state(idxd->pdev);
+}
+
+static void idxd_reset_done(struct pci_dev *pdev)
+{
+	struct idxd_device *idxd = pci_get_drvdata(pdev);
+	const char *idxd_name;
+	struct device *dev;
+	int rc, i;
+
+	if (!idxd->idxd_saved)
+		return;
+
+	dev = &idxd->pdev->dev;
+	idxd_name = dev_name(idxd_confdev(idxd));
+
+	/* Restore PCI device state. */
+	pci_restore_state(idxd->pdev);
+
+	/* Unbind idxd device from driver. */
+	idxd_unbind(&idxd_drv.drv, idxd_name);
+
+	/*
+	 * Probe PCI device without allocating or changing
+	 * idxd software data which keeps the same as before FLR.
+	 */
+	idxd_pci_probe_alloc(idxd, NULL, NULL);
+
+	/* Restore IDXD configurations. */
+	idxd_device_config_restore(idxd, idxd->idxd_saved);
+
+	/* Re-configure IDXD device if allowed. */
+	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+		rc = idxd_device_config(idxd);
+		if (rc < 0) {
+			dev_err(dev, "HALT: %s config fails\n", idxd_name);
+			goto out;
+		}
+	}
+
+	/* Bind IDXD device to driver. */
+	rc = idxd_bind(&idxd_drv.drv, idxd_name);
+	if (rc < 0) {
+		dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
+		goto out;
 	}

-	dev_dbg(dev, "Set DMA masks\n");
-	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	/* Bind enabled wq in the IDXD device to driver. */
+	for (i = 0; i < idxd->max_wqs; i++) {
+		if (test_bit(i, idxd->wq_enable_map)) {
+			struct idxd_wq *wq = idxd->wqs[i];
+			char wq_name[32];
+
+			wq->state = IDXD_WQ_DISABLED;
+			sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
+			/*
+			 * Bind to user driver depending on wq type.
+			 *
+			 * Currently only support user type WQ. Will support
+			 * kernel type WQ in the future.
+			 */
+			if (wq->type == IDXD_WQT_USER)
+				rc = idxd_bind(&idxd_user_drv.drv, wq_name);
+			else
+				rc = -EINVAL;
+			if (rc < 0) {
+				clear_bit(i, idxd->wq_enable_map);
+				dev_err(dev,
+					"HALT: unable to re-enable wq %s\n",
+					dev_name(wq_confdev(wq)));
+			}
+		}
+	}
+out:
+	kfree(idxd->idxd_saved);
+}
+
+static const struct pci_error_handlers idxd_error_handler = {
+	.reset_prepare = idxd_reset_prepare,
+	.reset_done = idxd_reset_done,
+};
+
+/*
+ * Probe idxd PCI device.
+ * If idxd is not given, need to allocate idxd and set up its data.
+ *
+ * If idxd is given, idxd was allocated and setup already. Just need to
+ * configure device without re-allocating and re-configuring idxd data.
+ * This is useful for recovering from FLR.
+ */
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+			 const struct pci_device_id *id)
+{
+	bool alloc_idxd = idxd ? false : true;
+	struct idxd_driver_data *data;
+	struct device *dev;
+	int rc;
+
+	pdev = idxd ? idxd->pdev : pdev;
+	dev = &pdev->dev;
+	data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+	rc = pci_enable_device(pdev);
 	if (rc)
-		goto err;
+		return rc;
+
+	if (alloc_idxd) {
+		dev_dbg(dev, "Alloc IDXD context\n");
+		idxd = idxd_alloc(pdev, data);
+		if (!idxd) {
+			rc = -ENOMEM;
+			goto err_idxd_alloc;
+		}
+
+		dev_dbg(dev, "Mapping BARs\n");
+		idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+		if (!idxd->reg_base) {
+			rc = -ENOMEM;
+			goto err_iomap;
+		}
+
+		dev_dbg(dev, "Set DMA masks\n");
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc)
+			goto err;
+	}

 	dev_dbg(dev, "Set PCI master\n");
 	pci_set_master(pdev);
 	pci_set_drvdata(pdev, idxd);

-	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
-	rc = idxd_probe(idxd);
-	if (rc) {
-		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
-		goto err;
-	}
+	if (alloc_idxd) {
+		idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+		rc = idxd_probe(idxd);
+		if (rc) {
+			dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+			goto err;
+		}

-	if (data->load_device_defaults) {
-		rc = data->load_device_defaults(idxd);
+		if (data->load_device_defaults) {
+			rc = data->load_device_defaults(idxd);
+			if (rc)
+				dev_warn(dev, "IDXD loading device defaults failed\n");
+		}
+
+		rc = idxd_register_devices(idxd);
+		if (rc) {
+			dev_err(dev, "IDXD sysfs setup failed\n");
+			goto err_dev_register;
+		}
+
+		rc = idxd_device_init_debugfs(idxd);
 		if (rc)
-			dev_warn(dev, "IDXD loading device defaults failed\n");
+			dev_warn(dev, "IDXD debugfs failed to setup\n");
 	}

-	rc = idxd_register_devices(idxd);
-	if (rc) {
-		dev_err(dev, "IDXD sysfs setup failed\n");
-		goto err_dev_register;
-	}
+	if (!alloc_idxd) {
+		/* Release interrupts in the IDXD device. */
+		idxd_cleanup_interrupts(idxd);

-	rc = idxd_device_init_debugfs(idxd);
-	if (rc)
-		dev_warn(dev, "IDXD debugfs failed to setup\n");
+		/* Re-enable interrupts in the IDXD device. */
+		rc = idxd_setup_interrupts(idxd);
+		if (rc)
+			dev_warn(dev, "IDXD interrupts failed to setup\n");
+	}

 	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
 		 idxd->hw.version);

+	if (data)
+		idxd->user_submission_safe = data->user_submission_safe;
+
 	return 0;

 err_dev_register:
@@ -781,12 +1237,17 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err:
 	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
-	put_device(idxd_confdev(idxd));
+	idxd_free(idxd);
 err_idxd_alloc:
 	pci_disable_device(pdev);
 	return rc;
 }

+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	return idxd_pci_probe_alloc(NULL, pdev, id);
+}
+
 void idxd_wqs_quiesce(struct idxd_device *idxd)
 {
 	struct idxd_wq *wq;
@@ -818,7 +1279,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
 static void idxd_remove(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
-	struct idxd_irq_entry *irq_entry;

 	idxd_unregister_devices(idxd);
 	/*
@@ -831,20 +1291,12 @@ static void idxd_remove(struct pci_dev *pdev)
 	get_device(idxd_confdev(idxd));
 	device_unregister(idxd_confdev(idxd));
 	idxd_shutdown(pdev);
-	if (device_pasid_enabled(idxd))
-		idxd_disable_system_pasid(idxd);
 	idxd_device_remove_debugfs(idxd);
-
-	irq_entry = idxd_get_ie(idxd, 0);
-	free_irq(irq_entry->vector, irq_entry);
-	pci_free_irq_vectors(pdev);
+	idxd_cleanup(idxd);
 	pci_iounmap(pdev, idxd->reg_base);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(pdev);
-	pci_disable_device(pdev);
-	destroy_workqueue(idxd->wq);
-	perfmon_pmu_remove(idxd);
 	put_device(idxd_confdev(idxd));
+	idxd_free(idxd);
+	pci_disable_device(pdev);
 }

 static struct pci_driver idxd_pci_driver = {
@@ -853,6 +1305,7 @@ static struct pci_driver idxd_pci_driver = {
 	.probe		= idxd_pci_probe,
 	.remove		= idxd_remove,
 	.shutdown	= idxd_shutdown,
+	.err_handler	= &idxd_error_handler,
 };

 static int __init idxd_init_module(void)
@@ -873,8 +1326,6 @@ static int __init idxd_init_module(void)
 	else
 		support_enqcmd = true;

-	perfmon_init();
-
 	err = idxd_driver_register(&idxd_drv);
 	if (err < 0)
 		goto err_idxd_driver_register;
@@ -923,7 +1374,6 @@ static void __exit idxd_exit_module(void)
 	idxd_driver_unregister(&idxd_drv);
 	pci_unregister_driver(&idxd_pci_driver);
 	idxd_cdev_remove();
-	perfmon_exit();
 	idxd_remove_debugfs();
 }
 module_exit(idxd_exit_module);
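[Editor's note] idxd_device_config_save() above leans on the scope-based cleanup helpers from <linux/cleanup.h>: each allocation is armed with __free(kfree) so every early error return frees it automatically, and no_free_ptr() disarms the guard once ownership is transferred into idxd_saved. A stripped-down sketch of that pattern, with a hypothetical struct demo:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	struct demo { int val; };	/* hypothetical, for illustration */

	static struct demo *demo_alloc(void)
	{
		/* Freed automatically on any early return from this scope... */
		struct demo *d __free(kfree) = kzalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return NULL;

		d->val = 42;
		/* ...unless ownership is handed out with no_free_ptr(),
		 * which also NULLs the local so the guard does nothing. */
		return no_free_ptr(d);
	}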
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..1107db3ce0a3 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
 	evl_status.bits = 0;
 	evl_status.int_pending = 1;

-	spin_lock(&evl->lock);
+	mutex_lock(&evl->lock);
 	/* Clear interrupt pending bit */
 	iowrite32(evl_status.bits_upper32,
 		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,59 @@ static void process_evl_entries(struct idxd_device *idxd)
 	evl_status.head = h;
 	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
-	spin_unlock(&evl->lock);
+	mutex_unlock(&evl->lock);
+}
+
+static void idxd_device_flr(struct work_struct *work)
+{
+	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+	int rc;
+
+	/*
+	 * IDXD device requires a Function Level Reset (FLR).
+	 * pci_reset_function() will reset the device with FLR.
+	 */
+	rc = pci_reset_function(idxd->pdev);
+	if (rc)
+		dev_err(&idxd->pdev->dev, "FLR failed\n");
+}
+
+static irqreturn_t idxd_halt(struct idxd_device *idxd)
+{
+	union gensts_reg gensts;
+
+	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+		idxd->state = IDXD_DEV_HALTED;
+		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+			/*
+			 * If we need a software reset, we will throw the work
+			 * on a system workqueue in order to allow interrupts
+			 * for the device command completions.
+			 */
+			INIT_WORK(&idxd->work, idxd_device_reinit);
+			queue_work(idxd->wq, &idxd->work);
+		} else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
+			idxd->state = IDXD_DEV_HALTED;
+			idxd_mask_error_interrupts(idxd);
+			dev_dbg(&idxd->pdev->dev,
+				"idxd halted, doing FLR. After FLR, configs are restored\n");
+			INIT_WORK(&idxd->work, idxd_device_flr);
+			queue_work(idxd->wq, &idxd->work);
+
+		} else {
+			idxd->state = IDXD_DEV_HALTED;
+			idxd_wqs_quiesce(idxd);
+			idxd_wqs_unmap_portal(idxd);
+			idxd_device_clear_state(idxd);
+			dev_err(&idxd->pdev->dev,
+				"idxd halted, need system reset");
+
+			return -ENXIO;
+		}
+	}
+
+	return IRQ_HANDLED;
 }

 irqreturn_t idxd_misc_thread(int vec, void *data)
@@ -388,10 +440,8 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	struct idxd_irq_entry *irq_entry = data;
 	struct idxd_device *idxd = ie_to_idxd(irq_entry);
 	struct device *dev = &idxd->pdev->dev;
-	union gensts_reg gensts;
 	u32 val = 0;
 	int i;
-	bool err = false;
 	u32 cause;

 	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -401,7 +451,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

 	if (cause & IDXD_INTC_HALT_STATE)
-		goto halt;
+		return idxd_halt(idxd);

 	if (cause & IDXD_INTC_ERR) {
 		spin_lock(&idxd->dev_lock);
@@ -435,7 +485,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 		for (i = 0; i < 4; i++)
 			dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
 					     i, idxd->sw_err.bits[i]);
-		err = true;
 	}

 	if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
@@ -480,34 +529,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
 			      val);

-	if (!err)
-		goto out;
-
-halt:
-	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
-	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
-		idxd->state = IDXD_DEV_HALTED;
-		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
-			/*
-			 * If we need a software reset, we will throw the work
-			 * on a system workqueue in order to allow interrupts
-			 * for the device command completions.
-			 */
-			INIT_WORK(&idxd->work, idxd_device_reinit);
-			queue_work(idxd->wq, &idxd->work);
-		} else {
-			idxd->state = IDXD_DEV_HALTED;
-			idxd_wqs_quiesce(idxd);
-			idxd_wqs_unmap_portal(idxd);
-			idxd_device_clear_state(idxd);
-			dev_err(&idxd->pdev->dev,
-				"idxd halted, need %s.\n",
-				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
-				"FLR" : "system reset");
-		}
-	}
-
-out:
 	return IRQ_HANDLED;
 }

@@ -611,11 +632,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)

 	spin_unlock(&irq_entry->list_lock);

-	list_for_each_entry(desc, &flist, list) {
+	list_for_each_entry_safe(desc, n, &flist, list) {
 		/*
		 * Check against the original status as ABORT is software defined
		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
		 */
+		list_del(&desc->list);
+
 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
 			idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
 			continue;
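[Editor's note] The irq_process_work_list() hunk above switches to list_for_each_entry_safe() and unlinks each descriptor before completing it, because idxd_desc_complete() can hand the descriptor back to the allocator while the walk is still in progress. The generic shape of that pattern, with a hypothetical item type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {			/* hypothetical, for illustration */
		struct list_head list;
	};

	static void drain(struct list_head *flist)
	{
		struct item *it, *next;

		/* The _safe variant caches the next pointer up front, so the
		 * current entry may be unlinked and freed inside the loop
		 * without corrupting the traversal. */
		list_for_each_entry_safe(it, next, flist, list) {
			list_del(&it->list);
			kfree(it);	/* completion may free, as in idxd */
		}
	}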
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..4b6af2f15d8a 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -6,29 +6,6 @@
 #include "idxd.h"
 #include "perfmon.h"

-static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
-			    char *buf);
-
-static cpumask_t perfmon_dsa_cpu_mask;
-static bool cpuhp_set_up;
-static enum cpuhp_state cpuhp_slot;
-
-/*
- * perf userspace reads this attribute to determine which cpus to open
- * counters on.  It's connected to perfmon_dsa_cpu_mask, which is
- * maintained by the cpu hotplug handlers.
- */
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *perfmon_cpumask_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL,
-};
-
-static struct attribute_group cpumask_attr_group = {
-	.attrs = perfmon_cpumask_attrs,
-};
-
 /*
  * These attributes specify the bits in the config word that the perf
  * syscall uses to pass the event ids and categories to perfmon.
@@ -67,16 +44,9 @@ static struct attribute_group perfmon_format_attr_group = {

 static const struct attribute_group *perfmon_attr_groups[] = {
 	&perfmon_format_attr_group,
-	&cpumask_attr_group,
 	NULL,
 };

-static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
-}
-
 static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
 {
 	return &idxd_pmu->pmu == event->pmu;
@@ -217,7 +187,6 @@ static int perfmon_pmu_event_init(struct perf_event *event)
 		return -EINVAL;

 	event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
-	event->cpu = idxd->idxd_pmu->cpu;
 	event->hw.config = event->attr.config;

 	if (event->group_leader != event)
@@ -480,14 +449,15 @@ static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
 	idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
 	idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
 	idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
-	idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable,
-	idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable,
+	idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
+	idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
 	idxd_pmu->pmu.add = perfmon_pmu_event_add;
 	idxd_pmu->pmu.del = perfmon_pmu_event_del;
 	idxd_pmu->pmu.start = perfmon_pmu_event_start;
 	idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
 	idxd_pmu->pmu.read = perfmon_pmu_event_update;
 	idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+	idxd_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
 	idxd_pmu->pmu.module = THIS_MODULE;
 }

@@ -496,50 +466,11 @@ void perfmon_pmu_remove(struct idxd_device *idxd)
 	if (!idxd->idxd_pmu)
 		return;

-	cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
 	perf_pmu_unregister(&idxd->idxd_pmu->pmu);
 	kfree(idxd->idxd_pmu);
 	idxd->idxd_pmu = NULL;
 }

-static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
-	struct idxd_pmu *idxd_pmu;
-
-	idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
-
-	/* select the first online CPU as the designated reader */
-	if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
-		cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
-		idxd_pmu->cpu = cpu;
-	}
-
-	return 0;
-}
-
-static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
-	struct idxd_pmu *idxd_pmu;
-	unsigned int target;
-
-	idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
-
-	if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
-		return 0;
-
-	target = cpumask_any_but(cpu_online_mask, cpu);
-
-	/* migrate events if there is a valid target */
-	if (target < nr_cpu_ids)
-		cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
-	else
-		target = -1;
-
-	perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
-
-	return 0;
-}
-
 int perfmon_pmu_init(struct idxd_device *idxd)
 {
 	union idxd_perfcap perfcap;
@@ -547,12 +478,6 @@ int perfmon_pmu_init(struct idxd_device *idxd)
 	int rc = -ENODEV;

 	/*
-	 * perfmon module initialization failed, nothing to do
-	 */
-	if (!cpuhp_set_up)
-		return -ENODEV;
-
-	/*
	 * If perfmon_offset or num_counters is 0, it means perfmon is
	 * not supported on this hardware.
	 */
@@ -627,11 +552,6 @@ int perfmon_pmu_init(struct idxd_device *idxd)
 	if (rc)
 		goto free;

-	rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
-	if (rc) {
-		perf_pmu_unregister(&idxd->idxd_pmu->pmu);
-		goto free;
-	}
-
 out:
 	return rc;
 free:
@@ -640,22 +560,3 @@ free:

 	goto out;
 }
-
-void __init perfmon_init(void)
-{
-	int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
-					 "driver/dma/idxd/perf:online",
-					 perf_event_cpu_online,
-					 perf_event_cpu_offline);
-	if (WARN_ON(rc < 0))
-		return;
-
-	cpuhp_slot = rc;
-	cpuhp_set_up = true;
-}
-
-void __exit perfmon_exit(void)
-{
-	if (cpuhp_set_up)
-		cpuhp_remove_multi_state(cpuhp_slot);
-}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 315c004f58e4..006ba206ab1b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -6,8 +6,10 @@
 #include <uapi/linux/idxd.h>

 /* PCI Config */
-#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
-#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe
+#define PCI_DEVICE_ID_INTEL_DSA_GNRD	0x11fb
+#define PCI_DEVICE_ID_INTEL_DSA_DMR	0x1212
+#define PCI_DEVICE_ID_INTEL_IAA_DMR	0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL	0xb02d

 #define DEVICE_VERSION_1		0x100
 #define DEVICE_VERSION_2		0x200
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 817a564413b0..6db1c5fcedc5 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -61,7 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)

 	return __get_desc(wq, idx, cpu);
 }
-EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, "IDXD");

 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 {
@@ -70,7 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 	desc->cpu = -1;
 	sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
 }
-EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_free_desc, "IDXD");

 static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 					 struct idxd_desc *desc)
@@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 	 * completing the descriptor will return desc to allocator and
	 * the desc can be acquired by a different process and the
	 * desc->list can be modified.  Delete desc from list so the
-	 * list trasversing does not get corrupted by the other process.
+	 * list traversing does not get corrupted by the other process.
	 */
 	list_for_each_entry_safe(d, t, &flist, list) {
 		list_del_init(&d->list);
@@ -219,4 +219,4 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 		percpu_ref_put(&wq->wq_active);
 	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, "IDXD");
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7f28f01be672..9f0701021af0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1197,12 +1197,37 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
 static struct device_attribute dev_attr_wq_enqcmds_retries =
 		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

+static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
+{
+	ssize_t pos;
+	int i;
+
+	pos = 0;
+	for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
+		unsigned long val = opcap_bmap[i];
+
+		/* On systems where direct user submissions are not safe, we need to clear out
+		 * the BATCH capability from the capability mask in sysfs since we cannot support
+		 * that command on such systems. Narrow the restriction of operations with the
+		 * BATCH opcode to only DSA version 1 devices.
+		 */
+		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
+		    confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
+			clear_bit(DSA_OPCODE_BATCH % 64, &val);
+
+		pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
+		pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
+	}
+
+	return pos;
+}
+
 static ssize_t wq_op_config_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
 	struct idxd_wq *wq = confdev_to_wq(dev);

-	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+	return op_cap_show_common(dev, buf, wq->opcap_bmap);
 }

 static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
@@ -1455,7 +1480,7 @@ static ssize_t op_cap_show(struct device *dev,
 {
 	struct idxd_device *idxd = confdev_to_idxd(dev);

-	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
+	return op_cap_show_common(dev, buf, idxd->opcap_bmap);
 }
 static DEVICE_ATTR_RO(op_cap);

@@ -1956,13 +1981,3 @@ void idxd_unregister_devices(struct idxd_device *idxd)
 		device_unregister(group_confdev(group));
 	}
 }
-
-int idxd_register_bus_type(void)
-{
-	return bus_register(&dsa_bus_type);
-}
-
-void idxd_unregister_bus_type(void)
-{
-	bus_unregister(&dsa_bus_type);
-}
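[Editor's note] op_cap_show_common() in the sysfs.c hunk above prints the operation-capability bitmap (IDXD_MAX_OPCAP_BITS wide) as 64-bit groups, most significant first, and on affected DSA 1.x devices clears the BATCH bit from a copy of the word before emitting it, so the device's real capability state is never touched. A small sketch of the word/bit arithmetic, assuming DSA_OPCODE_BATCH is opcode 1 as defined in the uapi header (so word 0, bit 1):

	#include <linux/bitops.h>
	#include <uapi/linux/idxd.h>

	/* Illustrative helper mirroring the masking in op_cap_show_common(). */
	static unsigned long opcap_word_for_show(const unsigned long *opcap_bmap,
						 int word, bool hide_batch)
	{
		unsigned long val = opcap_bmap[word];

		/* Operate on the local copy only. */
		if (hide_batch && word == DSA_OPCODE_BATCH / 64)
			clear_bit(DSA_OPCODE_BATCH % 64, &val);

		return val;
	}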