Diffstat (limited to 'drivers/dma/idxd/init.c')
-rw-r--r--  drivers/dma/idxd/init.c  828
1 file changed, 718 insertions(+), 110 deletions(-)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 08a5f4310188..2acc34b3daff 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -9,12 +9,10 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/workqueue.h> -#include <linux/aer.h> #include <linux/fs.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/device.h> #include <linux/idr.h> -#include <linux/intel-svm.h> #include <linux/iommu.h> #include <uapi/linux/idxd.h> #include <linux/dmaengine.h> @@ -24,9 +22,10 @@ #include "perfmon.h" MODULE_VERSION(IDXD_DRIVER_VERSION); +MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); -MODULE_IMPORT_NS(IDXD); +MODULE_IMPORT_NS("IDXD"); static bool sva = true; module_param(sva, bool, 0644); @@ -48,6 +47,10 @@ static struct idxd_driver_data idxd_driver_data[] = { .compl_size = sizeof(struct dsa_completion_record), .align = 32, .dev_type = &dsa_device_type, + .evl_cr_off = offsetof(struct dsa_evl_entry, cr), + .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ + .cr_status_off = offsetof(struct dsa_completion_record, status), + .cr_result_off = offsetof(struct dsa_completion_record, result), }, [IDXD_TYPE_IAX] = { .name_prefix = "iax", @@ -55,15 +58,30 @@ static struct idxd_driver_data idxd_driver_data[] = { .compl_size = sizeof(struct iax_completion_record), .align = 64, .dev_type = &iax_device_type, + .evl_cr_off = offsetof(struct iax_evl_entry, cr), + .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ + .cr_status_off = offsetof(struct iax_completion_record, status), + .cr_result_off = offsetof(struct iax_completion_record, error_code), + .load_device_defaults = idxd_load_iaa_device_defaults, }, }; static struct pci_device_id idxd_pci_tbl[] = { /* DSA ver 1.0 platforms */ { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) }, + /* DSA on GNR-D platforms */ + { PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) }, + /* DSA on DMR platforms */ + { PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) }, /* IAX ver 1.0 platforms */ { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) }, + /* IAA on DMR platforms */ + { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) }, + /* IAA PTL platforms */ + { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) }, + /* IAA WCL platforms */ + { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) }, { 0, } }; MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); @@ -106,7 +124,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) ie = idxd_get_ie(idxd, msix_idx); ie->id = msix_idx; ie->int_handle = INVALID_INT_HANDLE; - ie->pasid = INVALID_IOASID; + ie->pasid = IOMMU_PASID_INVALID; spin_lock_init(&ie->list_lock); init_llist_head(&ie->pending_llist); @@ -139,6 +157,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd) pci_free_irq_vectors(pdev); } +static void idxd_clean_wqs(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); + conf_dev = wq_confdev(wq); + put_device(conf_dev); + kfree(wq); + } + bitmap_free(idxd->wq_enable_map); + kfree(idxd->wqs); +} + static int idxd_setup_wqs(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ 
-151,25 +188,32 @@ static int idxd_setup_wqs(struct idxd_device *idxd) if (!idxd->wqs) return -ENOMEM; + idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); + if (!idxd->wq_enable_map) { + rc = -ENOMEM; + goto err_free_wqs; + } + for (i = 0; i < idxd->max_wqs; i++) { wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev)); if (!wq) { rc = -ENOMEM; - goto err; + goto err_unwind; } idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ); conf_dev = wq_confdev(wq); wq->id = i; wq->idxd = idxd; - device_initialize(wq_confdev(wq)); + device_initialize(conf_dev); conf_dev->parent = idxd_confdev(idxd); conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_wq_device_type; rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); if (rc < 0) { put_device(conf_dev); - goto err; + kfree(wq); + goto err_unwind; } mutex_init(&wq->wq_lock); @@ -177,28 +221,67 @@ static int idxd_setup_wqs(struct idxd_device *idxd) init_completion(&wq->wq_dead); init_completion(&wq->wq_resurrect); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; - wq->max_batch_size = WQ_DEFAULT_MAX_BATCH; + idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { put_device(conf_dev); + kfree(wq); rc = -ENOMEM; - goto err; + goto err_unwind; } + + if (idxd->hw.wq_cap.op_config) { + wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); + if (!wq->opcap_bmap) { + kfree(wq->wqcfg); + put_device(conf_dev); + kfree(wq); + rc = -ENOMEM; + goto err_unwind; + } + bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); + } + mutex_init(&wq->uc_lock); + xa_init(&wq->upasid_xa); idxd->wqs[i] = wq; } return 0; - err: +err_unwind: while (--i >= 0) { wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); conf_dev = wq_confdev(wq); put_device(conf_dev); + kfree(wq); } + bitmap_free(idxd->wq_enable_map); + +err_free_wqs: + kfree(idxd->wqs); + return rc; } +static void idxd_clean_engines(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_engines; i++) { + engine = idxd->engines[i]; + conf_dev = engine_confdev(engine); + put_device(conf_dev); + kfree(engine); + } + kfree(idxd->engines); +} + static int idxd_setup_engines(struct idxd_device *idxd) { struct idxd_engine *engine; @@ -229,6 +312,7 @@ static int idxd_setup_engines(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); if (rc < 0) { put_device(conf_dev); + kfree(engine); goto err; } @@ -242,10 +326,26 @@ static int idxd_setup_engines(struct idxd_device *idxd) engine = idxd->engines[i]; conf_dev = engine_confdev(engine); put_device(conf_dev); + kfree(engine); } + kfree(idxd->engines); + return rc; } +static void idxd_clean_groups(struct idxd_device *idxd) +{ + struct idxd_group *group; + int i; + + for (i = 0; i < idxd->max_groups; i++) { + group = idxd->groups[i]; + put_device(group_confdev(group)); + kfree(group); + } + kfree(idxd->groups); +} + static int idxd_setup_groups(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -276,17 +376,23 @@ static int idxd_setup_groups(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); if (rc < 0) { put_device(conf_dev); + kfree(group); goto err; } idxd->groups[i] = group; - if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) { + if 
(idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { group->tc_a = 1; group->tc_b = 1; } else { group->tc_a = -1; group->tc_b = -1; } + /* + * The default value is the same as the value of + * total read buffers in GRPCAP. + */ + group->rdbufs_allowed = idxd->max_rdbufs; } return 0; @@ -295,27 +401,61 @@ static int idxd_setup_groups(struct idxd_device *idxd) while (--i >= 0) { group = idxd->groups[i]; put_device(group_confdev(group)); + kfree(group); } + kfree(idxd->groups); + return rc; } static void idxd_cleanup_internals(struct idxd_device *idxd) { - int i; - - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_groups(idxd); + idxd_clean_engines(idxd); + idxd_clean_wqs(idxd); destroy_workqueue(idxd->wq); } +static int idxd_init_evl(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + unsigned int evl_cache_size; + struct idxd_evl *evl; + const char *idxd_name; + + if (idxd->hw.gen_cap.evl_support == 0) + return 0; + + evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev)); + if (!evl) + return -ENOMEM; + + mutex_init(&evl->lock); + evl->size = IDXD_EVL_SIZE_MIN; + + idxd_name = dev_name(idxd_confdev(idxd)); + evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd); + /* + * Since completion record in evl_cache will be copied to user + * when handling completion record page fault, need to create + * the cache suitable for user copy. + */ + idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size, + 0, 0, 0, evl_cache_size, + NULL); + if (!idxd->evl_cache) { + kfree(evl); + return -ENOMEM; + } + + idxd->evl = evl; + return 0; +} + static int idxd_setup_internals(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; - int rc, i; + int rc; init_waitqueue_head(&idxd->cmd_waitq); @@ -337,17 +477,20 @@ static int idxd_setup_internals(struct idxd_device *idxd) goto err_wkq_create; } + rc = idxd_init_evl(idxd); + if (rc < 0) + goto err_evl; + return 0; + err_evl: + destroy_workqueue(idxd->wq); err_wkq_create: - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); + idxd_clean_groups(idxd); err_group: - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); + idxd_clean_engines(idxd); err_engine: - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_wqs(idxd); err_wqs: return rc; } @@ -369,6 +512,19 @@ static void idxd_read_table_offsets(struct idxd_device *idxd) dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); } +void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count) +{ + int i, j, nr; + + for (i = 0, nr = 0; i < count; i++) { + for (j = 0; j < BITS_PER_LONG_LONG; j++) { + if (val[i] & BIT(j)) + set_bit(nr, bmap); + nr++; + } + } +} + static void idxd_read_caps(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -389,7 +545,7 @@ static void idxd_read_caps(struct idxd_device *idxd) idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); - idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; + idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift); dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); if (idxd->hw.gen_cap.config_en) 
set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); @@ -427,6 +583,22 @@ static void idxd_read_caps(struct idxd_device *idxd) IDXD_OPCAP_OFFSET + i * sizeof(u64)); dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); } + multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); + + /* read iaa cap */ + if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) + idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); +} + +static void idxd_free(struct idxd_device *idxd) +{ + if (!idxd) + return; + + put_device(idxd_confdev(idxd)); + bitmap_free(idxd->opcap_bmap); + ida_free(&idxd_ida, idxd->id); + kfree(idxd); } static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) @@ -446,56 +618,91 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); if (idxd->id < 0) - return NULL; + goto err_ida; + + idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); + if (!idxd->opcap_bmap) + goto err_opcap; device_initialize(conf_dev); conf_dev->parent = dev; conf_dev->bus = &dsa_bus_type; conf_dev->type = idxd->data->dev_type; rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); - if (rc < 0) { - put_device(conf_dev); - return NULL; - } + if (rc < 0) + goto err_name; spin_lock_init(&idxd->dev_lock); spin_lock_init(&idxd->cmd_lock); return idxd; + +err_name: + put_device(conf_dev); + bitmap_free(idxd->opcap_bmap); +err_opcap: + ida_free(&idxd_ida, idxd->id); +err_ida: + kfree(idxd); + + return NULL; } static int idxd_enable_system_pasid(struct idxd_device *idxd) { - int flags; - unsigned int pasid; - struct iommu_sva *sva; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct iommu_domain *domain; + ioasid_t pasid; + int ret; - flags = SVM_FLAG_SUPERVISOR_MODE; + /* + * Attach a global PASID to the DMA domain so that we can use ENQCMDS + * to submit work on buffers mapped by DMA API. + */ + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return -EPERM; - sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags); - if (IS_ERR(sva)) { - dev_warn(&idxd->pdev->dev, - "iommu sva bind failed: %ld\n", PTR_ERR(sva)); - return PTR_ERR(sva); - } + pasid = iommu_alloc_global_pasid(dev); + if (pasid == IOMMU_PASID_INVALID) + return -ENOSPC; - pasid = iommu_sva_get_pasid(sva); - if (pasid == IOMMU_PASID_INVALID) { - iommu_sva_unbind_device(sva); - return -ENODEV; + /* + * DMA domain is owned by the driver, it should support all valid + * types such as DMA-FQ, identity, etc. 
+ */ + ret = iommu_attach_device_pasid(domain, dev, pasid, NULL); + if (ret) { + dev_err(dev, "failed to attach device pasid %d, domain type %d", + pasid, domain->type); + iommu_free_global_pasid(pasid); + return ret; } - idxd->sva = sva; + /* Since we set user privilege for kernel DMA, enable completion IRQ */ + idxd_set_user_intr(idxd, 1); idxd->pasid = pasid; - dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid); - return 0; + + return ret; } static void idxd_disable_system_pasid(struct idxd_device *idxd) { + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return; - iommu_sva_unbind_device(idxd->sva); + iommu_detach_device_pasid(domain, dev, idxd->pasid); + iommu_free_global_pasid(idxd->pasid); + + idxd_set_user_intr(idxd, 0); idxd->sva = NULL; + idxd->pasid = IOMMU_PASID_INVALID; } static int idxd_probe(struct idxd_device *idxd) @@ -512,18 +719,13 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { - rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA); - if (rc == 0) { - rc = idxd_enable_system_pasid(idxd); - if (rc < 0) { - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); - dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc); - } else { - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); - } - } else { - dev_warn(dev, "Unable to turn on SVA feature.\n"); - } + set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); + + rc = idxd_enable_system_pasid(idxd); + if (rc) + dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc); + else + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } @@ -561,74 +763,475 @@ static int idxd_probe(struct idxd_device *idxd) err: if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); return rc; } static void idxd_cleanup(struct idxd_device *idxd) { - struct device *dev = &idxd->pdev->dev; - perfmon_pmu_remove(idxd); idxd_cleanup_interrupts(idxd); idxd_cleanup_internals(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); } -static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +/* + * Attach IDXD device to IDXD driver. + */ +static int idxd_bind(struct device_driver *drv, const char *buf) { - struct device *dev = &pdev->dev; - struct idxd_device *idxd; - struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data; + const struct bus_type *bus = drv->bus; + struct device *dev; + int err = -ENODEV; + + dev = bus_find_device_by_name(bus, NULL, buf); + if (dev) + err = device_driver_attach(drv, dev); + + put_device(dev); + + return err; +} + +/* + * Detach IDXD device from driver. 
+ */ +static void idxd_unbind(struct device_driver *drv, const char *buf) +{ + const struct bus_type *bus = drv->bus; + struct device *dev; + + dev = bus_find_device_by_name(bus, NULL, buf); + if (dev && dev->driver == drv) + device_release_driver(dev); + + put_device(dev); +} + +#define idxd_free_saved_configs(saved_configs, count) \ + do { \ + int i; \ + \ + for (i = 0; i < (count); i++) \ + kfree(saved_configs[i]); \ + } while (0) + +static void idxd_free_saved(struct idxd_group **saved_groups, + struct idxd_engine **saved_engines, + struct idxd_wq **saved_wqs, + struct idxd_device *idxd) +{ + if (saved_groups) + idxd_free_saved_configs(saved_groups, idxd->max_groups); + if (saved_engines) + idxd_free_saved_configs(saved_engines, idxd->max_engines); + if (saved_wqs) + idxd_free_saved_configs(saved_wqs, idxd->max_wqs); +} + +/* + * Save IDXD device configurations including engines, groups, wqs etc. + * The saved configurations can be restored when needed. + */ +static int idxd_device_config_save(struct idxd_device *idxd, + struct idxd_saved_states *idxd_saved) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd)); + + if (idxd->evl) { + memcpy(&idxd_saved->saved_evl, idxd->evl, + sizeof(struct idxd_evl)); + } + + struct idxd_group **saved_groups __free(kfree) = + kcalloc_node(idxd->max_groups, + sizeof(struct idxd_group *), + GFP_KERNEL, dev_to_node(dev)); + if (!saved_groups) + return -ENOMEM; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *saved_group __free(kfree) = + kzalloc_node(sizeof(*saved_group), GFP_KERNEL, + dev_to_node(dev)); + + if (!saved_group) { + /* Free saved groups */ + idxd_free_saved(saved_groups, NULL, NULL, idxd); + + return -ENOMEM; + } + + memcpy(saved_group, idxd->groups[i], sizeof(*saved_group)); + saved_groups[i] = no_free_ptr(saved_group); + } + + struct idxd_engine **saved_engines = + kcalloc_node(idxd->max_engines, + sizeof(struct idxd_engine *), + GFP_KERNEL, dev_to_node(dev)); + if (!saved_engines) { + /* Free saved groups */ + idxd_free_saved(saved_groups, NULL, NULL, idxd); + + return -ENOMEM; + } + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *saved_engine __free(kfree) = + kzalloc_node(sizeof(*saved_engine), GFP_KERNEL, + dev_to_node(dev)); + if (!saved_engine) { + /* Free saved groups and engines */ + idxd_free_saved(saved_groups, saved_engines, NULL, + idxd); + + return -ENOMEM; + } + + memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine)); + saved_engines[i] = no_free_ptr(saved_engine); + } + + unsigned long *saved_wq_enable_map __free(bitmap) = + bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, + dev_to_node(dev)); + if (!saved_wq_enable_map) { + /* Free saved groups and engines */ + idxd_free_saved(saved_groups, saved_engines, NULL, idxd); + + return -ENOMEM; + } + + bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs); + + struct idxd_wq **saved_wqs __free(kfree) = + kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), + GFP_KERNEL, dev_to_node(dev)); + if (!saved_wqs) { + /* Free saved groups and engines */ + idxd_free_saved(saved_groups, saved_engines, NULL, idxd); + + return -ENOMEM; + } + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *saved_wq __free(kfree) = + kzalloc_node(sizeof(*saved_wq), GFP_KERNEL, + dev_to_node(dev)); + struct idxd_wq *wq; + + if (!saved_wq) { + /* Free saved groups, engines, and wqs */ + idxd_free_saved(saved_groups, saved_engines, saved_wqs, + idxd); + + return -ENOMEM; + } + + if 
(!test_bit(i, saved_wq_enable_map)) + continue; + + wq = idxd->wqs[i]; + mutex_lock(&wq->wq_lock); + memcpy(saved_wq, wq, sizeof(*saved_wq)); + saved_wqs[i] = no_free_ptr(saved_wq); + mutex_unlock(&wq->wq_lock); + } + + /* Save configurations */ + idxd_saved->saved_groups = no_free_ptr(saved_groups); + idxd_saved->saved_engines = no_free_ptr(saved_engines); + idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map); + idxd_saved->saved_wqs = no_free_ptr(saved_wqs); + + return 0; +} + +/* + * Restore IDXD device configurations including engines, groups, wqs etc + * that were saved before. + */ +static void idxd_device_config_restore(struct idxd_device *idxd, + struct idxd_saved_states *idxd_saved) +{ + struct idxd_evl *saved_evl = &idxd_saved->saved_evl; + int i; + + idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit; + + idxd->evl->size = saved_evl->size; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *saved_group, *group; + + saved_group = idxd_saved->saved_groups[i]; + group = idxd->groups[i]; + + group->rdbufs_allowed = saved_group->rdbufs_allowed; + group->rdbufs_reserved = saved_group->rdbufs_reserved; + group->tc_a = saved_group->tc_a; + group->tc_b = saved_group->tc_b; + group->use_rdbuf_limit = saved_group->use_rdbuf_limit; + + kfree(saved_group); + } + kfree(idxd_saved->saved_groups); + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *saved_engine, *engine; + + saved_engine = idxd_saved->saved_engines[i]; + engine = idxd->engines[i]; + + engine->group = saved_engine->group; + + kfree(saved_engine); + } + kfree(idxd_saved->saved_engines); + + bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map, + idxd->max_wqs); + bitmap_free(idxd_saved->saved_wq_enable_map); + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *saved_wq, *wq; + size_t len; + + if (!test_bit(i, idxd->wq_enable_map)) + continue; + + saved_wq = idxd_saved->saved_wqs[i]; + wq = idxd->wqs[i]; + + mutex_lock(&wq->wq_lock); + + wq->group = saved_wq->group; + wq->flags = saved_wq->flags; + wq->threshold = saved_wq->threshold; + wq->size = saved_wq->size; + wq->priority = saved_wq->priority; + wq->type = saved_wq->type; + len = strlen(saved_wq->name) + 1; + strscpy(wq->name, saved_wq->name, len); + wq->max_xfer_bytes = saved_wq->max_xfer_bytes; + wq->max_batch_size = saved_wq->max_batch_size; + wq->enqcmds_retries = saved_wq->enqcmds_retries; + wq->descs = saved_wq->descs; + wq->idxd_chan = saved_wq->idxd_chan; + len = strlen(saved_wq->driver_name) + 1; + strscpy(wq->driver_name, saved_wq->driver_name, len); + + mutex_unlock(&wq->wq_lock); + + kfree(saved_wq); + } + + kfree(idxd_saved->saved_wqs); +} + +static void idxd_reset_prepare(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + struct device *dev = &idxd->pdev->dev; + const char *idxd_name; int rc; - rc = pci_enable_device(pdev); - if (rc) - return rc; + idxd_name = dev_name(idxd_confdev(idxd)); - dev_dbg(dev, "Alloc IDXD context\n"); - idxd = idxd_alloc(pdev, data); - if (!idxd) { - rc = -ENOMEM; - goto err_idxd_alloc; + struct idxd_saved_states *idxd_saved __free(kfree) = + kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!idxd_saved) { + dev_err(dev, "HALT: no memory\n"); + + return; } - dev_dbg(dev, "Mapping BARs\n"); - idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); - if (!idxd->reg_base) { - rc = -ENOMEM; - goto err_iomap; + /* Save IDXD configurations. 
*/ + rc = idxd_device_config_save(idxd, idxd_saved); + if (rc < 0) { + dev_err(dev, "HALT: cannot save %s configs\n", idxd_name); + + return; } - dev_dbg(dev, "Set DMA masks\n"); - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (rc) - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + idxd->idxd_saved = no_free_ptr(idxd_saved); + + /* Save PCI device state. */ + pci_save_state(idxd->pdev); +} + +static void idxd_reset_done(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + const char *idxd_name; + struct device *dev; + int rc, i; + + if (!idxd->idxd_saved) + return; + + dev = &idxd->pdev->dev; + idxd_name = dev_name(idxd_confdev(idxd)); + + /* Restore PCI device state. */ + pci_restore_state(idxd->pdev); + + /* Unbind idxd device from driver. */ + idxd_unbind(&idxd_drv.drv, idxd_name); + + /* + * Probe PCI device without allocating or changing + * idxd software data which keeps the same as before FLR. + */ + idxd_pci_probe_alloc(idxd, NULL, NULL); + + /* Restore IDXD configurations. */ + idxd_device_config_restore(idxd, idxd->idxd_saved); + + /* Re-configure IDXD device if allowed. */ + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { + rc = idxd_device_config(idxd); + if (rc < 0) { + dev_err(dev, "HALT: %s config fails\n", idxd_name); + goto out; + } + } + + /* Bind IDXD device to driver. */ + rc = idxd_bind(&idxd_drv.drv, idxd_name); + if (rc < 0) { + dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name); + goto out; + } + + /* Bind enabled wq in the IDXD device to driver. */ + for (i = 0; i < idxd->max_wqs; i++) { + if (test_bit(i, idxd->wq_enable_map)) { + struct idxd_wq *wq = idxd->wqs[i]; + char wq_name[32]; + + wq->state = IDXD_WQ_DISABLED; + sprintf(wq_name, "wq%d.%d", idxd->id, wq->id); + /* + * Bind to user driver depending on wq type. + * + * Currently only support user type WQ. Will support + * kernel type WQ in the future. + */ + if (wq->type == IDXD_WQT_USER) + rc = idxd_bind(&idxd_user_drv.drv, wq_name); + else + rc = -EINVAL; + if (rc < 0) { + clear_bit(i, idxd->wq_enable_map); + dev_err(dev, + "HALT: unable to re-enable wq %s\n", + dev_name(wq_confdev(wq))); + } + } + } +out: + kfree(idxd->idxd_saved); +} + +static const struct pci_error_handlers idxd_error_handler = { + .reset_prepare = idxd_reset_prepare, + .reset_done = idxd_reset_done, +}; + +/* + * Probe idxd PCI device. + * If idxd is not given, need to allocate idxd and set up its data. + * + * If idxd is given, idxd was allocated and setup already. Just need to + * configure device without re-allocating and re-configuring idxd data. + * This is useful for recovering from FLR. + */ +int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev, + const struct pci_device_id *id) +{ + bool alloc_idxd = idxd ? false : true; + struct idxd_driver_data *data; + struct device *dev; + int rc; + + pdev = idxd ? idxd->pdev : pdev; + dev = &pdev->dev; + data = id ? 
(struct idxd_driver_data *)id->driver_data : NULL; + rc = pci_enable_device(pdev); if (rc) - goto err; + return rc; + + if (alloc_idxd) { + dev_dbg(dev, "Alloc IDXD context\n"); + idxd = idxd_alloc(pdev, data); + if (!idxd) { + rc = -ENOMEM; + goto err_idxd_alloc; + } + + dev_dbg(dev, "Mapping BARs\n"); + idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); + if (!idxd->reg_base) { + rc = -ENOMEM; + goto err_iomap; + } + + dev_dbg(dev, "Set DMA masks\n"); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + goto err; + } dev_dbg(dev, "Set PCI master\n"); pci_set_master(pdev); pci_set_drvdata(pdev, idxd); - idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); - rc = idxd_probe(idxd); - if (rc) { - dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); - goto err; + if (alloc_idxd) { + idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); + rc = idxd_probe(idxd); + if (rc) { + dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); + goto err; + } + + if (data->load_device_defaults) { + rc = data->load_device_defaults(idxd); + if (rc) + dev_warn(dev, "IDXD loading device defaults failed\n"); + } + + rc = idxd_register_devices(idxd); + if (rc) { + dev_err(dev, "IDXD sysfs setup failed\n"); + goto err_dev_register; + } + + rc = idxd_device_init_debugfs(idxd); + if (rc) + dev_warn(dev, "IDXD debugfs failed to setup\n"); } - rc = idxd_register_devices(idxd); - if (rc) { - dev_err(dev, "IDXD sysfs setup failed\n"); - goto err_dev_register; + if (!alloc_idxd) { + /* Release interrupts in the IDXD device. */ + idxd_cleanup_interrupts(idxd); + + /* Re-enable interrupts in the IDXD device. */ + rc = idxd_setup_interrupts(idxd); + if (rc) + dev_warn(dev, "IDXD interrupts failed to setup\n"); } dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", idxd->hw.version); + if (data) + idxd->user_submission_safe = data->user_submission_safe; + return 0; err_dev_register: @@ -636,12 +1239,17 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) err: pci_iounmap(pdev, idxd->reg_base); err_iomap: - put_device(idxd_confdev(idxd)); + idxd_free(idxd); err_idxd_alloc: pci_disable_device(pdev); return rc; } +static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return idxd_pci_probe_alloc(NULL, pdev, id); +} + void idxd_wqs_quiesce(struct idxd_device *idxd) { struct idxd_wq *wq; @@ -673,7 +1281,6 @@ static void idxd_shutdown(struct pci_dev *pdev) static void idxd_remove(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); - struct idxd_irq_entry *irq_entry; idxd_unregister_devices(idxd); /* @@ -686,18 +1293,14 @@ static void idxd_remove(struct pci_dev *pdev) get_device(idxd_confdev(idxd)); device_unregister(idxd_confdev(idxd)); idxd_shutdown(pdev); + idxd_device_remove_debugfs(idxd); + perfmon_pmu_remove(idxd); + idxd_cleanup_interrupts(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - - irq_entry = idxd_get_ie(idxd, 0); - free_irq(irq_entry->vector, irq_entry); - pci_free_irq_vectors(pdev); pci_iounmap(pdev, idxd->reg_base); - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); - pci_disable_device(pdev); - destroy_workqueue(idxd->wq); - perfmon_pmu_remove(idxd); put_device(idxd_confdev(idxd)); + pci_disable_device(pdev); } static struct pci_driver idxd_pci_driver = { @@ -706,6 +1309,7 @@ static struct pci_driver idxd_pci_driver = { .probe = idxd_pci_probe, .remove = idxd_remove, .shutdown = idxd_shutdown, + .err_handler = &idxd_error_handler, }; static int 
__init idxd_init_module(void) @@ -726,8 +1330,6 @@ static int __init idxd_init_module(void) else support_enqcmd = true; - perfmon_init(); - err = idxd_driver_register(&idxd_drv); if (err < 0) goto err_idxd_driver_register; @@ -744,6 +1346,10 @@ static int __init idxd_init_module(void) if (err) goto err_cdev_register; + err = idxd_init_debugfs(); + if (err) + goto err_debugfs; + err = pci_register_driver(&idxd_pci_driver); if (err) goto err_pci_register; @@ -751,6 +1357,8 @@ static int __init idxd_init_module(void) return 0; err_pci_register: + idxd_remove_debugfs(); +err_debugfs: idxd_cdev_remove(); err_cdev_register: idxd_driver_unregister(&idxd_user_drv); @@ -770,6 +1378,6 @@ static void __exit idxd_exit_module(void) idxd_driver_unregister(&idxd_drv); pci_unregister_driver(&idxd_pci_driver); idxd_cdev_remove(); - perfmon_exit(); + idxd_remove_debugfs(); } module_exit(idxd_exit_module); |
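
Notes on the main patterns introduced by this patch follow; the sketches below use hypothetical demo_* names unless stated otherwise and are illustrations, not code from the patch.

The reworked idxd_setup_wqs()/idxd_setup_engines()/idxd_setup_groups() error paths now free each partially constructed object (opcap bitmap, wqcfg, the conf_dev reference, and the object itself) and then unwind everything allocated on earlier iterations in reverse order before releasing the containing array. A minimal sketch of that unwind shape, assuming only <linux/slab.h>:

	#include <linux/slab.h>

	struct demo_item {
		int id;
	};

	struct demo_ctx {
		int count;
		struct demo_item **items;
	};

	static int demo_setup_items(struct demo_ctx *ctx)
	{
		int i, rc;

		for (i = 0; i < ctx->count; i++) {
			struct demo_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

			if (!item) {
				rc = -ENOMEM;
				goto err_unwind;
			}
			item->id = i;
			ctx->items[i] = item;
		}

		return 0;

	err_unwind:
		/* Free only the entries that were successfully allocated, newest first. */
		while (--i >= 0)
			kfree(ctx->items[i]);
		return rc;
	}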
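idxd_read_caps() now unpacks the four 64-bit OPCAP registers into idxd->opcap_bmap via the new multi_u64_to_bmap() helper, and each workqueue gets its own copy in wq->opcap_bmap when the hardware reports WQ operation config support. Once unpacked, individual operations can be checked with the regular bitmap API; a hedged sketch (the helper name is hypothetical, DSA_OPCODE_MEMMOVE comes from <uapi/linux/idxd.h>, struct idxd_wq from the driver's idxd.h):

	/* Returns true if this WQ is allowed to execute the Memory Move operation. */
	static bool demo_wq_supports_memmove(struct idxd_wq *wq)
	{
		return test_bit(DSA_OPCODE_MEMMOVE, wq->opcap_bmap);
	}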
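idxd_init_evl() allocates the event-log completion-record cache with kmem_cache_create_usercopy() rather than kmem_cache_create(), because records held there are later copied to user space when a completion-record page fault is handled; under CONFIG_HARDENED_USERCOPY only the whitelisted byte range of a slab object may be copied. A sketch of the call as used here (whole object whitelisted), wrapped in a hypothetical helper:

	static struct kmem_cache *demo_create_evl_cache(struct idxd_device *idxd)
	{
		size_t obj_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);

		/*
		 * Arguments: name, size, align, flags, useroffset, usersize, ctor.
		 * useroffset = 0 and usersize = obj_size whitelist the entire
		 * object for copy_to_user()/copy_from_user().
		 */
		return kmem_cache_create_usercopy(dev_name(idxd_confdev(idxd)),
						  obj_size, 0, 0, 0, obj_size, NULL);
	}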
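idxd_device_config_save() leans on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer annotated __free(kfree) is freed automatically whenever it goes out of scope (so every early error return stays leak-free), and no_free_ptr() transfers ownership out of the scope so the success path keeps the allocation. A minimal sketch of the pattern with hypothetical demo_* names:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	struct demo_cfg {
		int id;
	};

	static struct demo_cfg *demo_save_cfg(int id, int nid)
	{
		/* Automatically kfree()d on any return path that does not take ownership. */
		struct demo_cfg *cfg __free(kfree) =
			kzalloc_node(sizeof(*cfg), GFP_KERNEL, nid);

		if (!cfg)
			return NULL;

		cfg->id = id;

		/* Hand the pointer to the caller and suppress the automatic kfree(). */
		return no_free_ptr(cfg);
	}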
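The new idxd_reset_prepare()/idxd_reset_done() callbacks hook Function Level Reset through struct pci_error_handlers: driver configuration plus PCI config space are saved before the reset, then restored afterwards and the device and its enabled user WQs are re-bound to their drivers via idxd_pci_probe_alloc() and idxd_bind(). Stripped down to the PCI-core plumbing, the pattern looks like this (demo_* names are hypothetical; the callbacks and pci_save_state()/pci_restore_state() are the standard PCI API):

	#include <linux/pci.h>

	static void demo_reset_prepare(struct pci_dev *pdev)
	{
		/* Quiesce the device and snapshot PCI config space before the FLR. */
		pci_save_state(pdev);
	}

	static void demo_reset_done(struct pci_dev *pdev)
	{
		/* Restore config space and re-initialize driver state after the FLR. */
		pci_restore_state(pdev);
	}

	static const struct pci_error_handlers demo_err_handler = {
		.reset_prepare	= demo_reset_prepare,
		.reset_done	= demo_reset_done,
	};

	/* Wired up from the driver's struct pci_driver as .err_handler = &demo_err_handler. */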
