author     Jacob Pan <jacob.jun.pan@linux.intel.com>   2023-08-09 20:48:01 +0800
committer  Joerg Roedel <jroedel@suse.de>              2023-08-09 17:44:39 +0200
commit     f5ccf55e10281ae4523b83fe87f2c27fd634dc9d (patch)
tree       151a64b7074aa47bbb22df816bb39135c834c0cf /drivers/dma/idxd/device.c
parent     7d0c9da6c1509664d96488042bacc02308ca33b2 (diff)
dmaengine/idxd: Re-enable kernel workqueue under DMA API
Kernel workqueues were disabled due to a flawed use of kernel VA and the SVA API. Now that we have support for attaching a PASID to the device's default domain and the ability to reserve global PASIDs from the SVA APIs, we can re-enable the kernel work queues and use them under the DMA API.

We also use non-privileged access for in-kernel DMA to be consistent with the IOMMU settings. Consequently, interrupts with user privilege are enabled for work completion IRQs.

Link: https://lore.kernel.org/linux-iommu/20210511194726.GP1002214@nvidia.com/
Tested-by: Tony Zhu <tony.zhu@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Link: https://lore.kernel.org/r/20230802212427.1497170-9-jacob.jun.pan@linux.intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
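For context (not part of this diff), the PASID plumbing the commit message refers to lives outside device.c: the driver reserves a global PASID from the IOMMU core and attaches it to the device's default DMA API domain, so kernel work queues can submit work through that PASID against buffers mapped by the DMA API. The sketch below outlines that sequence using the generic IOMMU interfaces (iommu_get_domain_for_dev(), iommu_alloc_global_pasid(), iommu_attach_device_pasid()); struct my_dev and the function names are illustrative placeholders, not the actual idxd code.

#include <linux/device.h>
#include <linux/iommu.h>

/* Illustrative device state; the real driver keeps this in its device struct. */
struct my_dev {
	struct device *dev;
	ioasid_t pasid;
};

/* Reserve a global PASID and attach it to the device's default DMA domain. */
static int my_dev_enable_system_pasid(struct my_dev *md)
{
	struct iommu_domain *domain;
	ioasid_t pasid;
	int ret;

	/* The default domain is the one the DMA API already maps buffers into. */
	domain = iommu_get_domain_for_dev(md->dev);
	if (!domain)
		return -EPERM;

	/*
	 * Global PASIDs come from the SVA allocator, so they cannot collide
	 * with PASIDs handed out for user SVA bonds.
	 */
	pasid = iommu_alloc_global_pasid(md->dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	/* Make the DMA API mappings reachable through this PASID as well. */
	ret = iommu_attach_device_pasid(domain, md->dev, pasid);
	if (ret) {
		iommu_free_global_pasid(pasid);
		return ret;
	}

	md->pasid = pasid;
	return 0;
}

static void my_dev_disable_system_pasid(struct my_dev *md)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(md->dev);

	iommu_detach_device_pasid(domain, md->dev, md->pasid);
	iommu_free_global_pasid(md->pasid);
	md->pasid = IOMMU_PASID_INVALID;
}

With kernel queues submitting through a PASID attached to the DMA domain, there is no longer a reason to force the privileged bit per work queue, which is why __idxd_wq_set_priv_locked() is removed in the hunks below.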
Diffstat (limited to 'drivers/dma/idxd/device.c')
-rw-r--r--   drivers/dma/idxd/device.c   39
1 file changed, 14 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 5abbcc61c528..169b7ade8919 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
}
}
-static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
-{
- struct idxd_device *idxd = wq->idxd;
- union wqcfg wqcfg;
- unsigned int offset;
-
- offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
- spin_lock(&idxd->dev_lock);
- wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
- wqcfg.priv = priv;
- wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
- iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
- spin_unlock(&idxd->dev_lock);
-}
-
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
struct idxd_device *idxd = wq->idxd;
@@ -1423,15 +1408,14 @@ int drv_enable_wq(struct idxd_wq *wq)
}
/*
- * In the event that the WQ is configurable for pasid and priv bits.
- * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
- * However, for non-kernel wq, the driver should only set the pasid_en bit for
- * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+ * In the event that the WQ is configurable for pasid, the driver
+ * should set up the pasid and pasid_en bits. This is true for both
+ * kernel and user shared workqueues. There is no need to set the priv
+ * bit, since in-kernel DMA also issues user-privileged requests.
+ * A dedicated wq that is not 'kernel' type will configure pasid and
* pasid_en later on so there is no need to setup.
*/
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- int priv = 0;
-
if (wq_pasid_enabled(wq)) {
if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
@@ -1439,10 +1423,6 @@ int drv_enable_wq(struct idxd_wq *wq)
__idxd_wq_set_pasid_locked(wq, pasid);
}
}
-
- if (is_idxd_wq_kernel(wq))
- priv = 1;
- __idxd_wq_set_priv_locked(wq, priv);
}
rc = 0;
@@ -1550,6 +1530,15 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
if (rc < 0)
return -ENXIO;
+ /*
+ * The system PASID is preserved across a device disable/enable cycle,
+ * but the genconfig register content is cleared during device reset.
+ * Re-enable user interrupts so that the kernel work queue completion
+ * IRQ keeps functioning.
+ */
+ if (idxd->pasid != IOMMU_PASID_INVALID)
+ idxd_set_user_intr(idxd, 1);
+
rc = idxd_device_evl_setup(idxd);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;