Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd/Kconfig      |  1
-rw-r--r--  drivers/iommu/amd/amd_iommu.h  |  4
-rw-r--r--  drivers/iommu/amd/iommu.c      | 39
-rw-r--r--  drivers/iommu/amd/ppr.c        | 41
4 files changed, 77 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 443b2c13c37b..d563f6d496ca 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,6 +10,7 @@ config AMD_IOMMU
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMU_IO_PGTABLE
+	select IOMMU_IOPF
 	select IOMMUFD_DRIVER if IOMMUFD
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index bb9a4c2e40da..dabbb85a71e9 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -51,6 +51,10 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu);
 void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
 void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data);
 
 /* GCR3 setup */
 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
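The two new prototypes are driver-internal helpers used by the attach/detach paths in iommu.c below, while amd_iommu_page_response() and the FEAT_IOPF enable/disable handlers are what the core io-pgfault code reaches through the driver's iommu_ops. As a rough, illustrative sketch of that wiring (the ops initializer itself is not part of this diff; only the shown callbacks are implied by it):

const struct iommu_ops amd_iommu_ops = {
	/* other callbacks elided */
	.dev_enable_feat	= amd_iommu_dev_enable_feature,
	.dev_disable_feat	= amd_iommu_dev_disable_feature,
	.page_response		= amd_iommu_page_response,
};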
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 11b50a6c744e..9d5bffa16cb6 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2057,8 +2057,17 @@ static int do_attach(struct iommu_dev_data *dev_data,
 		if (ret)
 			return ret;
 
-		if (pdev)
+		if (pdev) {
 			pdev_enable_caps(pdev);
+
+			/*
+			 * Device can continue to function even if IOPF
+			 * enablement failed. Hence in error path just
+			 * disable device PRI support.
+			 */
+			if (amd_iommu_iopf_add_device(iommu, dev_data))
+				pdev_disable_cap_pri(pdev);
+		}
 	} else if (pdev) {
 		pdev_enable_cap_ats(pdev);
 	}
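The error handling here is deliberately best-effort: a failure to join the IOPF queue degrades the device to non-PRI operation instead of failing the attach. The body of pdev_disable_cap_pri() is not part of this hunk; a hypothetical sketch of what such a fallback amounts to on the PCI side, using the standard pci_disable_pri() interface (the helper name and bookkeeping are assumptions for illustration only):

/* Hypothetical illustration, not the driver's actual helper. */
static void example_drop_pri(struct pci_dev *pdev, struct iommu_dev_data *dev_data)
{
	if (dev_data->pri_enabled) {
		pci_disable_pri(pdev);		/* PCI core: turn the PRI capability back off */
		dev_data->pri_enabled = false;	/* keep the IOMMU's per-device state in sync */
	}
}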
@@ -2130,12 +2139,11 @@ out:
  */
 static void detach_device(struct device *dev)
 {
-	struct protection_domain *domain;
-	struct iommu_dev_data *dev_data;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	struct protection_domain *domain = dev_data->domain;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
 	unsigned long flags;
-
-	dev_data = dev_iommu_priv_get(dev);
-	domain = dev_data->domain;
+	bool ppr = dev_data->ppr;
 
 	spin_lock_irqsave(&domain->lock, flags);
 
@@ -2150,8 +2158,19 @@ static void detach_device(struct device *dev)
 	if (WARN_ON(!dev_data->domain))
 		goto out;
 
+	if (ppr) {
+		iopf_queue_flush_dev(dev);
+
+		/* Updated here so that it gets reflected in DTE */
+		dev_data->ppr = false;
+	}
+
 	do_detach(dev_data);
 
+	/* Remove IOPF handler */
+	if (ppr)
+		amd_iommu_iopf_remove_device(iommu, dev_data);
+
 	if (dev_is_pci(dev))
 		pdev_disable_caps(to_pci_dev(dev));
 
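The ordering in the reworked detach path is the important part. A condensed restatement of the same calls, with the reasoning spelled out as comments (illustrative; the authoritative sequence is the hunk above):

	/* 1. Drain page requests already queued for this device while the
	 *    IOPF plumbing is still in place to respond to them. */
	iopf_queue_flush_dev(dev);

	/* 2. Clear the flag before do_detach() so the device-table update it
	 *    performs no longer has PPR enabled for this device. */
	dev_data->ppr = false;

	/* 3. Tear down the translation via the device-table update. */
	do_detach(dev_data);

	/* 4. Only then drop the device from the per-IOMMU IOPF queue. */
	amd_iommu_iopf_remove_device(iommu, dev_data);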
@@ -2814,9 +2833,11 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
 static int amd_iommu_dev_enable_feature(struct device *dev,
 					enum iommu_dev_features feat)
 {
-	int ret;
+	int ret = 0;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -2827,9 +2848,11 @@ static int amd_iommu_dev_enable_feature(struct device *dev,
 static int amd_iommu_dev_disable_feature(struct device *dev,
 					 enum iommu_dev_features feat)
 {
-	int ret;
+	int ret = 0;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		break;
 	default:
 		ret = -EINVAL;
 		break;
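With these two callbacks accepting IOMMU_DEV_FEAT_IOPF, the core iommu_dev_enable_feature()/iommu_dev_disable_feature() pair now works for devices behind an AMD IOMMU. Roughly how an SVA-capable client driver is expected to exercise it (an illustrative fragment, not code from this patch; error unwinding kept minimal):

	u32 pasid;
	int ret;
	struct iommu_sva *handle;

	/* Recoverable I/O page faults are the prerequisite for demand paging. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	/* Share the current process address space with the device. */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle)) {
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
		return PTR_ERR(handle);
	}

	/* Program this PASID into the device; faults on it are now recoverable. */
	pasid = iommu_sva_get_pasid(handle);

Existing users such as idxd also toggle IOMMU_DEV_FEAT_SVA around the bind; that handling is outside this hunk.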
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 9bdd1db5f60a..0463daa2d46b 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -243,3 +243,44 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
 {
 	amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
 }
+
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (!dev_data->pri_enabled)
+		return ret;
+
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+
+	if (!iommu->iopf_queue) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
+	if (ret)
+		goto out_unlock;
+
+	dev_data->ppr = true;
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+	return ret;
+}
+
+/* It's assumed that the caller has verified that the device was added to the iopf queue */
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+
+	iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
+	dev_data->ppr = false;
+
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
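amd_iommu_iopf_add_device() treats a missing iommu->iopf_queue as -EINVAL rather than a bug because the per-IOMMU queue is only allocated when PPR is usable. That allocation lives in amd_iommu_iopf_init()/amd_iommu_iopf_uninit(), declared in the header hunk above but not shown here; a minimal sketch of the expected shape, using the core iopf_queue_alloc()/iopf_queue_free() interfaces (the PPR-support check and the queue name below are illustrative assumptions):

int amd_iommu_iopf_init(struct amd_iommu *iommu)
{
	/* No queue on IOMMUs without PPR support: add_device() above then
	 * sees a NULL iopf_queue and bails out with -EINVAL. */
	if (!amd_iommu_gt_ppr_supported())
		return 0;

	iommu->iopf_queue = iopf_queue_alloc(dev_name(iommu->iommu.dev));

	return iommu->iopf_queue ? 0 : -ENOMEM;
}

void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
{
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
}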