Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/amd_iommu.c    7
-rw-r--r--  drivers/iommu/intel-iommu.c  5
-rw-r--r--  drivers/iommu/iova.c         5
3 files changed, 13 insertions, 4 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b319e51c379b..21cb088d6687 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Everything is mapped - write the right values into s->dma_address */
 	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address += address + s->offset;
+		/*
+		 * Add in the remaining piece of the scatter-gather offset that
+		 * was masked out when we were determining the physical address
+		 * via (sg_phys(s) & PAGE_MASK) earlier.
+		 */
+		s->dma_address += address + (s->offset & ~PAGE_MASK);
 		s->dma_length = s->length;
 	}
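
For reference, a minimal user-space sketch (not the kernel code itself, and assuming a 4 KiB page size) of the masking arithmetic the new comment describes: the physical address was chosen via sg_phys(s) & PAGE_MASK, which drops the in-page part of the scatterlist offset, so only that in-page remainder, s->offset & ~PAGE_MASK, still needs to be added to the IOVA; the page-aligned part of a large offset is already covered by the page-granular mapping.

/*
 * Illustrative user-space sketch of the offset split, assuming 4 KiB pages.
 * "offset & ~PAGE_MASK" is the piece that lies inside a page, while
 * "offset & PAGE_MASK" is the page-aligned piece the page-granular IOMMU
 * mapping already accounts for.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 5000;	/* sg->offset larger than PAGE_SIZE */

	printf("page-aligned part: %lu\n", offset & PAGE_MASK);	/* 4096 */
	printf("in-page remainder: %lu\n", offset & ~PAGE_MASK);	/* 904  */
	return 0;
}

For an offset of 5000 this prints 4096 and 904, which is presumably why only the remainder is added back: adding the full offset, as the removed line did, would count the page-aligned 4096 twice.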
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87274b54febd..28cb713d728c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
+	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+		return;
+
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 	ctx_lo = context[0].lo;
-	sdev->did = domain->iommu_did[iommu->seq_id];
+	sdev->did = FLPT_DEFAULT_DID;
 	sdev->sid = PCI_DEVID(info->bus, info->devfn);
 	if (!(ctx_lo & CONTEXT_PASIDE)) {
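
The first intel-iommu hunk is a capability guard: if the hardware reports neither protected low-memory (PLMR) nor protected high-memory (PHMR) region support, the function returns before touching DMAR_PMEN_REG at all, presumably because the register has no effect on such hardware. A rough user-space sketch of that early-return pattern, using placeholder bit positions rather than the authoritative VT-d register layout:

/*
 * Illustrative sketch of the capability guard added above.  The bit
 * positions are placeholders, not the real VT-d capability layout.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_PLMR_BIT	(1ULL << 5)	/* placeholder */
#define CAP_PHMR_BIT	(1ULL << 6)	/* placeholder */

static void disable_protect_mem_regions(uint64_t cap)
{
	if (!(cap & CAP_PLMR_BIT) && !(cap & CAP_PHMR_BIT)) {
		puts("no protected memory regions - nothing to disable");
		return;
	}
	puts("would clear DMA_PMEN_EPM and wait for the status bit");
}

int main(void)
{
	disable_protect_mem_regions(0);			/* neither bit set */
	disable_protect_mem_regions(CAP_PLMR_BIT);	/* PLMR supported  */
	return 0;
}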
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
-	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+		iovad->max32_alloc_size = size;
 		goto iova32_full;
+	}
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 iova32_full:
-	iovad->max32_alloc_size = size;
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return -ENOMEM;
 }
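
The iova.c hunks move the max32_alloc_size update so it happens only when the rb-tree walk itself failed, not on every path that jumps to the iova32_full label. A toy user-space sketch of that "remember the smallest failing size" shortcut, with a faked allocator standing in for the rb-tree walk:

/*
 * Toy sketch (not the kernel allocator) of the fail-fast cache adjusted
 * above.  The field name mirrors the kernel one; the allocator is faked.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_domain {
	unsigned long max32_alloc_size;	/* requests >= this are doomed */
	unsigned long free_space;	/* stand-in for the rb-tree walk */
};

static bool toy_alloc(struct toy_domain *d, unsigned long size)
{
	/* Fail fast: a request at least as big as a known failure. */
	if (size >= d->max32_alloc_size)
		return false;

	/* Stand-in for the expensive rb-tree walk. */
	if (size > d->free_space) {
		/*
		 * Record the failure only here, where the walk really ran,
		 * mirroring the patch above.  Recording it on the fail-fast
		 * path as well could only raise the threshold and make the
		 * shortcut fire less often.
		 */
		d->max32_alloc_size = size;
		return false;
	}

	d->free_space -= size;
	return true;
}

int main(void)
{
	struct toy_domain d = { .max32_alloc_size = ~0UL, .free_space = 100 };

	printf("alloc 200: %d\n", toy_alloc(&d, 200));	/* walk fails, remember 200 */
	printf("alloc 300: %d\n", toy_alloc(&d, 300));	/* fails fast, no walk */
	printf("alloc  50: %d\n", toy_alloc(&d, 50));	/* succeeds */
	return 0;
}

In the toy run, the 300-unit request fails without a walk because a 200-unit walk already failed, which is the behaviour the relocated assignment preserves.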