author    Jacob Pan <jacob.pan.linux@gmail.com>    2020-09-25 09:32:44 -0700
committer Joerg Roedel <jroedel@suse.de>           2020-10-01 14:52:46 +0200
commit    8d3bb3b8cbf2ffb3ef73720a48b3445518dcdb55 (patch)
tree      0128ef643c3e803b601e0665b73b2a818b717c53 /drivers/iommu
parent    1e6aaae93e9ddb9dc664993eb949b1da94cab3a5 (diff)
iommu/uapi: Use named union for user data
IOMMU UAPI data size is filled in by user space and must be validated by the kernel. To ensure backward compatibility, user data can only be extended by either re-purposing padding bytes or extending the variable-sized union at the end; no size change is allowed before the union. Therefore, the minimum size is the offset of the union. To use offsetof() on the union, it must be named.

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/linux-iommu/20200611145518.0c2817d6@x1.home/
Link: https://lore.kernel.org/r/1601051567-54787-4-git-send-email-jacob.jun.pan@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
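For illustration, a minimal sketch of the validation idea described above; struct example_inv_info, EXAMPLE_INV_INFO_MIN_SZ and example_check_argsz() are hypothetical stand-ins, not the real UAPI definitions. Naming the trailing union is what makes offsetof() usable to compute the smallest argsz the kernel can accept.

/*
 * Hypothetical, simplified layout (not the actual iommu_cache_invalidate_info):
 * only the named trailing union may grow in future versions, so the offset of
 * that union is the minimum size a valid user buffer can have.
 */
#include <stddef.h>
#include <stdint.h>

struct example_inv_info {
	uint32_t argsz;        /* user space fills in the size of its struct */
	uint32_t version;
	uint8_t  cache;
	uint8_t  granularity;
	uint8_t  padding[6];
	union {                /* named so offsetof() can reference it */
		struct { uint64_t pasid; uint64_t flags; } pasid_info;
		struct { uint64_t addr;  uint64_t flags; } addr_info;
	} granu;               /* variable-sized tail; extensions go here */
};

/* Minimum acceptable size is everything up to (but not including) the union. */
#define EXAMPLE_INV_INFO_MIN_SZ offsetof(struct example_inv_info, granu)

/* Reject any user-supplied argsz smaller than the fixed, pre-union part. */
static int example_check_argsz(uint32_t argsz)
{
	return argsz >= EXAMPLE_INV_INFO_MIN_SZ ? 0 : -1;
}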
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/intel/iommu.c  22
-rw-r--r--  drivers/iommu/intel/svm.c     2
2 files changed, 12 insertions, 12 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f8177c59d229..f1c66c94be55 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5424,8 +5424,8 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
/* Size is only valid in address selective invalidation */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
- size = to_vtd_size(inv_info->addr_info.granule_size,
- inv_info->addr_info.nb_granules);
+ size = to_vtd_size(inv_info->granu.addr_info.granule_size,
+ inv_info->granu.addr_info.nb_granules);
for_each_set_bit(cache_type,
(unsigned long *)&inv_info->cache,
@@ -5446,20 +5446,20 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* granularity.
*/
if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
- (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
- pasid = inv_info->pasid_info.pasid;
+ (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
+ pasid = inv_info->granu.pasid_info.pasid;
else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
- (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
- pasid = inv_info->addr_info.pasid;
+ (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
+ pasid = inv_info->granu.addr_info.pasid;
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
/* HW will ignore LSB bits based on address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
- (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
+ (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
- inv_info->addr_info.addr, size);
+ inv_info->granu.addr_info.addr, size);
}
/*
@@ -5467,9 +5467,9 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* We use npages = -1 to indicate that.
*/
qi_flush_piotlb(iommu, did, pasid,
- mm_to_dma_pfn(inv_info->addr_info.addr),
+ mm_to_dma_pfn(inv_info->granu.addr_info.addr),
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
- inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+ inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
if (!info->ats_enabled)
break;
@@ -5492,7 +5492,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
size = 64 - VTD_PAGE_SHIFT;
addr = 0;
} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
- addr = inv_info->addr_info.addr;
+ addr = inv_info->granu.addr_info.addr;
}
if (info->ats_enabled)
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 95c3164a2302..99353d6468fa 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -370,7 +370,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
spin_lock(&iommu->lock);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
- data->hpasid, &data->vtd, dmar_domain,
+ data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock(&iommu->lock);
if (ret) {