Diffstat (limited to 'drivers/iommu/s390-iommu.c')
-rw-r--r--  drivers/iommu/s390-iommu.c | 932
1 file changed, 840 insertions(+), 92 deletions(-)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index fbf59a8db29b..fe679850af28 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -14,16 +14,484 @@
 #include <linux/rcupdate.h>
 #include <asm/pci_dma.h>
 
-static const struct iommu_ops s390_iommu_ops;
+#include "dma-iommu.h"
+
+static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;
+
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+
+static u64 s390_iommu_aperture;
+static u32 s390_iommu_aperture_factor = 1;
 
 struct s390_domain {
 	struct iommu_domain domain;
 	struct list_head devices;
+	struct zpci_iommu_ctrs ctrs;
 	unsigned long *dma_table;
 	spinlock_t list_lock;
 	struct rcu_head rcu;
+	u8 origin_type;
 };
 
+static struct iommu_domain blocking_domain;
+
+static inline unsigned int calc_rfx(dma_addr_t ptr)
+{
+	return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_rsx(dma_addr_t ptr)
+{
+	return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+	return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+	return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
+{
+	*entry &= ZPCI_PTE_FLAG_MASK;
+	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
+{
+	*entry &= ZPCI_RTE_FLAG_MASK;
+	*entry |= (rso & ZPCI_RTE_ADDR_MASK);
+	*entry |= ZPCI_TABLE_TYPE_RFX;
+}
+
+static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
+{
+	*entry &= ZPCI_RTE_FLAG_MASK;
+	*entry |= (rto & ZPCI_RTE_ADDR_MASK);
+	*entry |= ZPCI_TABLE_TYPE_RSX;
+}
+
+static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
+{
+	*entry &= ZPCI_RTE_FLAG_MASK;
+	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
+	*entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
+{
+	*entry &= ZPCI_STE_FLAG_MASK;
+	*entry |= (pto & ZPCI_STE_ADDR_MASK);
+	*entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rf_entry(unsigned long *entry)
+{
+	*entry &= ~ZPCI_TABLE_VALID_MASK;
+	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
+	*entry |= ZPCI_TABLE_VALID;
+	*entry |= ZPCI_TABLE_LEN_RFX;
+}
+
+static inline void validate_rs_entry(unsigned long *entry)
+{
+	*entry &= ~ZPCI_TABLE_VALID_MASK;
+	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
+	*entry |= ZPCI_TABLE_VALID;
+	*entry |= ZPCI_TABLE_LEN_RSX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+	*entry &= ~ZPCI_TABLE_VALID_MASK;
+	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
+	*entry |= ZPCI_TABLE_VALID;
+	*entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+	*entry &= ~ZPCI_TABLE_VALID_MASK;
+	*entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+	*entry &= ~ZPCI_PTE_VALID_MASK;
+	*entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+	*entry &= ~ZPCI_PTE_VALID_MASK;
+	*entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+	*entry &= ~ZPCI_TABLE_PROT_MASK;
+	*entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+	*entry &= ~ZPCI_TABLE_PROT_MASK;
+	*entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline unsigned long *get_rf_rso(unsigned long entry)
+{
+	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
+		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+	else
+		return NULL;
+}
+
+static inline unsigned long *get_rs_rto(unsigned long entry)
+{
+	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
+		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+	else
+		return NULL;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+	else
+		return NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
+	else
+		return NULL;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+						   ZPCI_TABLE_SIZE,
+						   ZPCI_TABLE_ALIGN,
+						   0, NULL);
+	if (!dma_region_table_cache)
+		return -ENOMEM;
+
+	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+						 ZPCI_PT_SIZE,
+						 ZPCI_PT_ALIGN,
+						 0, NULL);
+	if (!dma_page_table_cache) {
+		kmem_cache_destroy(dma_region_table_cache);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
+{
+	unsigned long *table, *entry;
+
+	table = kmem_cache_alloc(dma_region_table_cache, gfp);
+	if (!table)
+		return NULL;
+
+	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+		*entry = ZPCI_TABLE_INVALID;
+	return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+	kmem_cache_free(dma_region_table_cache, table);
+}
+
+static void dma_free_page_table(void *table)
+{
+	kmem_cache_free(dma_page_table_cache, table);
+}
+
+static void dma_free_seg_table(unsigned long entry)
+{
+	unsigned long *sto = get_rt_sto(entry);
+	int sx;
+
+	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+		if (reg_entry_isvalid(sto[sx]))
+			dma_free_page_table(get_st_pto(sto[sx]));
+
+	dma_free_cpu_table(sto);
+}
+
+static void dma_free_rt_table(unsigned long entry)
+{
+	unsigned long *rto = get_rs_rto(entry);
+	int rtx;
+
+	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+		if (reg_entry_isvalid(rto[rtx]))
+			dma_free_seg_table(rto[rtx]);
+
+	dma_free_cpu_table(rto);
+}
+
+static void dma_free_rs_table(unsigned long entry)
+{
+	unsigned long *rso = get_rf_rso(entry);
+	int rsx;
+
+	for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+		if (reg_entry_isvalid(rso[rsx]))
+			dma_free_rt_table(rso[rsx]);
+
+	dma_free_cpu_table(rso);
+}
+
+static void dma_cleanup_tables(struct s390_domain *domain)
+{
+	int rtx, rsx, rfx;
+
+	if (!domain->dma_table)
+		return;
+
+	switch (domain->origin_type) {
+	case ZPCI_TABLE_TYPE_RFX:
+		for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
+			if (reg_entry_isvalid(domain->dma_table[rfx]))
+				dma_free_rs_table(domain->dma_table[rfx]);
+		break;
+	case ZPCI_TABLE_TYPE_RSX:
+		for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+			if (reg_entry_isvalid(domain->dma_table[rsx]))
+				dma_free_rt_table(domain->dma_table[rsx]);
+		break;
+	case ZPCI_TABLE_TYPE_RTX:
+		for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+			if (reg_entry_isvalid(domain->dma_table[rtx]))
+				dma_free_seg_table(domain->dma_table[rtx]);
+		break;
+	default:
+		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+		return;
+	}
+
+	dma_free_cpu_table(domain->dma_table);
+}
+
+static unsigned long *dma_alloc_page_table(gfp_t gfp)
+{
+	unsigned long *table, *entry;
+
+	table = kmem_cache_alloc(dma_page_table_cache, gfp);
+	if (!table)
+		return NULL;
+
+	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+		*entry = ZPCI_PTE_INVALID;
+	return table;
+}
+
+static unsigned long *dma_walk_rs_table(unsigned long *rso,
+					dma_addr_t dma_addr, gfp_t gfp)
+{
+	unsigned int rsx = calc_rsx(dma_addr);
+	unsigned long old_rse, rse;
+	unsigned long *rsep, *rto;
+
+	rsep = &rso[rsx];
+	rse = READ_ONCE(*rsep);
+	if (reg_entry_isvalid(rse)) {
+		rto = get_rs_rto(rse);
+	} else {
+		rto = dma_alloc_cpu_table(gfp);
+		if (!rto)
+			return NULL;
+
+		set_rs_rto(&rse, virt_to_phys(rto));
+		validate_rs_entry(&rse);
+		entry_clr_protected(&rse);
+
+		old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
+		if (old_rse != ZPCI_TABLE_INVALID) {
+			/* Someone else was faster, use theirs */
+			dma_free_cpu_table(rto);
+			rto = get_rs_rto(old_rse);
+		}
+	}
+	return rto;
+}
+
+static unsigned long *dma_walk_rf_table(unsigned long *rfo,
+					dma_addr_t dma_addr, gfp_t gfp)
+{
+	unsigned int rfx = calc_rfx(dma_addr);
+	unsigned long old_rfe, rfe;
+	unsigned long *rfep, *rso;
+
+	rfep = &rfo[rfx];
+	rfe = READ_ONCE(*rfep);
+	if (reg_entry_isvalid(rfe)) {
+		rso = get_rf_rso(rfe);
+	} else {
+		rso = dma_alloc_cpu_table(gfp);
+		if (!rso)
+			return NULL;
+
+		set_rf_rso(&rfe, virt_to_phys(rso));
+		validate_rf_entry(&rfe);
+		entry_clr_protected(&rfe);
+
+		old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
+		if (old_rfe != ZPCI_TABLE_INVALID) {
+			/* Someone else was faster, use theirs */
+			dma_free_cpu_table(rso);
+			rso = get_rf_rso(old_rfe);
+		}
+	}
+
+	if (!rso)
+		return NULL;
+
+	return dma_walk_rs_table(rso, dma_addr, gfp);
+}
+
+static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
+{
+	unsigned long old_rte, rte;
+	unsigned long *sto;
+
+	rte = READ_ONCE(*rtep);
+	if (reg_entry_isvalid(rte)) {
+		sto = get_rt_sto(rte);
+	} else {
+		sto = dma_alloc_cpu_table(gfp);
+		if (!sto)
+			return NULL;
+
+		set_rt_sto(&rte, virt_to_phys(sto));
+		validate_rt_entry(&rte);
+		entry_clr_protected(&rte);
+
+		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
+		if (old_rte != ZPCI_TABLE_INVALID) {
+			/* Someone else was faster, use theirs */
+			dma_free_cpu_table(sto);
+			sto = get_rt_sto(old_rte);
+		}
+	}
+	return sto;
+}
+
+static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
+{
+	unsigned long old_ste, ste;
+	unsigned long *pto;
+
+	ste = READ_ONCE(*step);
+	if (reg_entry_isvalid(ste)) {
+		pto = get_st_pto(ste);
+	} else {
+		pto = dma_alloc_page_table(gfp);
+		if (!pto)
+			return NULL;
+		set_st_pto(&ste, virt_to_phys(pto));
+		validate_st_entry(&ste);
+		entry_clr_protected(&ste);
+
+		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
+		if (old_ste != ZPCI_TABLE_INVALID) {
+			/* Someone else was faster, use theirs */
+			dma_free_page_table(pto);
+			pto = get_st_pto(old_ste);
+		}
+	}
+	return pto;
+}
+
+static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
+					     dma_addr_t dma_addr, gfp_t gfp)
+{
+	switch (domain->origin_type) {
+	case ZPCI_TABLE_TYPE_RFX:
+		return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
+	case ZPCI_TABLE_TYPE_RSX:
+		return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
+	case ZPCI_TABLE_TYPE_RTX:
+		return domain->dma_table;
+	default:
+		return NULL;
+	}
+}
+
+static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
+					 dma_addr_t dma_addr, gfp_t gfp)
+{
+	unsigned long *rto, *sto, *pto;
+	unsigned int rtx, sx, px;
+
+	rto = dma_walk_region_tables(domain, dma_addr, gfp);
+	if (!rto)
+		return NULL;
+
+	rtx = calc_rtx(dma_addr);
+	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
+	if (!sto)
+		return NULL;
+
+	sx = calc_sx(dma_addr);
+	pto = dma_get_page_table_origin(&sto[sx], gfp);
+	if (!pto)
+		return NULL;
+
+	px = calc_px(dma_addr);
+	return &pto[px];
+}
+
+static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
+{
+	unsigned long pte;
+
+	pte = READ_ONCE(*ptep);
+	if (flags & ZPCI_PTE_INVALID) {
+		invalidate_pt_entry(&pte);
+	} else {
+		set_pt_pfaa(&pte, page_addr);
+		validate_pt_entry(&pte);
+	}
+
+	if (flags & ZPCI_TABLE_PROTECTED)
+		entry_set_protected(&pte);
+	else
+		entry_clr_protected(&pte);
+
+	xchg(ptep, pte);
+}
+
 static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct s390_domain, domain);
@@ -31,20 +499,37 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
 
 static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
+	struct zpci_dev *zdev = to_zpci_dev(dev);
+
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
+	case IOMMU_CAP_DEFERRED_FLUSH:
+		return zdev->pft != PCI_FUNC_TYPE_ISM;
 	default:
 		return false;
 	}
 }
 
-static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+static inline u64 max_tbl_size(struct s390_domain *domain)
 {
-	struct s390_domain *s390_domain;
+	switch (domain->origin_type) {
+	case ZPCI_TABLE_TYPE_RTX:
+		return ZPCI_TABLE_SIZE_RT - 1;
+	case ZPCI_TABLE_TYPE_RSX:
+		return ZPCI_TABLE_SIZE_RS - 1;
+	case ZPCI_TABLE_TYPE_RFX:
+		return U64_MAX;
+	default:
+		return 0;
+	}
+}
 
-	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
+static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
+{
+	struct zpci_dev *zdev = to_zpci_dev(dev);
+	struct s390_domain *s390_domain;
+	u64 aperture_size;
 
 	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
 	if (!s390_domain)
@@ -55,9 +540,27 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
 		kfree(s390_domain);
 		return NULL;
 	}
+
+	aperture_size = min(s390_iommu_aperture,
+			    zdev->end_dma - zdev->start_dma + 1);
+	if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
+		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+	} else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
+		   (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
+		s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
+	} else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
+		s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
+	} else {
+		/* Assume RTX available */
+		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+		aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
+	}
+	zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
+	s390_domain->domain.pgsize_bitmap = SZ_4K;
 	s390_domain->domain.geometry.force_aperture = true;
 	s390_domain->domain.geometry.aperture_start = 0;
-	s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
+	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
 
 	spin_lock_init(&s390_domain->list_lock);
 	INIT_LIST_HEAD_RCU(&s390_domain->devices);
@@ -69,7 +572,7 @@ static void s390_iommu_rcu_free_domain(struct rcu_head *head)
 {
 	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
 
-	dma_cleanup_tables(s390_domain->dma_table);
+	dma_cleanup_tables(s390_domain);
 	kfree(s390_domain);
 }
 
@@ -84,25 +587,116 @@ static void s390_domain_free(struct iommu_domain *domain)
 	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
 }
 
-static void __s390_iommu_detach_device(struct zpci_dev *zdev)
+static void zdev_s390_domain_update(struct zpci_dev *zdev,
+				    struct iommu_domain *domain)
 {
-	struct s390_domain *s390_domain = zdev->s390_domain;
 	unsigned long flags;
 
-	if (!s390_domain)
-		return;
+	spin_lock_irqsave(&zdev->dom_lock, flags);
+	zdev->s390_domain = domain;
+	spin_unlock_irqrestore(&zdev->dom_lock, flags);
+}
 
-	spin_lock_irqsave(&s390_domain->list_lock, flags);
-	list_del_rcu(&zdev->iommu_list);
-	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+static u64 get_iota_region_flag(struct s390_domain *domain)
+{
+	switch (domain->origin_type) {
+	case ZPCI_TABLE_TYPE_RTX:
+		return ZPCI_IOTA_RTTO_FLAG;
+	case ZPCI_TABLE_TYPE_RSX:
+		return ZPCI_IOTA_RSTO_FLAG;
+	case ZPCI_TABLE_TYPE_RFX:
+		return ZPCI_IOTA_RFTO_FLAG;
+	default:
+		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+		return 0;
+	}
+}
+
+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+	/*
+	 * If the device is in the error state the reset routine
+	 * will register the IOAT of the newly set domain on re-enable
+	 */
+	if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+		return false;
+	/*
+	 * If the device was removed treat registration as success
+	 * and let the subsequent error event trigger tear down.
+	 */
+	if (cc == ZPCI_CC_INVAL_HANDLE)
+		return false;
+	return cc != ZPCI_CC_OK;
+}
+
+static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
+				      struct iommu_domain *domain, u8 *status)
+{
+	struct s390_domain *s390_domain;
+	int rc = 0;
+	u64 iota;
+
+	switch (domain->type) {
+	case IOMMU_DOMAIN_IDENTITY:
+		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
+					zdev->end_dma, 0, status);
+		break;
+	case IOMMU_DOMAIN_BLOCKED:
+		/* Nothing to do in this case */
+		break;
+	default:
+		s390_domain = to_s390_domain(domain);
+		iota = virt_to_phys(s390_domain->dma_table) |
+		       get_iota_region_flag(s390_domain);
+		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
+					zdev->end_dma, iota, status);
+	}
+
+	return rc;
+}
+
+int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&zdev->dom_lock, flags);
+
+	rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);
+
+	spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+	return rc;
+}
+
+static int blocking_domain_attach_device(struct iommu_domain *domain,
+					 struct device *dev,
+					 struct iommu_domain *old)
+{
+	struct zpci_dev *zdev = to_zpci_dev(dev);
+	struct s390_domain *s390_domain;
+	unsigned long flags;
+
+	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+		return 0;
+
+	s390_domain = to_s390_domain(zdev->s390_domain);
+	if (zdev->dma_table) {
+		spin_lock_irqsave(&s390_domain->list_lock, flags);
+		list_del_rcu(&zdev->iommu_list);
+		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+	}
 
 	zpci_unregister_ioat(zdev, 0);
-	zdev->s390_domain = NULL;
 	zdev->dma_table = NULL;
+	zdev_s390_domain_update(zdev, domain);
+
+	return 0;
 }
 
 static int s390_iommu_attach_device(struct iommu_domain *domain,
-				    struct device *dev)
+				    struct device *dev,
+				    struct iommu_domain *old)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
 	struct zpci_dev *zdev = to_zpci_dev(dev);
@@ -117,23 +711,14 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
 		    domain->geometry.aperture_end < zdev->start_dma))
 		return -EINVAL;
 
-	if (zdev->s390_domain)
-		__s390_iommu_detach_device(zdev);
-	else if (zdev->dma_table)
-		zpci_dma_exit_device(zdev);
+	blocking_domain_attach_device(&blocking_domain, dev, old);
 
-	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-				virt_to_phys(s390_domain->dma_table), &status);
-	/*
-	 * If the device is undergoing error recovery the reset code
-	 * will re-establish the new domain.
-	 */
-	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+	/* If we fail now DMA remains blocked via blocking domain */
+	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
+	if (reg_ioat_propagate_error(cc, status))
 		return -EIO;
 
 	zdev->dma_table = s390_domain->dma_table;
-
-	zdev->dma_table = s390_domain->dma_table;
-	zdev->s390_domain = s390_domain;
+	zdev_s390_domain_update(zdev, domain);
 
 	spin_lock_irqsave(&s390_domain->list_lock, flags);
 	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
@@ -142,19 +727,13 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
 	return 0;
 }
 
-static void s390_iommu_set_platform_dma(struct device *dev)
-{
-	struct zpci_dev *zdev = to_zpci_dev(dev);
-
-	__s390_iommu_detach_device(zdev);
-	zpci_dma_init_device(zdev);
-}
-
 static void s390_iommu_get_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 	struct zpci_dev *zdev = to_zpci_dev(dev);
 	struct iommu_resv_region *region;
+	u64 max_size, end_resv;
+	unsigned long flags;
 
 	if (zdev->start_dma) {
 		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
@@ -164,10 +743,21 @@ static void s390_iommu_get_resv_regions(struct device *dev,
 		list_add_tail(&region->list, list);
 	}
 
-	if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
-		region = iommu_alloc_resv_region(zdev->end_dma + 1,
-						 ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
-						 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+	spin_lock_irqsave(&zdev->dom_lock, flags);
+	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
+		spin_unlock_irqrestore(&zdev->dom_lock, flags);
+		return;
+	}
+
+	max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
+	spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+	if (zdev->end_dma < max_size) {
+		end_resv = max_size - zdev->end_dma;
+		region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
+						 0, IOMMU_RESV_RESERVED,
+						 GFP_KERNEL);
 		if (!region)
 			return;
 		list_add_tail(&region->list, list);
@@ -183,26 +773,23 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
 
 	zdev = to_zpci_dev(dev);
 
-	if (zdev->start_dma > zdev->end_dma ||
-	    zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+	if (zdev->start_dma > zdev->end_dma)
 		return ERR_PTR(-EINVAL);
 
-	if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
-		zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
+	if (zdev->tlb_refresh)
+		dev->iommu->shadow_on_flush = 1;
+
+	/* Start with DMA blocked */
+	spin_lock_init(&zdev->dom_lock);
+	zdev_s390_domain_update(zdev, &blocking_domain);
 
 	return &zdev->iommu_dev;
 }
 
-static void s390_iommu_release_device(struct device *dev)
+static int zpci_refresh_all(struct zpci_dev *zdev)
 {
-	struct zpci_dev *zdev = to_zpci_dev(dev);
-
-	/*
-	 * release_device is expected to detach any domain currently attached
-	 * to the device, but keep it attached to other devices in the group.
-	 */
-	if (zdev)
-		__s390_iommu_detach_device(zdev);
+	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+				  zdev->end_dma - zdev->start_dma + 1);
 }
 
 static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
@@ -212,8 +799,8 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
-		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
-				   zdev->end_dma - zdev->start_dma + 1);
+		atomic64_inc(&s390_domain->ctrs.global_rpcits);
+		zpci_refresh_all(zdev);
 	}
 	rcu_read_unlock();
 }
@@ -231,26 +818,40 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
 		zpci_refresh_trans((u64)zdev->fh << 32,
 				   gather->start, size);
 	}
 	rcu_read_unlock();
 }
 
-static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
-				      unsigned long iova, size_t size)
+static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				     unsigned long iova, size_t size)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
 	struct zpci_dev *zdev;
+	int ret = 0;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
 		if (!zdev->tlb_refresh)
 			continue;
-		zpci_refresh_trans((u64)zdev->fh << 32,
-				   iova, size);
+		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
+		ret = zpci_refresh_trans((u64)zdev->fh << 32,
+					 iova, size);
+		/*
+		 * let the hypervisor discover invalidated entries
+		 * allowing it to free IOVAs and unpin pages
+		 */
+		if (ret == -ENOMEM) {
+			ret = zpci_refresh_all(zdev);
+			if (ret)
+				break;
+		}
 	}
 	rcu_read_unlock();
+
+	return ret;
 }
 
 static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
@@ -264,8 +865,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 	int rc;
 
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
-					   gfp);
+		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
 		if (unlikely(!entry)) {
 			rc = -ENOMEM;
 			goto undo_cpu_trans;
@@ -280,8 +880,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 undo_cpu_trans:
 	while (i-- > 0) {
 		dma_addr -= PAGE_SIZE;
-		entry = dma_walk_cpu_trans(s390_domain->dma_table,
-					   dma_addr, gfp);
+		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
 		if (!entry)
 			break;
 		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -298,8 +897,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
 	int rc = 0;
 
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
-					   GFP_ATOMIC);
+		entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
 		if (unlikely(!entry)) {
 			rc = -EINVAL;
 			break;
@@ -330,20 +928,64 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
 	if (!IS_ALIGNED(iova | paddr, pgsize))
 		return -EINVAL;
 
-	if (!(prot & IOMMU_READ))
-		return -EINVAL;
-
 	if (!(prot & IOMMU_WRITE))
 		flags |= ZPCI_TABLE_PROTECTED;
 
 	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
-				       pgcount, flags, gfp);
-	if (!rc)
+				       pgcount, flags, gfp);
+	if (!rc) {
 		*mapped = size;
+		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
+	}
 
 	return rc;
 }
 
+static unsigned long *get_rso_from_iova(struct s390_domain *domain,
+					dma_addr_t iova)
+{
+	unsigned long *rfo;
+	unsigned long rfe;
+	unsigned int rfx;
+
+	switch (domain->origin_type) {
+	case ZPCI_TABLE_TYPE_RFX:
+		rfo = domain->dma_table;
+		rfx = calc_rfx(iova);
+		rfe = READ_ONCE(rfo[rfx]);
+		if (!reg_entry_isvalid(rfe))
+			return NULL;
+		return get_rf_rso(rfe);
+	case ZPCI_TABLE_TYPE_RSX:
+		return domain->dma_table;
+	default:
+		return NULL;
+	}
+}
+
+static unsigned long *get_rto_from_iova(struct s390_domain *domain,
+					dma_addr_t iova)
+{
+	unsigned long *rso;
+	unsigned long rse;
+	unsigned int rsx;
+
+	switch (domain->origin_type) {
+	case ZPCI_TABLE_TYPE_RFX:
+	case ZPCI_TABLE_TYPE_RSX:
+		rso = get_rso_from_iova(domain, iova);
+		rsx = calc_rsx(iova);
+		rse = READ_ONCE(rso[rsx]);
+		if (!reg_entry_isvalid(rse))
+			return NULL;
+		return get_rs_rto(rse);
+	case ZPCI_TABLE_TYPE_RTX:
+		return domain->dma_table;
+	default:
+		return NULL;
+	}
+}
+
 static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
@@ -357,10 +999,13 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
 	    iova > domain->geometry.aperture_end)
 		return 0;
 
+	rto = get_rto_from_iova(s390_domain, iova);
+	if (!rto)
+		return 0;
+
 	rtx = calc_rtx(iova);
 	sx = calc_sx(iova);
 	px = calc_px(iova);
-	rto = s390_domain->dma_table;
 
 	rte = READ_ONCE(rto[rtx]);
 	if (reg_entry_isvalid(rte)) {
@@ -395,10 +1040,25 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
 		return 0;
 
 	iommu_iotlb_gather_add_range(gather, iova, size);
+	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);
 
 	return size;
 }
 
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
+{
+	struct s390_domain *s390_domain;
+
+	lockdep_assert_held(&zdev->dom_lock);
+
+	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
+		return NULL;
+
+	s390_domain = to_s390_domain(zdev->s390_domain);
+	return &s390_domain->ctrs;
+}
+
 int zpci_init_iommu(struct zpci_dev *zdev)
 {
 	int rc = 0;
@@ -408,7 +1068,13 @@ int zpci_init_iommu(struct zpci_dev *zdev)
 	if (rc)
 		goto out_err;
 
-	rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
+	if (zdev->rtr_avail) {
+		rc = iommu_device_register(&zdev->iommu_dev,
+					   &s390_iommu_rtr_ops, NULL);
+	} else {
+		rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
+					   NULL);
+	}
 	if (rc)
 		goto out_sysfs;
 
@@ -427,23 +1093,105 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
 	iommu_device_sysfs_remove(&zdev->iommu_dev);
 }
 
-static const struct iommu_ops s390_iommu_ops = {
-	.capable = s390_iommu_capable,
-	.domain_alloc = s390_domain_alloc,
-	.probe_device = s390_iommu_probe_device,
-	.release_device = s390_iommu_release_device,
-	.device_group = generic_device_group,
-	.set_platform_dma_ops = s390_iommu_set_platform_dma,
-	.pgsize_bitmap = SZ_4K,
-	.get_resv_regions = s390_iommu_get_resv_regions,
-	.default_domain_ops = &(const struct iommu_domain_ops) {
-		.attach_dev = s390_iommu_attach_device,
-		.map_pages = s390_iommu_map_pages,
-		.unmap_pages = s390_iommu_unmap_pages,
-		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
-		.iotlb_sync = s390_iommu_iotlb_sync,
-		.iotlb_sync_map = s390_iommu_iotlb_sync_map,
-		.iova_to_phys = s390_iommu_iova_to_phys,
-		.free = s390_domain_free,
+static int __init s390_iommu_setup(char *str)
+{
+	if (!strcmp(str, "strict")) {
+		pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
+		iommu_set_dma_strict();
 	}
+	return 1;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
+
+static int __init s390_iommu_aperture_setup(char *str)
+{
+	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
+		s390_iommu_aperture_factor = 1;
+	return 1;
+}
+
+__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
+
+static int __init s390_iommu_init(void)
+{
+	int rc;
+
+	iommu_dma_forcedac = true;
+	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
+	if (!s390_iommu_aperture_factor)
+		s390_iommu_aperture = ULONG_MAX;
+	else
+		s390_iommu_aperture *= s390_iommu_aperture_factor;
+
+	rc = dma_alloc_cpu_table_caches();
+	if (rc)
+		return rc;
+
+	return rc;
+}
+subsys_initcall(s390_iommu_init);
+
+static int s390_attach_dev_identity(struct iommu_domain *domain,
+				    struct device *dev,
+				    struct iommu_domain *old)
+{
+	struct zpci_dev *zdev = to_zpci_dev(dev);
+	u8 status;
+	int cc;
+
+	blocking_domain_attach_device(&blocking_domain, dev, old);
+
+	/* If we fail now DMA remains blocked via blocking domain */
+	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
+	if (reg_ioat_propagate_error(cc, status))
+		return -EIO;
+
+	zdev_s390_domain_update(zdev, domain);
+
+	return 0;
+}
+
+static const struct iommu_domain_ops s390_identity_ops = {
+	.attach_dev = s390_attach_dev_identity,
+};
+
+static struct iommu_domain s390_identity_domain = {
+	.type = IOMMU_DOMAIN_IDENTITY,
+	.ops = &s390_identity_ops,
+};
+
+static struct iommu_domain blocking_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &(const struct iommu_domain_ops) {
+		.attach_dev = blocking_domain_attach_device,
+	}
+};
+
+#define S390_IOMMU_COMMON_OPS() \
+	.blocked_domain = &blocking_domain, \
+	.release_domain = &blocking_domain, \
+	.capable = s390_iommu_capable, \
+	.domain_alloc_paging = s390_domain_alloc_paging, \
+	.probe_device = s390_iommu_probe_device, \
+	.device_group = generic_device_group, \
+	.get_resv_regions = s390_iommu_get_resv_regions, \
+	.default_domain_ops = &(const struct iommu_domain_ops) { \
+		.attach_dev = s390_iommu_attach_device, \
+		.map_pages = s390_iommu_map_pages, \
+		.unmap_pages = s390_iommu_unmap_pages, \
+		.flush_iotlb_all = s390_iommu_flush_iotlb_all, \
+		.iotlb_sync = s390_iommu_iotlb_sync, \
+		.iotlb_sync_map = s390_iommu_iotlb_sync_map, \
+		.iova_to_phys = s390_iommu_iova_to_phys, \
+		.free = s390_domain_free, \
+	}
+
+static const struct iommu_ops s390_iommu_ops = {
+	S390_IOMMU_COMMON_OPS()
+};
+
+static const struct iommu_ops s390_iommu_rtr_ops = {
+	.identity_domain = &s390_identity_domain,
+	S390_IOMMU_COMMON_OPS()
+};
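
The patch replaces the fixed three-level (region-third) walk with a per-domain table format, so it helps to see how an IOVA decomposes into the five indices used by calc_rfx(), calc_rsx(), calc_rtx(), calc_sx() and calc_px(). The stand-alone sketch below is illustrative and not part of the patch; the shift and mask values are assumptions consistent with the usual s390 layout in arch/s390/include/asm/pci_dma.h (11-bit region and segment indices, an 8-bit page-table index, 4 KiB pages), under which a region-third table spans 4 TiB, the ZPCI_TABLE_SIZE_RT limit the aperture checks compare against.

/* decompose.c - hedged sketch of the calc_*() index helpers */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PT_BITS    8                       /* assumed ZPCI_PT_BITS */
#define TBL_BITS   11                      /* assumed ZPCI_TABLE_BITS */
#define ST_SHIFT   (PAGE_SHIFT + PT_BITS)  /* 20 */
#define RT_SHIFT   (ST_SHIFT + TBL_BITS)   /* 31 */
#define RS_SHIFT   (RT_SHIFT + TBL_BITS)   /* 42 */
#define RF_SHIFT   (RS_SHIFT + TBL_BITS)   /* 53 */
#define IDX_MASK   ((1UL << TBL_BITS) - 1) /* 0x7ff */
#define PT_MASK    ((1UL << PT_BITS) - 1)  /* 0xff */

int main(void)
{
	unsigned long iova = 0x4287654a000UL;	/* arbitrary example above 4 TiB */

	/* Prints: rfx=0 rsx=1 rtx=50 sx=765 px=4a -- an IOVA beyond the
	 * region-third reach lands in region-second entry 1, which is
	 * exactly the case the RSX/RFX formats exist to address. */
	printf("rfx=%lx rsx=%lx rtx=%lx sx=%lx px=%lx\n",
	       (iova >> RF_SHIFT) & IDX_MASK,
	       (iova >> RS_SHIFT) & IDX_MASK,
	       (iova >> RT_SHIFT) & IDX_MASK,
	       (iova >> ST_SHIFT) & IDX_MASK,
	       (iova >> PAGE_SHIFT) & PT_MASK);
	return 0;
}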
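
The dma_walk_rs_table()/dma_walk_rf_table() and dma_get_*_table_origin() helpers added above all follow one lock-free population pattern: build the new entry in a local variable, publish it with a single cmpxchg against ZPCI_TABLE_INVALID, and free the freshly allocated table if another CPU won the race. A minimal user-space restatement with C11 atomics (TABLE_INVALID, ENTRIES and the bare-pointer entries are simplifications, not the kernel's flag-and-address encoding):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define TABLE_INVALID ((uintptr_t)0x20)	/* stand-in for ZPCI_TABLE_INVALID */
#define ENTRIES       2048

static _Atomic uintptr_t *alloc_subtable(void)
{
	_Atomic uintptr_t *t = malloc(ENTRIES * sizeof(*t));
	int i;

	if (t)
		for (i = 0; i < ENTRIES; i++)
			atomic_init(&t[i], TABLE_INVALID);
	return t;
}

/* Populate *slot exactly once; losers free their copy and adopt the
 * winner's table, mirroring dma_get_seg_table_origin() above. */
static _Atomic uintptr_t *get_subtable(_Atomic uintptr_t *slot)
{
	uintptr_t expected = TABLE_INVALID;
	uintptr_t cur = atomic_load(slot);
	_Atomic uintptr_t *sub;

	if (cur != TABLE_INVALID)
		return (_Atomic uintptr_t *)cur;	/* already valid */

	sub = alloc_subtable();
	if (!sub)
		return NULL;

	if (!atomic_compare_exchange_strong(slot, &expected, (uintptr_t)sub)) {
		free(sub);				/* someone else was faster */
		sub = (_Atomic uintptr_t *)expected;	/* use theirs */
	}
	return sub;
}

Because the invalid pattern is the only value a slot can be claimed from, the map path needs no lock; the kernel version additionally packs validity, protection and type flags into the entry itself, which the sketch reduces to a bare pointer to keep the race handling visible.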
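
Finally, s390_domain_alloc_paging() picks the shallowest table format whose reach covers the device's aperture, constrained by the designation types the device reports in zdev->dtsm. Restated as a pure function (the size constants and the 4 TiB/8 PiB reach of the RTX/RSX formats are assumptions consistent with the decomposition sketch above; ZPCI_TABLE_SIZE_RS is taken to be 1ULL << 53):

#include <stdbool.h>
#include <stdint.h>

enum fmt { FMT_RTX, FMT_RSX, FMT_RFX };	/* three, four or five levels */

#define SIZE_RT (1ULL << 42)	/* assumed ZPCI_TABLE_SIZE_RT, 4 TiB */
#define SIZE_RS (1ULL << 53)	/* assumed ZPCI_TABLE_SIZE_RS, 8 PiB */

/* dt_rs/dt_rf model the ZPCI_IOTA_DT_RS/RF bits of zdev->dtsm. */
static enum fmt pick_format(uint64_t aperture, uint64_t start_dma,
			    bool dt_rs, bool dt_rf)
{
	if (aperture <= SIZE_RT - start_dma)
		return FMT_RTX;		/* region-third origin suffices */
	if (aperture <= SIZE_RS - start_dma && dt_rs)
		return FMT_RSX;		/* one more level of reach */
	if (dt_rf)
		return FMT_RFX;		/* full 64-bit IOVA space */
	return FMT_RTX;			/* fallback; caller clamps the aperture */
}

The other half of the contract is get_iota_region_flag(): whichever format the domain uses must also be announced to the hardware, so attach registers the IOTA with the ZPCI_IOTA_RTTO/RSTO/RFTO_FLAG derived from the same origin_type.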
