path: root/arch/s390/pci/pci_dma.c
author		Sebastian Ott <sebott@linux.vnet.ibm.com>	2016-01-29 15:13:30 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-02-23 08:56:16 +0100
commit		9a99649f2a89fdfc9dde5d5401675561567bf99a (patch)
tree		b15f95ac2c6a81c3dd85b4ab815406c164979d5f /arch/s390/pci/pci_dma.c
parent		1b17cb796f5d40ffa239c6926385abd83a77a49b (diff)
s390/pci: remove pdev pointer from arch data
For each PCI function we need to maintain arch-specific data in struct
zpci_dev, which also contains a pointer to struct pci_dev. When a function
is registered or deregistered (which is triggered by PCI common code) we
need to adjust that pointer, which could interfere with the machine check
handler (triggered by firmware) using zpci_dev->pdev. Since multiple
instances of the same pdev could exist at a time, this can't be solved
with locking.

Fix that by ditching the pdev pointer and using a bus walk to reach
struct pci_dev (only one instance of a pdev can be registered at the bus
at a time).

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
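A minimal sketch of the bus-walk idea described above, assuming the
zpci_dev of this era still carries its struct pci_bus in zdev->bus and
that the function sits at devfn 0 (the ZPCI_DEVFN constant and the
zdev_to_pdev() name below are assumptions of this sketch); pci_get_slot()
is the PCI core's bus lookup and returns a referenced pci_dev that the
caller must drop with pci_dev_put():

	#include <linux/pci.h>
	#include <asm/pci.h>	/* struct zpci_dev */

	#define ZPCI_DEVFN 0	/* assumed: one function per bus, at devfn 0 */

	/* Resolve the pci_dev on demand instead of caching it in zpci_dev. */
	static struct pci_dev *zdev_to_pdev(struct zpci_dev *zdev)
	{
		/*
		 * Only one pci_dev instance per function can be registered
		 * on the bus at a time, so a lookup cannot return the stale
		 * pointer a cached zdev->pdev could during (de)registration.
		 */
		return pci_get_slot(zdev->bus, ZPCI_DEVFN); /* drop with pci_dev_put() */
	}

With this, the machine check handler takes its own reference for the
duration of its work instead of trusting a pointer that registration or
deregistration may be rewriting concurrently.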
Diffstat (limited to 'arch/s390/pci/pci_dma.c')
-rw-r--r--	arch/s390/pci/pci_dma.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
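The hunks below apply that idea to the DMA path: the IOMMU allocation
helpers now take the generic struct device * and derive the arch data
where it is needed. A sketch of the accessor chain they rely on
(dev_to_zdev() is a hypothetical wrapper for illustration; to_pci_dev()
is the PCI core's container_of() helper and to_zpci() is the s390
accessor the patch itself uses):

	#include <linux/pci.h>
	#include <asm/pci.h>	/* to_zpci(), struct zpci_dev */

	/* The struct device handed to dma_map_ops is embedded in a pci_dev. */
	static struct zpci_dev *dev_to_zdev(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);	/* container_of() */

		return to_zpci(pdev);	/* s390 arch data attached to the pci_dev */
	}

This reverses the direction of the pointer chase: the helpers no longer
need a zdev->pdev back-pointer, only the arch data reachable from the
pci_dev, which is stable for the lifetime of that pci_dev.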
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 4638b93c7632..a06ce8037cec 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -217,27 +217,29 @@ void dma_cleanup_tables(unsigned long *table)
 	dma_free_cpu_table(table);
 }
 
-static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
+static unsigned long __dma_alloc_iommu(struct device *dev,
 				       unsigned long start, int size)
 {
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long boundary_size;
 
-	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
 	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
 }
 
-static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
+static unsigned long dma_alloc_iommu(struct device *dev, int size)
 {
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long offset, flags;
 	int wrap = 0;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
-	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
+	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
 		/* wrap-around */
-		offset = __dma_alloc_iommu(zdev, 0, size);
+		offset = __dma_alloc_iommu(dev, 0, size);
 		wrap = 1;
 	}
 
@@ -251,8 +253,9 @@ static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
 	return offset;
 }
 
-static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
+static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
 {
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long flags;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
@@ -293,7 +296,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
-	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
+	iommu_page_index = dma_alloc_iommu(dev, nr_pages);
 	if (iommu_page_index == -1) {
 		ret = -ENOSPC;
 		goto out_err;
@@ -319,7 +322,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
-	dma_free_iommu(zdev, iommu_page_index, nr_pages);
+	dma_free_iommu(dev, iommu_page_index, nr_pages);
 out_err:
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
@@ -346,7 +349,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 
 	atomic64_add(npages, &zdev->unmapped_pages);
 	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
-	dma_free_iommu(zdev, iommu_page_index, npages);
+	dma_free_iommu(dev, iommu_page_index, npages);
 }
 
 static void *s390_dma_alloc(struct device *dev, size_t size,