author	Alistair Popple <alistair@popple.id.au>	2013-12-09 18:17:01 +1100
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-12-30 14:17:06 +1100
commit	e589a4404fa06730355de204d3d136ed9bbc7dea (patch)
tree	a7b4d1dad98a06d89e652194947735db7eec02b5 /drivers
parent	fee26f6d5d68a8815b20c32d15dd70d5384eb937 (diff)
powerpc/iommu: Update constant names to reflect their hardcoded page size
The powerpc iommu uses a hardcoded page size of 4K. This patch changes
the name of the IOMMU_PAGE_* macros to reflect the hardcoded values. A
future patch will use the existing names to support dynamic page sizes.

Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
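For context, the constants being renamed live in arch/powerpc/include/asm/iommu.h;
that header hunk falls outside the drivers/ filter shown below. A sketch of the
renamed definitions, assuming the pre-patch hardcoded 4K values:

	/* Sketch, assuming the pre-patch definitions: the TCE page size
	 * stays hardcoded at 4K; only the names gain a _4K suffix. */
	#define IOMMU_PAGE_SHIFT_4K       12
	#define IOMMU_PAGE_SIZE_4K        (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
	#define IOMMU_PAGE_MASK_4K        (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
	#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)

Shifts such as (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT in the vfio
changes below convert a count of 4K IOMMU pages into a count of system pages
(PAGE_SHIFT is 16 on a 64K-page powerpc kernel).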
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/ibm/ibmveth.c	 9
-rw-r--r--	drivers/vfio/vfio_iommu_spapr_tce.c	28
2 files changed, 19 insertions, 18 deletions
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..f7d7538b6bd9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1282,24 +1282,25 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 
 	/* netdev inits at probe time along with the structures we need below*/
 	if (netdev == NULL)
-		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+		return IOMMU_PAGE_ALIGN_4K(IBMVETH_IO_ENTITLEMENT_DEFAULT);
 
 	adapter = netdev_priv(netdev);
 
 	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
-	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+	ret += IOMMU_PAGE_ALIGN_4K(netdev->mtu);
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		/* add the size of the active receive buffers */
 		if (adapter->rx_buff_pool[i].active)
 			ret +=
 			    adapter->rx_buff_pool[i].size *
-			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+			    IOMMU_PAGE_ALIGN_4K(adapter->rx_buff_pool[i].
 					     buff_size);
 		rxqentries += adapter->rx_buff_pool[i].size;
 	}
 	/* add the size of the receive queue entries */
-	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+	ret += IOMMU_PAGE_ALIGN_4K(
+		rxqentries * sizeof(struct ibmveth_rx_q_entry));
 
 	return ret;
 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
 	 * enforcing the limit based on the max that the guest can map.
 	 */
 	down_write(&current->mm->mmap_sem);
-	npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+	npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	locked = current->mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	down_write(&current->mm->mmap_sem);
 	current->mm->locked_vm -= (container->tbl->it_size <<
-			IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+			IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	up_write(&current->mm->mmap_sem);
 }
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (info.argsz < minsz)
 			return -EINVAL;
 
-		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
-		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
 		info.flags = 0;
 
 		if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
-		if ((param.size & ~IOMMU_PAGE_MASK) ||
-				(param.vaddr & ~IOMMU_PAGE_MASK))
+		if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+				(param.vaddr & ~IOMMU_PAGE_MASK_4K))
 			return -EINVAL;
 
 		/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
 			ret = iommu_put_tce_user_mode(tbl,
-					(param.iova >> IOMMU_PAGE_SHIFT) + i,
+					(param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
 					tce);
 			if (ret)
 				break;
-			tce += IOMMU_PAGE_SIZE;
+			tce += IOMMU_PAGE_SIZE_4K;
 		}
 		if (ret)
 			iommu_clear_tces_and_put_pages(tbl,
-					param.iova >> IOMMU_PAGE_SHIFT, i);
+					param.iova >> IOMMU_PAGE_SHIFT_4K, i);
 
 		iommu_flush_tce(tbl);
 
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (param.flags)
 			return -EINVAL;
 
-		if (param.size & ~IOMMU_PAGE_MASK)
+		if (param.size & ~IOMMU_PAGE_MASK_4K)
 			return -EINVAL;
 
 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		if (ret)
 			return ret;
 
 		ret = iommu_clear_tces_and_put_pages(tbl,
-				param.iova >> IOMMU_PAGE_SHIFT,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.iova >> IOMMU_PAGE_SHIFT_4K,
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		iommu_flush_tce(tbl);
 
 		return ret;