author    Alexey Kardashevskiy <aik@ozlabs.ru>    2018-05-14 20:00:28 +1000
committer Paul Mackerras <paulus@ozlabs.org>     2018-05-17 16:41:51 +1000
commit    ca1fc489cfa06a554fd71eb46d8927614ec7e6f3
tree      b94c12c7060fed20667df64963b656005032b5e2
parent    c6b61661d229e42b58d5e511191e925d105a5cce
KVM: PPC: Book3S: Allow backing bigger guest IOMMU pages with smaller physical pages
At the moment the host only supports the IOMMU page sizes which the guest is aware of: 4KB, 64KB and 16MB. However, P9 does not support 16MB IOMMU pages; 2MB and 1GB pages are supported instead. We can still emulate bigger guest pages (for example 16MB) with smaller host pages (4KB/64KB/2MB).

This allows the physical IOMMU pages to use a page size smaller than or equal to the guest-visible IOMMU page size.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
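For orientation, here is a minimal standalone sketch (not kernel code; do_map_stub, guest_shift and host_shift are invented names standing in for kvmppc_rm_tce_iommu_do_map(), stt->page_shift and tbl->it_page_shift) of the fan-out this patch introduces: when the guest-visible IOMMU page is bigger than the host IOMMU page, one guest TCE entry is backed by 1 << (stt->page_shift - tbl->it_page_shift) consecutive host entries, each mapping the next host-page-sized slice of the guest page.

#include <stdio.h>

/* Hypothetical stub standing in for kvmppc_rm_tce_iommu_do_map(). */
static long do_map_stub(unsigned long io_entry, unsigned long ua)
{
	printf("map host entry %lu -> ua 0x%lx\n", io_entry, ua);
	return 0;	/* H_SUCCESS */
}

/*
 * Fan one guest TCE entry out over its host subpages, mirroring the
 * loop in the new kvmppc_rm_tce_iommu_map() wrapper.
 */
static long map_guest_entry(unsigned int guest_shift, unsigned int host_shift,
			    unsigned long entry, unsigned long ua)
{
	unsigned long subpages = 1UL << (guest_shift - host_shift);
	unsigned long io_entry = entry * subpages;	/* first host entry */
	unsigned long i, pgoff;
	long ret = 0;

	for (i = 0, pgoff = 0; i < subpages; ++i, pgoff += 1UL << host_shift) {
		ret = do_map_stub(io_entry + i, ua + pgoff);
		if (ret)
			break;	/* stop at the first failing subpage */
	}
	return ret;
}

int main(void)
{
	/* A 16MB guest IOMMU page (shift 24) backed by 64KB host pages
	 * (shift 16) fans out into 256 host entries. */
	return (int)map_guest_entry(24, 16, 3, 0x10000000UL);
}

With those numbers, subpages is 256 and guest entry 3 expands to host entries 768..1023.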
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_vio_hv.c')
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index e220fabb2f5d..635f3ca8129a 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
return H_SUCCESS;
}
-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
struct iommu_table *tbl, unsigned long entry)
{
enum dma_data_direction dir = DMA_NONE;
@@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
return ret;
}
-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+ unsigned long entry)
+{
+ unsigned long i, ret = H_SUCCESS;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry * subpages;
+
+ for (i = 0; i < subpages; ++i) {
+ ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
+ if (ret != H_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
+static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
@@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
return 0;
}
+static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+{
+ unsigned long i, pgoff, ret = H_SUCCESS;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry * subpages;
+
+ for (i = 0, pgoff = 0; i < subpages;
+ ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
+
+ ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
+ io_entry + i, ua + pgoff, dir);
+ if (ret != H_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
@@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (dir == DMA_NONE)
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry);
else
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
if (ret == H_SUCCESS)
@@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
@@ -529,7 +567,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
unsigned long entry = ioba >> stt->page_shift;
for (i = 0; i < npages; ++i) {
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry + i);
if (ret == H_SUCCESS)
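A note on the design: callers such as kvmppc_rm_h_put_tce and kvmppc_rm_h_stuff_tce keep indexing in guest page units (entry = ioba >> stt->page_shift) and merely gain the extra stt argument; the guest-to-host fan-out lives entirely in the two new wrappers. The code also relies on the invariant stated in the commit message, stt->page_shift >= tbl->it_page_shift, since the subpages shift would otherwise be negative. A minimal sketch of the full index chain, under the same invented names as the sketch above:

#include <stdio.h>

int main(void)
{
	unsigned int guest_shift = 24, host_shift = 16;	/* 16MB / 64KB */
	unsigned long ioba = 0x3000000UL;	/* bus offset from the guest */

	/* Callers still compute the entry in guest page units... */
	unsigned long entry = ioba >> guest_shift;		/* = 3 */
	/* ...and the wrappers expand it into host IOMMU entries. */
	unsigned long subpages = 1UL << (guest_shift - host_shift); /* 256 */
	unsigned long io_entry = entry * subpages;		/* = 768 */

	printf("guest entry %lu -> host entries %lu..%lu\n",
	       entry, io_entry, io_entry + subpages - 1);
	return 0;
}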