Diffstat (limited to 'drivers/gpu/drm/i915/gvt/gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  181
1 file changed, 22 insertions(+), 159 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4ec85308379a..076d9139edc6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -33,12 +33,15 @@
  *
  */
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
 #include "trace.h"
 
 #include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
 
 #if defined(VERBOSE_DEBUG)
 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
@@ -49,22 +52,6 @@
 static bool enable_out_of_sync = false;
 static int preallocated_oos_pages = 8192;
 
-static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
-{
-	struct kvm *kvm = vgpu->vfio_device.kvm;
-	int idx;
-	bool ret;
-
-	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
-		return false;
-
-	idx = srcu_read_lock(&kvm->srcu);
-	ret = kvm_is_visible_gfn(kvm, gfn);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return ret;
-}
-
 /*
  * validate a gm address and related range size,
  * translate it to host gm address
@@ -86,72 +73,6 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 	return false;
 }
 
-/* translate a guest gmadr to host gmadr */
-int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
-{
-	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
-	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
-		     "invalid guest gmadr %llx\n", g_addr))
-		return -EACCES;
-
-	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
-		*h_addr = vgpu_aperture_gmadr_base(vgpu)
-			  + (g_addr - vgpu_aperture_offset(vgpu));
-	else
-		*h_addr = vgpu_hidden_gmadr_base(vgpu)
-			  + (g_addr - vgpu_hidden_offset(vgpu));
-	return 0;
-}
-
-/* translate a host gmadr to guest gmadr */
-int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
-{
-	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
-	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
-		     "invalid host gmadr %llx\n", h_addr))
-		return -EACCES;
-
-	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
-		*g_addr = vgpu_aperture_gmadr_base(vgpu)
-			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
-	else
-		*g_addr = vgpu_hidden_gmadr_base(vgpu)
-			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
-	return 0;
-}
-
-int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
-			     unsigned long *h_index)
-{
-	u64 h_addr;
-	int ret;
-
-	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
-				       &h_addr);
-	if (ret)
-		return ret;
-
-	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
-	return 0;
-}
-
-int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
-			     unsigned long *g_index)
-{
-	u64 g_addr;
-	int ret;
-
-	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
-				       &g_addr);
-	if (ret)
-		return ret;
-
-	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
-	return 0;
-}
-
 #define gtt_type_is_entry(type) \
 	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
 	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
@@ -301,9 +222,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 
 static void ggtt_invalidate(struct intel_gt *gt)
 {
-	mmio_hw_access_pre(gt);
+	intel_wakeref_t wakeref;
+
+	wakeref = mmio_hw_access_pre(gt);
 	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	mmio_hw_access_post(gt);
+	mmio_hw_access_post(gt, wakeref);
 }
 
 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
@@ -1161,31 +1084,6 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 	ops->set_pfn(se, s->shadow_page.mfn);
 }
 
-/*
- * Check if can do 2M page
- * @vgpu: target vgpu
- * @entry: target pfn's gtt entry
- *
- * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
- * negative if found err.
- */
-static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
-	struct intel_gvt_gtt_entry *entry)
-{
-	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	kvm_pfn_t pfn;
-
-	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
-		return 0;
-
-	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
-		return -EINVAL;
-	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
-	if (is_error_noslot_pfn(pfn))
-		return -EINVAL;
-	return PageTransHuge(pfn_to_page(pfn));
-}
-
 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *se)
@@ -1230,7 +1128,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	ppgtt_set_shadow_entry(spt, se, index);
 	return 0;
 err:
-	/* Cancel the existing addess mappings of DMA addr. */
+	/* Cancel the existing address mappings of DMA addr. */
 	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
 		gvt_vdbg_mm("invalidate 4K entry\n");
 		ppgtt_invalidate_pte(sub_spt, &sub_se);
@@ -1279,7 +1177,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 {
 	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry se = *ge;
-	unsigned long gfn, page_size = PAGE_SIZE;
+	unsigned long gfn;
 	dma_addr_t dma_addr;
 	int ret;
 
@@ -1291,36 +1189,34 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	switch (ge->type) {
 	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
 		gvt_vdbg_mm("shadow 4K gtt entry\n");
+		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
+		if (ret)
+			return -ENXIO;
 		break;
 	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 		gvt_vdbg_mm("shadow 64K gtt entry\n");
 		/*
 		 * The layout of 64K page is special, the page size is
-		 * controlled by uper PDE. To be simple, we always split
+		 * controlled by upper PDE. To be simple, we always split
 		 * 64K page to smaller 4K pages in shadow PT.
 		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 		gvt_vdbg_mm("shadow 2M gtt entry\n");
-		ret = is_2MB_gtt_possible(vgpu, ge);
-		if (ret == 0)
+		if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
+		    intel_gvt_dma_map_guest_page(vgpu, gfn,
+						 I915_GTT_PAGE_SIZE_2M, &dma_addr))
 			return split_2MB_gtt_entry(vgpu, spt, index, &se);
-		else if (ret < 0)
-			return ret;
-		page_size = I915_GTT_PAGE_SIZE_2M;
 		break;
 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
 		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
+		return -EINVAL;
 	}
 
-	/* direct shadow */
-	ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
-	if (ret)
-		return -ENXIO;
-
+	/* Successfully shadowed a 4K or 2M page (without splitting). */
 	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
 	ppgtt_set_shadow_entry(spt, &se, index);
 	return 0;
@@ -1329,11 +1225,9 @@
 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
-	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long gfn, i;
+	unsigned long i;
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1350,13 +1244,6 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
 			ppgtt_generate_shadow_entry(&se, s, &ge);
 			ppgtt_set_shadow_entry(spt, &se, i);
 		} else {
-			gfn = ops->get_pfn(&ge);
-			if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
-				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
-				ppgtt_set_shadow_entry(spt, &se, i);
-				continue;
-			}
-
 			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
 			if (ret)
 				goto fail;
@@ -1845,6 +1732,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
 	if (mm->ppgtt_mm.shadowed)
 		return 0;
 
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+		return -EINVAL;
+
 	mm->ppgtt_mm.shadowed = true;
 
 	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
@@ -2331,14 +2221,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		m.val64 = e.val64;
 		m.type = e.type;
 
-		/* one PTE update may be issued in multiple writes and the
-		 * first write may not construct a valid gfn
-		 */
-		if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
-			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-			goto out;
-		}
-
 		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
 						   &dma_addr);
 		if (ret) {
@@ -2355,7 +2237,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->clear_present(&m);
 	}
 
-out:
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 
 	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
@@ -2876,24 +2757,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 }
 
 /**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
-	/* Shadow pages are only created when there is no page
-	 * table tracking data, so remove page tracking data after
-	 * removing the shadow pages.
-	 */
-	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-	intel_vgpu_reset_ggtt(vgpu, true);
-}
-
-/**
  * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
  * @gvt: intel gvt device
  *
