Diffstat (limited to 'drivers/gpu/drm/i915/gvt/gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c	311

1 file changed, 97 insertions(+), 214 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index cc2c05e18206..076d9139edc6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -33,11 +33,16 @@
  *
  */
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
 #include "trace.h"
 
+#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
+
 #if defined(VERBOSE_DEBUG)
 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
 #else
@@ -68,72 +73,6 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 	return false;
 }
 
-/* translate a guest gmadr to host gmadr */
-int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
-{
-	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
-	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
-		     "invalid guest gmadr %llx\n", g_addr))
-		return -EACCES;
-
-	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
-		*h_addr = vgpu_aperture_gmadr_base(vgpu)
-			  + (g_addr - vgpu_aperture_offset(vgpu));
-	else
-		*h_addr = vgpu_hidden_gmadr_base(vgpu)
-			  + (g_addr - vgpu_hidden_offset(vgpu));
-	return 0;
-}
-
-/* translate a host gmadr to guest gmadr */
-int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
-{
-	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
-	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
-		     "invalid host gmadr %llx\n", h_addr))
-		return -EACCES;
-
-	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
-		*g_addr = vgpu_aperture_gmadr_base(vgpu)
-			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
-	else
-		*g_addr = vgpu_hidden_gmadr_base(vgpu)
-			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
-	return 0;
-}
-
-int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
-			     unsigned long *h_index)
-{
-	u64 h_addr;
-	int ret;
-
-	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
-				       &h_addr);
-	if (ret)
-		return ret;
-
-	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
-	return 0;
-}
-
-int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
-			     unsigned long *g_index)
-{
-	u64 g_addr;
-	int ret;
-
-	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
-				       &g_addr);
-	if (ret)
-		return ret;
-
-	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
-	return 0;
-}
-
 #define gtt_type_is_entry(type) \
 	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
 	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
@@ -185,7 +124,7 @@ struct gtt_type_table_entry {
 		.pse_entry_type = pse_type, \
 	}
 
-static struct gtt_type_table_entry gtt_type_table[] = {
+static const struct gtt_type_table_entry gtt_type_table[] = {
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 			GTT_TYPE_INVALID,
@@ -264,11 +203,6 @@ static inline int get_next_pt_type(int type)
 	return gtt_type_table[type].next_pt_type;
 }
 
-static inline int get_pt_type(int type)
-{
-	return gtt_type_table[type].pt_type;
-}
-
 static inline int get_entry_type(int type)
 {
 	return gtt_type_table[type].entry_type;
@@ -288,9 +222,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 
 static void ggtt_invalidate(struct intel_gt *gt)
 {
-	mmio_hw_access_pre(gt);
+	intel_wakeref_t wakeref;
+
+	wakeref = mmio_hw_access_pre(gt);
 	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	mmio_hw_access_post(gt);
+	mmio_hw_access_post(gt, wakeref);
 }
 
 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
@@ -312,7 +248,7 @@ static inline int gtt_get_entry64(void *pt,
 		return -EINVAL;
 
 	if (hypervisor_access) {
-		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+		ret = intel_gvt_read_gpa(vgpu, gpa +
 				(index << info->gtt_entry_size_shift),
 				&e->val64, 8);
 		if (WARN_ON(ret))
@@ -337,7 +273,7 @@ static inline int gtt_set_entry64(void *pt,
 		return -EINVAL;
 
 	if (hypervisor_access) {
-		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+		ret = intel_gvt_write_gpa(vgpu, gpa +
 				(index << info->gtt_entry_size_shift),
 				&e->val64, 8);
 		if (WARN_ON(ret))
@@ -446,17 +382,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 	    || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 		return (e->val64 != 0);
 	else
-		return (e->val64 & _PAGE_PRESENT);
+		return (e->val64 & GEN8_PAGE_PRESENT);
 }
 
 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
 {
-	e->val64 &= ~_PAGE_PRESENT;
+	e->val64 &= ~GEN8_PAGE_PRESENT;
 }
 
 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
 {
-	e->val64 |= _PAGE_PRESENT;
+	e->val64 |= GEN8_PAGE_PRESENT;
 }
 
 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
@@ -499,7 +435,7 @@ DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
 
-static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
+static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.get_entry = gtt_get_entry64,
 	.set_entry = gtt_set_entry64,
 	.clear_present = gtt_entry_clear_present,
@@ -516,7 +452,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.set_pfn = gen8_gtt_set_pfn,
 };
 
-static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
+static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
 	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
 	.gma_to_pte_index = gen8_gma_to_pte_index,
 	.gma_to_pde_index = gen8_gma_to_pde_index,
@@ -526,7 +462,7 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
 };
 
 /* Update entry type per pse and ips bit. */
-static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
 	struct intel_gvt_gtt_entry *entry, bool ips)
 {
 	switch (entry->type) {
@@ -553,7 +489,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index,
 		bool guest)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
 
@@ -580,7 +516,7 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index,
 		bool guest)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
 	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
 			   mm->ppgtt_mm.shadow_pdps,
@@ -596,7 +532,7 @@ static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 
@@ -608,7 +544,7 @@ static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 
@@ -619,7 +555,7 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 
 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 
@@ -629,7 +565,7 @@ static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 	unsigned long offset = index;
 
 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
@@ -655,7 +591,7 @@ static inline int ppgtt_spt_get_entry(
 	bool guest)
 {
 	struct intel_gvt *gvt = spt->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	int ret;
 
 	e->type = get_entry_type(type);
@@ -684,7 +620,7 @@ static inline int ppgtt_spt_set_entry(
 	bool guest)
 {
 	struct intel_gvt *gvt = spt->vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 
 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
 		return -EINVAL;
@@ -745,7 +681,7 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
 
 	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
-		       PCI_DMA_BIDIRECTIONAL);
+		       DMA_BIDIRECTIONAL);
 
 	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
@@ -849,7 +785,7 @@ retry:
 	 */
 	spt->shadow_page.type = type;
 	daddr = dma_map_page(kdev, spt->shadow_page.page,
-			     0, 4096, PCI_DMA_BIDIRECTIONAL);
+			     0, 4096, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(kdev, daddr)) {
 		gvt_vgpu_err("fail to map dma addr\n");
 		ret = -EINVAL;
@@ -865,7 +801,7 @@ retry:
 	return spt;
 
 err_unmap_dma:
-	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 err_free_spt:
 	free_spt(spt);
 	return ERR_PTR(ret);
@@ -947,7 +883,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 		struct intel_gvt_gtt_entry *e)
 {
 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	enum intel_gvt_gtt_type cur_pt_type;
 
@@ -984,7 +920,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
 		struct intel_gvt_gtt_entry *entry)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	unsigned long pfn;
 	int type;
 
@@ -995,7 +931,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
 	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
 		return;
 
-	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
@@ -1055,7 +991,7 @@ static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
-	if (GRAPHICS_VER(dev_priv) == 9 || GRAPHICS_VER(dev_priv) == 10) {
+	if (GRAPHICS_VER(dev_priv) == 9) {
 		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
 			GAMW_ECO_ENABLE_64K_IPS_FIELD;
 
@@ -1072,7 +1008,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
 {
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
 	bool ips = false;
 	int ret;
@@ -1136,7 +1072,7 @@ err:
 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
 {
-	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
 
 	se->type = ge->type;
 	se->val64 = ge->val64;
@@ -1148,35 +1084,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 	ops->set_pfn(se, s->shadow_page.mfn);
 }
 
-/**
- * Check if can do 2M page
- * @vgpu: target vgpu
- * @entry: target pfn's gtt entry
- *
- * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
- * negative if found err.
- */
-static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
-	struct intel_gvt_gtt_entry *entry)
-{
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	unsigned long pfn;
-
-	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
-		return 0;
-
-	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
-	if (pfn == INTEL_GVT_INVALID_ADDR)
-		return -EINVAL;
-
-	return PageTransHuge(pfn_to_page(pfn));
-}
-
 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *se)
 {
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *sub_spt;
 	struct intel_gvt_gtt_entry sub_se;
 	unsigned long start_gfn;
@@ -1193,12 +1105,10 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 		return PTR_ERR(sub_spt);
 
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
-		if (ret) {
-			ppgtt_invalidate_spt(spt);
-			return ret;
-		}
+		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
+						   PAGE_SIZE, &dma_addr);
+		if (ret)
+			goto err;
 		sub_se.val64 = se->val64;
 
 		/* Copy the PAT field from PDE. */
@@ -1217,13 +1127,24 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
 	ppgtt_set_shadow_entry(spt, se, index);
 	return 0;
+err:
+	/* Cancel the existing address mappings of DMA addr. */
+	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+		gvt_vdbg_mm("invalidate 4K entry\n");
+		ppgtt_invalidate_pte(sub_spt, &sub_se);
+	}
+	/* Release the new allocated spt. */
+	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+			 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+	ppgtt_free_spt(sub_spt);
+	return ret;
 }
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *se)
 {
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry entry = *se;
 	unsigned long start_gfn;
 	dma_addr_t dma_addr;
@@ -1239,8 +1160,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_64k_splited(&entry);
 
 	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-					start_gfn + i, PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
+						   PAGE_SIZE, &dma_addr);
 		if (ret)
 			return ret;
 
@@ -1254,9 +1175,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *ge)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry se = *ge;
-	unsigned long gfn, page_size = PAGE_SIZE;
+	unsigned long gfn;
 	dma_addr_t dma_addr;
 	int ret;
 
@@ -1268,37 +1189,34 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	switch (ge->type) {
 	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
 		gvt_vdbg_mm("shadow 4K gtt entry\n");
+		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
+		if (ret)
+			return -ENXIO;
 		break;
 	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 		gvt_vdbg_mm("shadow 64K gtt entry\n");
 		/*
 		 * The layout of 64K page is special, the page size is
-		 * controlled by uper PDE. To be simple, we always split
+		 * controlled by upper PDE. To be simple, we always split
		 * 64K page to smaller 4K pages in shadow PT.
 		 */
 		return split_64KB_gtt_entry(vgpu, spt, index, &se);
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 		gvt_vdbg_mm("shadow 2M gtt entry\n");
-		ret = is_2MB_gtt_possible(vgpu, ge);
-		if (ret == 0)
+		if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
+		    intel_gvt_dma_map_guest_page(vgpu, gfn,
+						 I915_GTT_PAGE_SIZE_2M, &dma_addr))
 			return split_2MB_gtt_entry(vgpu, spt, index, &se);
-		else if (ret < 0)
-			return ret;
-		page_size = I915_GTT_PAGE_SIZE_2M;
 		break;
 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
 		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
+		return -EINVAL;
 	}
 
-	/* direct shadow */
-	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
-						      &dma_addr);
-	if (ret)
-		return -ENXIO;
-
+	/* Successfully shadowed a 4K or 2M page (without splitting). */
 	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
 	ppgtt_set_shadow_entry(spt, &se, index);
 	return 0;
@@ -1307,11 +1225,9 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long gfn, i;
+	unsigned long i;
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1328,13 +1244,6 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
 			ppgtt_generate_shadow_entry(&se, s, &ge);
 			ppgtt_set_shadow_entry(spt, &se, i);
 		} else {
-			gfn = ops->get_pfn(&ge);
-			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
-				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
-				ppgtt_set_shadow_entry(spt, &se, i);
-				continue;
-			}
-
 			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
 			if (ret)
 				goto fail;
@@ -1351,7 +1260,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
 		struct intel_gvt_gtt_entry *se, unsigned long index)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	int ret;
 
 	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
@@ -1432,7 +1341,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
 {
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
 	struct intel_gvt_gtt_entry old, new;
 	int index;
@@ -1495,7 +1404,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
 	struct intel_gvt *gvt = spt->vgpu->gvt;
 	int ret;
 
-	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+	ret = intel_gvt_read_gpa(spt->vgpu,
 			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 			oos_page->mem, I915_GTT_PAGE_SIZE);
 	if (ret)
@@ -1603,7 +1512,7 @@ static int ppgtt_handle_guest_write_page_table(
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
 	int type = spt->shadow_page.type;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry old_se;
 	int new_present;
 	int i, ret;
@@ -1720,7 +1629,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 		u64 pa, void *p_data, int bytes)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 	struct intel_gvt_gtt_entry we, se;
 	unsigned long index;
@@ -1785,7 +1694,7 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_gtt *gtt = &gvt->gtt;
-	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
 	struct intel_gvt_gtt_entry se;
 	int index;
 
@@ -1815,7 +1724,7 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_gtt *gtt = &gvt->gtt;
-	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
 	struct intel_vgpu_ppgtt_spt *spt;
 	struct intel_gvt_gtt_entry ge, se;
 	int index, ret;
@@ -1823,6 +1732,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
 	if (mm->ppgtt_mm.shadowed)
 		return 0;
 
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+		return -EINVAL;
+
 	mm->ppgtt_mm.shadowed = true;
 
 	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
@@ -2067,7 +1979,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
 {
 	struct intel_vgpu *vgpu = mm->vgpu;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 
 	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
@@ -2096,8 +2008,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 {
 	struct intel_vgpu *vgpu = mm->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
-	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
 	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
 	unsigned long gma_index[4];
 	struct intel_gvt_gtt_entry e;
@@ -2193,7 +2105,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 }
 
 /**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
  * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
@@ -2221,13 +2133,12 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
 		struct intel_gvt_gtt_entry *entry)
 {
-	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	unsigned long pfn;
 
 	pfn = pte_ops->get_pfn(entry);
 	if (pfn != vgpu->gvt->gtt.scratch_mfn)
-		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
-						pfn << PAGE_SHIFT);
+		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
@@ -2236,7 +2147,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_gvt *gvt = vgpu->gvt;
 	const struct intel_gvt_device_info *info = &gvt->device_info;
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
-	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
@@ -2310,21 +2221,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		m.val64 = e.val64;
 		m.type = e.type;
 
-		/* one PTE update may be issued in multiple writes and the
-		 * first write may not construct a valid gfn
-		 */
-		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
-			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-			goto out;
-		}
-
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
-							PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
+						   &dma_addr);
 		if (ret) {
 			gvt_vgpu_err("fail to populate guest ggtt entry\n");
 			/* guest driver may read/write the entry when partial
 			 * update the entry in this situation p2m will fail
-			 * settting the shadow entry to point to a scratch page
+			 * setting the shadow entry to point to a scratch page
 			 */
 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 		} else
@@ -2334,7 +2237,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->clear_present(&m);
 	}
 
-out:
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 
 	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
@@ -2391,7 +2293,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 {
 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
-	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	int page_entry_num = I915_GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
@@ -2409,8 +2311,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 
-	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
-			4096, PCI_DMA_BIDIRECTIONAL);
+	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
 		gvt_vgpu_err("fail to dmamap scratch_pt\n");
 		__free_page(virt_to_page(scratch_pt));
@@ -2440,7 +2341,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	/* The entry parameters like present/writeable/cache type
 	 * set to the same as i915's scratch page tree.
 	 */
-	se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+	se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
 	if (type == GTT_TYPE_PPGTT_PDE_PT)
 		se.val64 |= PPAT_CACHED;
 
@@ -2461,7 +2362,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
 			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
 					I915_GTT_PAGE_SHIFT);
-			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2741,7 +2642,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 	}
 
 	daddr = dma_map_page(dev, virt_to_page(page), 0,
-			4096, PCI_DMA_BIDIRECTIONAL);
+			4096, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
 		gvt_err("fail to dmamap scratch ggtt page\n");
 		__free_page(virt_to_page(page));
@@ -2755,7 +2656,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		ret = setup_spt_oos(gvt);
 		if (ret) {
 			gvt_err("fail to initialize SPT oos\n");
-			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
 			__free_page(gvt->gtt.scratch_page);
 			return ret;
 		}
@@ -2769,7 +2670,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
- * This function is called at the driver unloading stage, to clean up the
+ * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
@@ -2779,7 +2680,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
 					I915_GTT_PAGE_SHIFT);
 
-	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
 
 	__free_page(gvt->gtt.scratch_page);
 
@@ -2823,7 +2724,7 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
 	struct intel_gvt_gtt_entry old_entry;
 	u32 index;
@@ -2856,24 +2757,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 }
 
 /**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
-	/* Shadow pages are only created when there is no page
-	 * table tracking data, so remove page tracking data after
-	 * removing the shadow pages.
-	 */
-	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-	intel_vgpu_reset_ggtt(vgpu, true);
-}
-
-/**
 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
 * @gvt: intel gvt device
 *
@@ -2897,7 +2780,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
 		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
 		for (idx = 0; idx < num_low; idx++) {
 			pte = mm->ggtt_mm.host_ggtt_aperture[idx];
-			if (pte & _PAGE_PRESENT)
+			if (pte & GEN8_PAGE_PRESENT)
 				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
 		}
 
@@ -2905,7 +2788,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
 		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
 		for (idx = 0; idx < num_hi; idx++) {
 			pte = mm->ggtt_mm.host_ggtt_hidden[idx];
-			if (pte & _PAGE_PRESENT)
+			if (pte & GEN8_PAGE_PRESENT)
 				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
 		}
 	}
 }
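A recurring change in this diff is const-ifying the gen8 PTE/GMA ops tables and every pointer that consumes them. A minimal sketch of the idea, assuming a hypothetical consumer (dump_entry() below is illustrative, not from the file): a static const method table is placed in .rodata, so its function pointers cannot be silently overwritten at runtime, and the compiler enforces read-only use at every call site.

/* Sketch: 'static const' ops table as used throughout the diff.
 * The table shape matches the file's gen8_gtt_pte_ops; the consumer
 * function below is a hypothetical example, not driver code.
 */
static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	/* ... remaining hooks as in the hunk at -499 ... */
};

/* Consumers now take pointer-to-const, so reassigning a hook through
 * 'ops' becomes a compile error rather than a latent runtime bug.
 */
static void dump_entry(const struct intel_gvt_gtt_pte_ops *ops,
		       struct intel_gvt_gtt_entry *e)
{
	pr_info("pfn=%lx\n", ops->get_pfn(e));
}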
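The ggtt_invalidate() hunk threads an explicit intel_wakeref_t through the MMIO pre/post helpers instead of having them manage the reference implicitly. A minimal sketch of the pattern, assuming mmio_hw_access_pre() wraps intel_runtime_pm_get(); the helper bodies here are assumptions, only the call shape comes from the diff:

/* Sketch of wakeref threading: the caller owns the wakeref for the
 * duration of the MMIO access and must hand the same handle back,
 * which makes unbalanced get/put pairs visible at the call site.
 * Assumed helper bodies; only the signatures match the diff.
 */
static intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
	return intel_runtime_pm_get(gt->uncore->rpm);
}

static void mmio_hw_access_post(struct intel_gt *gt, intel_wakeref_t wakeref)
{
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static void ggtt_invalidate(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	wakeref = mmio_hw_access_pre(gt);
	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(gt, wakeref);
}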
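The 2M shadowing path is also restructured: the separate is_2MB_gtt_possible() probe is dropped in favor of simply attempting a 2M DMA mapping and falling back to split_2MB_gtt_entry() on failure, and the split path gains an err: label so a mid-loop mapping failure releases exactly what was built. A condensed sketch of that unwind flow, with names taken from the hunks above and the surrounding code elided:

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
						   PAGE_SIZE, &dma_addr);
		if (ret)
			goto err;	/* unwind instead of leaking mappings */
		/* ... fill in and write sub_se ... */
	}
	return 0;
err:
	/* Only entries already made present hold DMA mappings, so this
	 * walk cancels exactly the pages mapped before the failure.
	 */
	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index)
		ppgtt_invalidate_pte(sub_spt, &sub_se);
	ppgtt_free_spt(sub_spt);	/* drop the half-built shadow table */
	return ret;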
