Diffstat (limited to 'drivers/gpu/drm/xe/display/xe_fb_pin.c')
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_fb_pin.c | 220
1 files changed, 151 insertions, 69 deletions
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 722c84a56607..55259969480b 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -3,15 +3,18 @@
  * Copyright © 2021 Intel Corporation
  */
 
-#include "i915_drv.h"
+#include <drm/ttm/ttm_bo.h>
+
+#include "i915_vma.h"
 #include "intel_display_types.h"
 #include "intel_dpt.h"
 #include "intel_fb.h"
 #include "intel_fb_pin.h"
+#include "intel_fbdev.h"
+#include "xe_bo.h"
+#include "xe_device.h"
 #include "xe_ggtt.h"
-#include "xe_gt.h"
-
-#include <drm/ttm/ttm_bo.h>
+#include "xe_pm.h"
 
 static void
 write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
@@ -30,7 +33,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
 
 		for (row = 0; row < height; row++) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
 			iosys_map_wr(map, *dpt_ofs, u64, pte);
 			*dpt_ofs += 8;
@@ -62,7 +65,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
 		for (column = 0; column < width; column++) {
 			iosys_map_wr(map, *dpt_ofs, u64,
 				     pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
-				     xe->pat.idx[XE_CACHE_WB]));
+				     xe->pat.idx[XE_CACHE_NONE]));
 
 			*dpt_ofs += 8;
 			src_idx++;
@@ -76,14 +79,16 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
 	*dpt_ofs = ALIGN(*dpt_ofs, 4096);
 }
 
-static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
+static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
 			       const struct i915_gtt_view *view,
-			       struct i915_vma *vma)
+			       struct i915_vma *vma,
+			       unsigned int alignment)
 {
 	struct xe_device *xe = to_xe_device(fb->base.dev);
 	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
 	struct xe_ggtt *ggtt = tile0->mem.ggtt;
-	struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt;
+	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+	struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
 	u32 dpt_size, size = bo->ttm.base.size;
 
 	if (view->type == I915_GTT_VIEW_NORMAL)
@@ -97,20 +102,29 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 				 XE_PAGE_SIZE);
 
 	if (IS_DGFX(xe))
-		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
-					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_VRAM0_BIT |
-					   XE_BO_CREATE_GGTT_BIT);
+		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+						      dpt_size, ~0ull,
+						      ttm_bo_type_kernel,
+						      XE_BO_FLAG_VRAM0 |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_PAGETABLE,
+						      alignment);
 	else
-		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
-					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_STOLEN_BIT |
-					   XE_BO_CREATE_GGTT_BIT);
+		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+						      dpt_size, ~0ull,
+						      ttm_bo_type_kernel,
+						      XE_BO_FLAG_STOLEN |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_PAGETABLE,
+						      alignment);
 	if (IS_ERR(dpt))
-		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
-					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_SYSTEM_BIT |
-					   XE_BO_CREATE_GGTT_BIT);
+		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+						      dpt_size, ~0ull,
+						      ttm_bo_type_kernel,
+						      XE_BO_FLAG_SYSTEM |
+						      XE_BO_FLAG_GGTT |
+						      XE_BO_FLAG_PAGETABLE,
+						      alignment);
 	if (IS_ERR(dpt))
 		return PTR_ERR(dpt);
 
@@ -119,7 +133,7 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 
 		for (x = 0; x < size / XE_PAGE_SIZE; x++) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
 			iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
 		}
@@ -149,7 +163,10 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 	}
 
 	vma->dpt = dpt;
-	vma->node = dpt->ggtt_node;
+	vma->node = dpt->ggtt_node[tile0->id];
+
+	/* Ensure DPT writes are flushed */
+	xe_device_l2_flush(xe);
 
 	return 0;
 }
@@ -165,9 +182,9 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
 
 		for (row = 0; row < height; row++) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
-			xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
+			ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
 			*ggtt_ofs += XE_PAGE_SIZE;
 			src_idx -= src_stride;
 		}
@@ -177,11 +194,13 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
 	}
 }
 
-static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
+static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
 				const struct i915_gtt_view *view,
-				struct i915_vma *vma)
+				struct i915_vma *vma,
+				unsigned int alignment)
 {
-	struct xe_bo *bo = intel_fb_obj(&fb->base);
+	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	struct xe_device *xe = to_xe_device(fb->base.dev);
 	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
 	u32 align;
@@ -190,7 +209,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 	/* TODO: Consider sharing framebuffer mapping?
 	 * embed i915_vma inside intel_framebuffer
 	 */
-	xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
 	ret = mutex_lock_interruptible(&ggtt->lock);
 	if (ret)
 		goto out;
@@ -199,21 +218,28 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
 		align = max_t(u32, align, SZ_64K);
 
-	if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) {
-		vma->node = bo->ggtt_node;
+	if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
+		vma->node = bo->ggtt_node[ggtt->tile->id];
 	} else if (view->type == I915_GTT_VIEW_NORMAL) {
 		u32 x, size = bo->ttm.base.size;
 
-		ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
-							 align, 0);
-		if (ret)
+		vma->node = xe_ggtt_node_init(ggtt);
+		if (IS_ERR(vma->node)) {
+			ret = PTR_ERR(vma->node);
 			goto out_unlock;
+		}
+
+		ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+		if (ret) {
+			xe_ggtt_node_fini(vma->node);
+			goto out_unlock;
+		}
 
 		for (x = 0; x < size; x += XE_PAGE_SIZE) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
-			xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
+			ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
 		}
 	} else {
 		u32 i, ggtt_ofs;
@@ -222,12 +248,19 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 		/* display seems to use tiles instead of bytes here, so convert it back.. */
 		u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE;
 
-		ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
-							 align, 0);
-		if (ret)
+		vma->node = xe_ggtt_node_init(ggtt);
+		if (IS_ERR(vma->node)) {
+			ret = PTR_ERR(vma->node);
 			goto out_unlock;
+		}
+
+		ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+		if (ret) {
+			xe_ggtt_node_fini(vma->node);
+			goto out_unlock;
+		}
 
-		ggtt_ofs = vma->node.start;
+		ggtt_ofs = vma->node->base.start;
 
 		for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
 			write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
@@ -238,29 +271,31 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 					   rot_info->plane[i].dst_stride);
 	}
 
-	xe_ggtt_invalidate(ggtt);
 out_unlock:
 	mutex_unlock(&ggtt->lock);
 out:
-	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 	return ret;
 }
 
-static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
-					const struct i915_gtt_view *view)
+static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
+					const struct i915_gtt_view *view,
+					unsigned int alignment)
 {
 	struct drm_device *dev = fb->base.dev;
 	struct xe_device *xe = to_xe_device(dev);
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-	struct xe_bo *bo = intel_fb_obj(&fb->base);
+	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	int ret;
 
 	if (!vma)
 		return ERR_PTR(-ENODEV);
 
+	refcount_set(&vma->ref, 1);
 	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
 	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
-	    !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
 		struct xe_tile *tile = xe_device_get_root_tile(xe);
 
 		/*
@@ -295,9 +330,9 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
 
 	vma->bo = bo;
 	if (intel_fb_uses_dpt(&fb->base))
-		ret = __xe_pin_fb_vma_dpt(fb, view, vma);
+		ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
 	else
-		ret = __xe_pin_fb_vma_ggtt(fb, view, vma);
+		ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
 	if (ret)
 		goto err_unpin;
 
@@ -314,14 +349,16 @@ err:
 
 static void __xe_unpin_fb_vma(struct i915_vma *vma)
 {
-	struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev);
-	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+	u8 tile_id = vma->node->ggtt->tile->id;
+
+	if (!refcount_dec_and_test(&vma->ref))
+		return;
 
 	if (vma->dpt)
 		xe_bo_unpin_map_no_vm(vma->dpt);
-	else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
-		 vma->bo->ggtt_node.start != vma->node.start)
-		xe_ggtt_remove_node(ggtt, &vma->node);
+	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
+		 vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
+		xe_ggtt_node_remove(vma->node, false);
 
 	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
 	ttm_bo_unpin(&vma->bo->ttm);
@@ -330,36 +367,76 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
 }
 
 struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-			   bool phys_cursor,
-			   const struct i915_gtt_view *view,
-			   bool uses_fence,
-			   unsigned long *out_flags)
+intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
+		     const struct i915_gtt_view *view,
+		     unsigned int alignment,
+		     unsigned int phys_alignment,
+		     unsigned int vtd_guard,
+		     bool uses_fence,
+		     unsigned long *out_flags)
 {
 	*out_flags = 0;
 
-	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view);
+	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment);
 }
 
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
 {
 	__xe_unpin_fb_vma(vma);
 }
 
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+static bool reuse_vma(struct intel_plane_state *new_plane_state,
+		      const struct intel_plane_state *old_plane_state)
 {
-	struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct xe_bo *bo = intel_fb_obj(fb);
+	struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+	struct xe_device *xe = to_xe_device(fb->base.dev);
 	struct i915_vma *vma;
 
+	if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
+	    !memcmp(&old_plane_state->view.gtt,
+		    &new_plane_state->view.gtt,
+		    sizeof(new_plane_state->view.gtt))) {
+		vma = old_plane_state->ggtt_vma;
+		goto found;
+	}
+
+	if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
+		vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+		if (vma)
+			goto found;
+	}
+
+	return false;
+
+found:
+	refcount_inc(&vma->ref);
+	new_plane_state->ggtt_vma = vma;
+	return true;
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+		       const struct intel_plane_state *old_plane_state)
+{
+	struct drm_framebuffer *fb = new_plane_state->hw.fb;
+	struct drm_gem_object *obj = intel_fb_bo(fb);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
+	struct i915_vma *vma;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+	unsigned int alignment = plane->min_alignment(plane, fb, 0);
+
+	if (reuse_vma(new_plane_state, old_plane_state))
+		return 0;
+
 	/* We reject creating !SCANOUT fb's, so this is weird.. */
-	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
+
+	vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
 
-	vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	plane_state->ggtt_vma = vma;
+	new_plane_state->ggtt_vma = vma;
 	return 0;
 }
 
@@ -370,8 +447,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 }
 
 /*
- * For Xe introduce dummy intel_dpt_create which just return NULL and
- * intel_dpt_destroy which does nothing.
+ * For Xe introduce dummy intel_dpt_create which just return NULL,
+ * intel_dpt_destroy which does nothing, and fake intel_dpt_ofsset returning 0;
  */
 struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
 {
@@ -381,4 +458,9 @@ struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
 void intel_dpt_destroy(struct i915_address_space *vm)
 {
 	return;
-}
\ No newline at end of file
+}
+
+u64 intel_dpt_offset(struct i915_vma *dpt_vma)
+{
+	return 0;
+}