Diffstat (limited to 'drivers/gpu/drm/xe/display')
-rw-r--r--  drivers/gpu/drm/xe/display/ext/i915_irq.c      |  47
-rw-r--r--  drivers/gpu/drm/xe/display/intel_bo.c          |  61
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fb_bo.c       |  51
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fb_bo.h       |  24
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fbdev_fb.c    |  62
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fbdev_fb.h    |  21
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c        | 551
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.h        |  82
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_wa.c     |  16
-rw-r--r--  drivers/gpu/drm/xe/display/xe_dsb_buffer.c     |  27
-rw-r--r--  drivers/gpu/drm/xe/display/xe_fb_pin.c         | 173
-rw-r--r--  drivers/gpu/drm/xe/display/xe_hdcp_gsc.c       | 252
-rw-r--r--  drivers/gpu/drm/xe/display/xe_plane_initial.c  | 113
-rw-r--r--  drivers/gpu/drm/xe/display/xe_tdf.c            |  13
14 files changed, 1234 insertions(+), 259 deletions(-)
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index bee191a4a97d..ac4cda2d81c7 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -3,30 +3,28 @@
  * Copyright © 2023 Intel Corporation
  */
 
-#include "i915_drv.h"
 #include "i915_irq.h"
 #include "i915_reg.h"
 #include "intel_uncore.h"
 
-void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
-		    i915_reg_t iir, i915_reg_t ier)
+void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
 {
-	intel_uncore_write(uncore, imr, 0xffffffff);
-	intel_uncore_posting_read(uncore, imr);
+	intel_uncore_write(uncore, regs.imr, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.imr);
 
-	intel_uncore_write(uncore, ier, 0);
+	intel_uncore_write(uncore, regs.ier, 0);
 
 	/* IIR can theoretically queue up two events. Be paranoid. */
-	intel_uncore_write(uncore, iir, 0xffffffff);
-	intel_uncore_posting_read(uncore, iir);
-	intel_uncore_write(uncore, iir, 0xffffffff);
-	intel_uncore_posting_read(uncore, iir);
+	intel_uncore_write(uncore, regs.iir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.iir);
+	intel_uncore_write(uncore, regs.iir, 0xffffffff);
+	intel_uncore_posting_read(uncore, regs.iir);
 }
 
 /*
  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  */
-void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
+void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
 {
 	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
 	u32 val = intel_uncore_read(uncore, reg);
@@ -43,32 +41,19 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
 	intel_uncore_posting_read(uncore, reg);
 }
 
-void gen3_irq_init(struct intel_uncore *uncore,
-		   i915_reg_t imr, u32 imr_val,
-		   i915_reg_t ier, u32 ier_val,
-		   i915_reg_t iir)
+void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+		   u32 imr_val, u32 ier_val)
 {
-	gen3_assert_iir_is_zero(uncore, iir);
+	gen2_assert_iir_is_zero(uncore, regs.iir);
 
-	intel_uncore_write(uncore, ier, ier_val);
-	intel_uncore_write(uncore, imr, imr_val);
-	intel_uncore_posting_read(uncore, imr);
+	intel_uncore_write(uncore, regs.ier, ier_val);
+	intel_uncore_write(uncore, regs.imr, imr_val);
+	intel_uncore_posting_read(uncore, regs.imr);
 }
 
 bool intel_irqs_enabled(struct xe_device *xe)
 {
-	/*
-	 * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
-	 * lock its transitions. Because of that, the irq.enabled sometimes
-	 * is not read with the irq.lock in place.
-	 * However, the most critical cases like vblank and page flips are
-	 * properly using the locks.
-	 * We cannot take the lock in here or run any kind of assert because
-	 * of i915 inconsistency.
-	 * But at this point the xe irq is better protected against races,
-	 * although the full solution would be protecting the i915 side.
-	 */
-	return xe->irq.enabled;
+	return atomic_read(&xe->irq.enabled);
 }
 
 void intel_synchronize_irq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
new file mode 100644
index 000000000000..b463f5bd4eed
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/drm_gem.h>
+
+#include "xe_bo.h"
+#include "intel_bo.h"
+
+bool intel_bo_is_tiled(struct drm_gem_object *obj)
+{
+	/* legacy tiling is unused */
+	return false;
+}
+
+bool intel_bo_is_userptr(struct drm_gem_object *obj)
+{
+	/* xe does not have userptr bos */
+	return false;
+}
+
+bool intel_bo_is_shmem(struct drm_gem_object *obj)
+{
+	return false;
+}
+
+bool intel_bo_is_protected(struct drm_gem_object *obj)
+{
+	return false;
+}
+
+void intel_bo_flush_if_display(struct drm_gem_object *obj)
+{
+}
+
+int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	return drm_gem_prime_mmap(obj, vma);
+}
+
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+	struct xe_bo *bo = gem_to_xe_bo(obj);
+
+	return xe_bo_read(bo, offset, dst, size);
+}
+
+struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+{
+	return NULL;
+}
+
+struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
+						   struct intel_frontbuffer *front)
+{
+	return front;
+}
+
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+	/* FIXME */
+}
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index b21da7b745a5..4d209ebc26c2 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -4,14 +4,18 @@
  */
 
 #include <drm/drm_modeset_helper.h>
+#include <drm/ttm/ttm_bo.h>
 
-#include "i915_drv.h"
 #include "intel_display_types.h"
+#include "intel_fb.h"
 #include "intel_fb_bo.h"
+#include "xe_bo.h"
 
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
+void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
 {
-	if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+	struct xe_bo *bo = gem_to_xe_bo(obj);
+
+	if (bo->flags & XE_BO_FLAG_PINNED) {
 		/* Unpin our kernel fb first */
 		xe_bo_lock(bo, false);
 		xe_bo_unpin(bo);
@@ -21,41 +25,54 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
 }
 
 int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
-				 struct xe_bo *bo,
+				 struct drm_gem_object *obj,
 				 struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	struct drm_i915_private *i915 = to_i915(bo->ttm.base.dev);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
+	struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
 	int ret;
 
+	/*
+	 * Some modifiers require physical alignment of 64KiB VRAM pages;
+	 * require that the BO in those cases is created correctly.
+	 */
+	if (XE_IOCTL_DBG(xe, intel_fb_needs_64k_phys(mode_cmd->modifier[0]) &&
+			 !(bo->flags & XE_BO_FLAG_NEEDS_64K)))
+		return -EINVAL;
+
+	xe_bo_get(bo);
 
 	ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
 	if (ret)
-		return ret;
+		goto err;
 
-	if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+	if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
 		/*
-		 * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+		 * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
		 * automatically set when creating FB. We cannot change caching
		 * mode when the object is VM_BINDed, so we can only set
		 * coherency with display when unbound.
*/ - if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) { + if (XE_IOCTL_DBG(xe, !list_empty(&bo->ttm.base.gpuva.list))) { ttm_bo_unreserve(&bo->ttm); - return -EINVAL; + ret = -EINVAL; + goto err; } - bo->flags |= XE_BO_SCANOUT_BIT; + bo->flags |= XE_BO_FLAG_SCANOUT; } ttm_bo_unreserve(&bo->ttm); + return 0; +err: + xe_bo_put(bo); return ret; } -struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd) +struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_i915_gem_object *bo; + struct xe_bo *bo; struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]); if (!gem) @@ -64,11 +81,11 @@ struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, bo = gem_to_xe_bo(gem); /* Require vram placement or dma-buf import */ if (IS_DGFX(i915) && - !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) && + !xe_bo_can_migrate(bo, XE_PL_VRAM0) && bo->ttm.type != ttm_bo_type_sg) { drm_gem_object_put(gem); return ERR_PTR(-EREMOTE); } - return bo; + return gem; } diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h deleted file mode 100644 index 5d365b925b7a..000000000000 --- a/drivers/gpu/drm/xe/display/intel_fb_bo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2021 Intel Corporation - */ - -#ifndef __INTEL_FB_BO_H__ -#define __INTEL_FB_BO_H__ - -struct drm_file; -struct drm_mode_fb_cmd2; -struct drm_i915_private; -struct intel_framebuffer; -struct xe_bo; - -void intel_fb_bo_framebuffer_fini(struct xe_bo *bo); -int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct xe_bo *bo, - struct drm_mode_fb_cmd2 *mode_cmd); - -struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd); - -#endif diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index 51ae3561fd0d..ca95fcd098ec 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -3,24 +3,25 @@ * Copyright © 2023 Intel Corporation */ -#include "intel_fbdev_fb.h" - #include <drm/drm_fb_helper.h> -#include "xe_gt.h" +#include "intel_display_types.h" +#include "intel_fb.h" +#include "intel_fbdev_fb.h" +#include "xe_bo.h" #include "xe_ttm_stolen_mgr.h" +#include "xe_wa.h" -#include "i915_drv.h" -#include "intel_display_types.h" +#include <generated/xe_wa_oob.h> -struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct xe_device *xe = to_xe_device(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; - struct drm_i915_gem_object *obj; + struct xe_bo *obj; int size; /* we don't do packed 24bpp */ @@ -39,50 +40,53 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, size = PAGE_ALIGN(size); obj = ERR_PTR(-ENODEV); - if (!IS_DGFX(dev_priv)) { - obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), + if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) { + obj = xe_bo_create_pin_map(xe, 
xe_device_get_root_tile(xe), NULL, size, - ttm_bo_type_kernel, XE_BO_SCANOUT_BIT | - XE_BO_CREATE_STOLEN_BIT | - XE_BO_CREATE_PINNED_BIT); + ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_PINNED); if (!IS_ERR(obj)) - drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n"); + drm_info(&xe->drm, "Allocated fbdev into stolen\n"); else - drm_info(&dev_priv->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj)); + drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj)); } + if (IS_ERR(obj)) { - obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size, - ttm_bo_type_kernel, XE_BO_SCANOUT_BIT | - XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) | - XE_BO_CREATE_PINNED_BIT); + obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size, + ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | + XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | + XE_BO_FLAG_PINNED); } if (IS_ERR(obj)) { - drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj); + drm_err(&xe->drm, "failed to allocate framebuffer (%pe)\n", obj); fb = ERR_PTR(-ENOMEM); goto err; } - fb = intel_framebuffer_create(obj, &mode_cmd); + fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd); if (IS_ERR(fb)) { xe_bo_unpin_map_no_vm(obj); goto err; } - drm_gem_object_put(intel_bo_to_drm_bo(obj)); - return fb; + drm_gem_object_put(&obj->ttm.base); + + return to_intel_framebuffer(fb); err: - return fb; + return ERR_CAST(fb); } int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma) + struct drm_gem_object *_obj, struct i915_vma *vma) { + struct xe_bo *obj = gem_to_xe_bo(_obj); struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) { - if (obj->flags & XE_BO_CREATE_STOLEN_BIT) + if (!(obj->flags & XE_BO_FLAG_SYSTEM)) { + if (obj->flags & XE_BO_FLAG_STOLEN) info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0); else info->fix.smem_start = @@ -98,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info XE_WARN_ON(iosys_map_is_null(&obj->vmap)); info->screen_base = obj->vmap.vaddr_iomem; - info->screen_size = intel_bo_to_drm_bo(obj)->size; + info->screen_size = obj->ttm.base.size; return 0; } diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.h b/drivers/gpu/drm/xe/display/intel_fbdev_fb.h deleted file mode 100644 index ea186772e0bb..000000000000 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef __INTEL_FBDEV_FB_H__ -#define __INTEL_FBDEV_FB_H__ - -struct drm_fb_helper; -struct drm_fb_helper_surface_size; -struct drm_i915_gem_object; -struct drm_i915_private; -struct fb_info; -struct i915_vma; - -struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); -int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma); - -#endif diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c new file mode 100644 index 000000000000..b3921dbc52ff --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "xe_display.h" +#include "regs/xe_irq_regs.h" + +#include 
<linux/fb.h> + +#include <drm/drm_drv.h> +#include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> +#include <uapi/drm/xe_drm.h> + +#include "soc/intel_dram.h" +#include "intel_acpi.h" +#include "intel_audio.h" +#include "intel_bw.h" +#include "intel_display.h" +#include "intel_display_driver.h" +#include "intel_display_irq.h" +#include "intel_display_types.h" +#include "intel_dmc.h" +#include "intel_dmc_wl.h" +#include "intel_dp.h" +#include "intel_encoder.h" +#include "intel_fbdev.h" +#include "intel_hdcp.h" +#include "intel_hotplug.h" +#include "intel_opregion.h" +#include "xe_module.h" + +/* Xe device functions */ + +static bool has_display(struct xe_device *xe) +{ + return HAS_DISPLAY(&xe->display); +} + +/** + * xe_display_driver_probe_defer - Detect if we need to wait for other drivers + * early on + * @pdev: PCI device + * + * Returns: true if probe needs to be deferred, false otherwise + */ +bool xe_display_driver_probe_defer(struct pci_dev *pdev) +{ + if (!xe_modparam.probe_display) + return 0; + + return intel_display_driver_probe_defer(pdev); +} + +/** + * xe_display_driver_set_hooks - Add driver flags and hooks for display + * @driver: DRM device driver + * + * Set features and function hooks in @driver that are needed for driving the + * display IP. This sets the driver's capability of driving display, regardless + * if the device has it enabled + */ +void xe_display_driver_set_hooks(struct drm_driver *driver) +{ + if (!xe_modparam.probe_display) + return; + + driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC; +} + +static void unset_display_features(struct xe_device *xe) +{ + xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); +} + +static void display_destroy(struct drm_device *dev, void *dummy) +{ + struct xe_device *xe = to_xe_device(dev); + + destroy_workqueue(xe->display.hotplug.dp_wq); +} + +/** + * xe_display_create - create display struct + * @xe: XE device instance + * + * Initialize all fields used by the display part. + * + * TODO: once everything can be inside a single struct, make the struct opaque + * to the rest of xe and return it to be xe->display. + * + * Returns: 0 on success + */ +int xe_display_create(struct xe_device *xe) +{ + spin_lock_init(&xe->display.fb_tracking.lock); + + xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0); + + return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL); +} + +static void xe_display_fini_nommio(struct drm_device *dev, void *dummy) +{ + struct xe_device *xe = to_xe_device(dev); + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_power_domains_cleanup(display); +} + +int xe_display_init_nommio(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return 0; + + /* Fake uncore lock */ + spin_lock_init(&xe->uncore.lock); + + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(xe); + + return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe); +} + +static void xe_display_fini_noirq(void *arg) +{ + struct xe_device *xe = arg; + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_display_driver_remove_noirq(display); + intel_opregion_cleanup(display); +} + +int xe_display_init_noirq(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + int err; + + if (!xe->info.probe_display) + return 0; + + intel_display_driver_early_probe(display); + + /* Early display init.. 
*/ + intel_opregion_setup(display); + + /* + * Fill the dram structure to get the system dram info. This will be + * used for memory latency calculation. + */ + intel_dram_detect(xe); + + intel_bw_init_hw(xe); + + intel_display_device_info_runtime_init(display); + + err = intel_display_driver_probe_noirq(display); + if (err) { + intel_opregion_cleanup(display); + return err; + } + + return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe); +} + +static void xe_display_fini_noaccel(void *arg) +{ + struct xe_device *xe = arg; + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_display_driver_remove_nogem(display); +} + +int xe_display_init_noaccel(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + int err; + + if (!xe->info.probe_display) + return 0; + + err = intel_display_driver_probe_nogem(display); + if (err) + return err; + + return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe); +} + +int xe_display_init(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return 0; + + return intel_display_driver_probe(display); +} + +void xe_display_fini(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_hpd_poll_fini(xe); + + intel_hdcp_component_fini(display); + intel_audio_deinit(xe); +} + +void xe_display_register(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_display_driver_register(display); + intel_power_domains_enable(display); + intel_register_dsm_handler(); +} + +void xe_display_unregister(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_unregister_dsm_handler(); + intel_power_domains_disable(display); + intel_display_driver_unregister(display); +} + +void xe_display_driver_remove(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_display_driver_remove(display); +} + +/* IRQ-related functions */ + +void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) +{ + if (!xe->info.probe_display) + return; + + if (master_ctl & DISPLAY_IRQ) + gen11_display_irq_handler(xe); +} + +void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + if (gu_misc_iir & GU_MISC_GSE) + intel_opregion_asle_intr(display); +} + +void xe_display_irq_reset(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + gen11_display_irq_reset(xe); +} + +void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) +{ + if (!xe->info.probe_display) + return; + + if (gt->info.id == XE_GT0) + gen11_de_irq_postinstall(xe); +} + +static bool suspend_to_idle(void) +{ +#if IS_ENABLED(CONFIG_ACPI_SLEEP) + if (acpi_target_system_state() < ACPI_STATE_S3) + return true; +#endif + return false; +} + +static void xe_display_flush_cleanup_work(struct xe_device *xe) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&xe->drm, crtc) { + struct drm_crtc_commit *commit; + + spin_lock(&crtc->base.commit_lock); + commit = list_first_entry_or_null(&crtc->base.commit_list, + struct drm_crtc_commit, commit_entry); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->base.commit_lock); + + if (commit) { + 
wait_for_completion(&commit->cleanup_done); + drm_crtc_commit_put(commit); + } + } +} + +/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */ +static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) +{ + struct intel_display *display = &xe->display; + bool s2idle = suspend_to_idle(); + if (!xe->info.probe_display) + return; + + /* + * We do a lot of poking in a lot of registers, make sure they work + * properly. + */ + intel_power_domains_disable(display); + if (!runtime) + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + + if (!runtime && has_display(xe)) { + drm_kms_helper_poll_disable(&xe->drm); + intel_display_driver_disable_user_access(display); + intel_display_driver_suspend(display); + } + + xe_display_flush_cleanup_work(xe); + + intel_hpd_cancel_work(xe); + + if (!runtime && has_display(xe)) { + intel_display_driver_suspend_access(display); + intel_encoder_suspend_all(&xe->display); + } + + intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold); + + intel_dmc_suspend(display); + + if (runtime && has_display(xe)) + intel_hpd_poll_enable(xe); +} + +void xe_display_pm_suspend(struct xe_device *xe) +{ + __xe_display_pm_suspend(xe, false); +} + +void xe_display_pm_shutdown(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_power_domains_disable(display); + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (has_display(xe)) { + drm_kms_helper_poll_disable(&xe->drm); + intel_display_driver_disable_user_access(display); + intel_display_driver_suspend(display); + } + + xe_display_flush_cleanup_work(xe); + intel_dp_mst_suspend(display); + intel_hpd_cancel_work(xe); + + if (has_display(xe)) + intel_display_driver_suspend_access(display); + + intel_encoder_suspend_all(display); + intel_encoder_shutdown_all(display); + + intel_opregion_suspend(display, PCI_D3cold); + + intel_dmc_suspend(display); +} + +void xe_display_pm_runtime_suspend(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + if (xe->d3cold.allowed) { + __xe_display_pm_suspend(xe, true); + return; + } + + intel_hpd_poll_enable(xe); +} + +void xe_display_pm_suspend_late(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + bool s2idle = suspend_to_idle(); + + if (!xe->info.probe_display) + return; + + intel_display_power_suspend_late(display, s2idle); +} + +void xe_display_pm_runtime_suspend_late(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + if (xe->d3cold.allowed) + xe_display_pm_suspend_late(xe); + + /* + * If xe_display_pm_suspend_late() is not called, it is likely + * that we will be on dynamic DC states with DMC wakelock enabled. We + * need to flush the release work in that case. + */ + intel_dmc_wl_flush_release_work(display); +} + +void xe_display_pm_shutdown_late(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + /* + * The only requirement is to reboot with display DC states disabled, + * for now leaving all display power wells in the INIT power domain + * enabled. 
+ */ + intel_power_domains_driver_remove(display); +} + +void xe_display_pm_resume_early(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_display_power_resume_early(display); +} + +static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_dmc_resume(display); + + if (has_display(xe)) + drm_mode_config_reset(&xe->drm); + + intel_display_driver_init_hw(display); + + if (!runtime && has_display(xe)) + intel_display_driver_resume_access(display); + + intel_hpd_init(xe); + + if (!runtime && has_display(xe)) { + intel_display_driver_resume(display); + drm_kms_helper_poll_enable(&xe->drm); + intel_display_driver_enable_user_access(display); + } + + if (has_display(xe)) + intel_hpd_poll_disable(xe); + + intel_opregion_resume(display); + + if (!runtime) + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); + + intel_power_domains_enable(display); +} + +void xe_display_pm_resume(struct xe_device *xe) +{ + __xe_display_pm_resume(xe, false); +} + +void xe_display_pm_runtime_resume(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + if (xe->d3cold.allowed) { + __xe_display_pm_resume(xe, true); + return; + } + + intel_hpd_init(xe); + intel_hpd_poll_disable(xe); +} + + +static void display_device_remove(struct drm_device *dev, void *arg) +{ + struct intel_display *display = arg; + + intel_display_device_remove(display); +} + +int xe_display_probe(struct xe_device *xe) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + struct intel_display *display; + int err; + + if (!xe->info.probe_display) + goto no_display; + + display = intel_display_device_probe(pdev); + + err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display); + if (err) + return err; + + if (has_display(xe)) + return 0; + +no_display: + xe->info.probe_display = false; + unset_display_features(xe); + return 0; +} diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h new file mode 100644 index 000000000000..233f81a26c25 --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_DISPLAY_H_ +#define _XE_DISPLAY_H_ + +#include "xe_device.h" + +struct drm_driver; + +#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) + +bool xe_display_driver_probe_defer(struct pci_dev *pdev); +void xe_display_driver_set_hooks(struct drm_driver *driver); +void xe_display_driver_remove(struct xe_device *xe); + +int xe_display_create(struct xe_device *xe); + +int xe_display_probe(struct xe_device *xe); + +int xe_display_init_nommio(struct xe_device *xe); +int xe_display_init_noirq(struct xe_device *xe); +int xe_display_init_noaccel(struct xe_device *xe); +int xe_display_init(struct xe_device *xe); +void xe_display_fini(struct xe_device *xe); + +void xe_display_register(struct xe_device *xe); +void xe_display_unregister(struct xe_device *xe); + +void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl); +void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir); +void xe_display_irq_reset(struct xe_device *xe); +void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); + +void xe_display_pm_suspend(struct xe_device *xe); +void xe_display_pm_shutdown(struct xe_device *xe); +void xe_display_pm_suspend_late(struct xe_device *xe); +void 
xe_display_pm_shutdown_late(struct xe_device *xe); +void xe_display_pm_resume_early(struct xe_device *xe); +void xe_display_pm_resume(struct xe_device *xe); +void xe_display_pm_runtime_suspend(struct xe_device *xe); +void xe_display_pm_runtime_suspend_late(struct xe_device *xe); +void xe_display_pm_runtime_resume(struct xe_device *xe); + +#else + +static inline int xe_display_driver_probe_defer(struct pci_dev *pdev) { return 0; } +static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { } +static inline void xe_display_driver_remove(struct xe_device *xe) {} + +static inline int xe_display_create(struct xe_device *xe) { return 0; } + +static inline int xe_display_probe(struct xe_device *xe) { return 0; } + +static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; } +static inline int xe_display_init_noirq(struct xe_device *xe) { return 0; } +static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; } +static inline int xe_display_init(struct xe_device *xe) { return 0; } +static inline void xe_display_fini(struct xe_device *xe) {} + +static inline void xe_display_register(struct xe_device *xe) {} +static inline void xe_display_unregister(struct xe_device *xe) {} + +static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {} +static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {} +static inline void xe_display_irq_reset(struct xe_device *xe) {} +static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} + +static inline void xe_display_pm_suspend(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown(struct xe_device *xe) {} +static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {} +static inline void xe_display_pm_resume_early(struct xe_device *xe) {} +static inline void xe_display_pm_resume(struct xe_device *xe) {} +static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} +static inline void xe_display_pm_runtime_suspend_late(struct xe_device *xe) {} +static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {} + +#endif /* CONFIG_DRM_XE_DISPLAY */ +#endif /* _XE_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c new file mode 100644 index 000000000000..68e3d1959ad6 --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_display_wa.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "intel_display_wa.h" + +#include "xe_device.h" +#include "xe_wa.h" + +#include <generated/xe_wa_oob.h> + +bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) +{ + return XE_WA(xe_root_mmio_gt(i915), 16023588340); +} diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index 27c2fb1c002a..f95375451e2f 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -3,12 +3,12 @@ * Copyright 2023, Intel Corporation. 
*/ -#include "i915_drv.h" #include "i915_vma.h" #include "intel_display_types.h" #include "intel_dsb_buffer.h" #include "xe_bo.h" -#include "xe_gt.h" +#include "xe_device.h" +#include "xe_device_types.h" u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) { @@ -17,7 +17,10 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) { + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); + xe_device_l2_flush(xe); } u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) @@ -27,26 +30,30 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) { + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); + xe_device_l2_flush(xe); } bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct drm_i915_gem_object *obj; + struct xe_device *xe = to_xe_device(crtc->base.dev); + struct xe_bo *obj; struct i915_vma *vma; vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) return false; - obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915), + /* Set scanout flag for WC mapping */ + obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_ALIGN(size), ttm_bo_type_kernel, - XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) | - XE_BO_CREATE_GGTT_BIT); + XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | + XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT); if (IS_ERR(obj)) { kfree(vma); return false; @@ -67,5 +74,9 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) { - /* TODO: add xe specific flush_map() for dsb buffer object. */ + /* + * The memory barrier here is to ensure coherency of DSB vs MMIO, + * both for weak ordering archs and discrete cards. 
+ */ + xe_device_wmb(dsb_buf->vma->bo->tile->xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index 722c84a56607..9fa51b84737c 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -3,15 +3,16 @@ * Copyright © 2021 Intel Corporation */ -#include "i915_drv.h" +#include <drm/ttm/ttm_bo.h> + #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" #include "intel_fb_pin.h" +#include "xe_bo.h" +#include "xe_device.h" #include "xe_ggtt.h" -#include "xe_gt.h" - -#include <drm/ttm/ttm_bo.h> +#include "xe_pm.h" static void write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs, @@ -30,7 +31,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ for (row = 0; row < height; row++) { u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_WB]); + xe->pat.idx[XE_CACHE_NONE]); iosys_map_wr(map, *dpt_ofs, u64, pte); *dpt_ofs += 8; @@ -62,7 +63,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, for (column = 0; column < width; column++) { iosys_map_wr(map, *dpt_ofs, u64, pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_WB])); + xe->pat.idx[XE_CACHE_NONE])); *dpt_ofs += 8; src_idx++; @@ -76,14 +77,16 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, *dpt_ofs = ALIGN(*dpt_ofs, 4096); } -static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb, +static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_tile *tile0 = xe_device_get_root_tile(xe); struct xe_ggtt *ggtt = tile0->mem.ggtt; - struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt; + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj), *dpt; u32 dpt_size, size = bo->ttm.base.size; if (view->type == I915_GTT_VIEW_NORMAL) @@ -97,20 +100,29 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb, XE_PAGE_SIZE); if (IS_DGFX(xe)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_CREATE_VRAM0_BIT | - XE_BO_CREATE_GGTT_BIT); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM0 | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); else - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_CREATE_STOLEN_BIT | - XE_BO_CREATE_GGTT_BIT); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_CREATE_SYSTEM_BIT | - XE_BO_CREATE_GGTT_BIT); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) return PTR_ERR(dpt); @@ -119,7 +131,7 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb, for (x = 0; x < size / XE_PAGE_SIZE; x++) { u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_WB]); + xe->pat.idx[XE_CACHE_NONE]); iosys_map_wr(&dpt->vmap, x * 8, u64, pte); } @@ -149,7 +161,7 @@ static int 
__xe_pin_fb_vma_dpt(struct intel_framebuffer *fb, } vma->dpt = dpt; - vma->node = dpt->ggtt_node; + vma->node = dpt->ggtt_node[tile0->id]; return 0; } @@ -165,9 +177,9 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo for (row = 0; row < height; row++) { u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_WB]); + xe->pat.idx[XE_CACHE_NONE]); - xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte); + ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte); *ggtt_ofs += XE_PAGE_SIZE; src_idx -= src_stride; } @@ -177,11 +189,13 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo } } -static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, +static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { - struct xe_bo *bo = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; u32 align; @@ -190,7 +204,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, /* TODO: Consider sharing framebuffer mapping? * embed i915_vma inside intel_framebuffer */ - xe_device_mem_access_get(tile_to_xe(ggtt->tile)); + xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile)); ret = mutex_lock_interruptible(&ggtt->lock); if (ret) goto out; @@ -199,21 +213,28 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) align = max_t(u32, align, SZ_64K); - if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) { - vma->node = bo->ggtt_node; + if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) { + vma->node = bo->ggtt_node[ggtt->tile->id]; } else if (view->type == I915_GTT_VIEW_NORMAL) { u32 x, size = bo->ttm.base.size; - ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size, - align, 0); - if (ret) + vma->node = xe_ggtt_node_init(ggtt); + if (IS_ERR(vma->node)) { + ret = PTR_ERR(vma->node); + goto out_unlock; + } + + ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0); + if (ret) { + xe_ggtt_node_fini(vma->node); goto out_unlock; + } for (x = 0; x < size; x += XE_PAGE_SIZE) { u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x, - xe->pat.idx[XE_CACHE_WB]); + xe->pat.idx[XE_CACHE_NONE]); - xe_ggtt_set_pte(ggtt, vma->node.start + x, pte); + ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte); } } else { u32 i, ggtt_ofs; @@ -222,12 +243,19 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, /* display seems to use tiles instead of bytes here, so convert it back.. 
*/ u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE; - ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size, - align, 0); - if (ret) + vma->node = xe_ggtt_node_init(ggtt); + if (IS_ERR(vma->node)) { + ret = PTR_ERR(vma->node); goto out_unlock; + } - ggtt_ofs = vma->node.start; + ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0); + if (ret) { + xe_ggtt_node_fini(vma->node); + goto out_unlock; + } + + ggtt_ofs = vma->node->base.start; for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) write_ggtt_rotated(bo, ggtt, &ggtt_ofs, @@ -238,21 +266,22 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, rot_info->plane[i].dst_stride); } - xe_ggtt_invalidate(ggtt); out_unlock: mutex_unlock(&ggtt->lock); out: - xe_device_mem_access_put(tile_to_xe(ggtt->tile)); + xe_pm_runtime_put(tile_to_xe(ggtt->tile)); return ret; } -static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb, - const struct i915_gtt_view *view) +static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, + const struct i915_gtt_view *view, + u64 physical_alignment) { struct drm_device *dev = fb->base.dev; struct xe_device *xe = to_xe_device(dev); struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); - struct xe_bo *bo = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj); int ret; if (!vma) @@ -260,7 +289,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb, if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) && intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 && - !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) { + !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) { struct xe_tile *tile = xe_device_get_root_tile(xe); /* @@ -295,12 +324,14 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb, vma->bo = bo; if (intel_fb_uses_dpt(&fb->base)) - ret = __xe_pin_fb_vma_dpt(fb, view, vma); + ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment); else - ret = __xe_pin_fb_vma_ggtt(fb, view, vma); + ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment); if (ret) goto err_unpin; + /* Ensure DPT writes are flushed */ + xe_device_l2_flush(xe); return vma; err_unpin: @@ -314,14 +345,13 @@ err: static void __xe_unpin_fb_vma(struct i915_vma *vma) { - struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev); - struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; + u8 tile_id = vma->node->ggtt->tile->id; if (vma->dpt) xe_bo_unpin_map_no_vm(vma->dpt); - else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) || - vma->bo->ggtt_node.start != vma->node.start) - xe_ggtt_remove_node(ggtt, &vma->node); + else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) || + vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start) + xe_ggtt_node_remove(vma->node, false); ttm_bo_reserve(&vma->bo->ttm, false, false, NULL); ttm_bo_unpin(&vma->bo->ttm); @@ -330,18 +360,19 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma) } struct i915_vma * -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - bool phys_cursor, - const struct i915_gtt_view *view, - bool uses_fence, - unsigned long *out_flags) +intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, + const struct i915_gtt_view *view, + unsigned int alignment, + unsigned int phys_alignment, + bool uses_fence, + unsigned long *out_flags) { *out_flags = 0; - return __xe_pin_fb_vma(to_intel_framebuffer(fb), view); + return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment); } -void intel_unpin_fb_vma(struct i915_vma 
*vma, unsigned long flags)
+void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
 {
 	__xe_unpin_fb_vma(vma);
 }
@@ -349,13 +380,18 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 {
 	struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct xe_bo *bo = intel_fb_obj(fb);
+	struct drm_gem_object *obj = intel_fb_bo(fb);
+	struct xe_bo *bo = gem_to_xe_bo(obj);
 	struct i915_vma *vma;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	u64 phys_alignment = plane->min_alignment(plane, fb, 0);
 
 	/* We reject creating !SCANOUT fb's, so this is weird.. */
-	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
+
+	vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment);
 
-	vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
@@ -370,8 +406,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 }
 
 /*
- * For Xe introduce dummy intel_dpt_create which just return NULL and
- * intel_dpt_destroy which does nothing.
+ * For Xe introduce dummy intel_dpt_create which just returns NULL,
+ * intel_dpt_destroy which does nothing, and a fake intel_dpt_offset returning 0.
  */
 struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
 {
@@ -381,4 +417,9 @@ void intel_dpt_destroy(struct i915_address_space *vm)
 {
 	return;
-}
\ No newline at end of file +} + +u64 intel_dpt_offset(struct i915_vma *dpt_vma) +{ + return 0; +} diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index 0f11a39333e2..7c02323e9531 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -3,32 +3,262 @@ * Copyright 2023, Intel Corporation. */ -#include "i915_drv.h" +#include <drm/drm_print.h> +#include <drm/intel/i915_hdcp_interface.h> +#include <linux/delay.h> + +#include "abi/gsc_command_header_abi.h" #include "intel_hdcp_gsc.h" +#include "intel_hdcp_gsc_message.h" +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_force_wake.h" +#include "xe_gsc_proxy.h" +#include "xe_gsc_submit.h" +#include "xe_map.h" +#include "xe_pm.h" +#include "xe_uc_fw.h" + +#define HECI_MEADDRESS_HDCP 18 + +struct intel_hdcp_gsc_message { + struct xe_bo *hdcp_bo; + u64 hdcp_cmd_in; + u64 hdcp_cmd_out; +}; -bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) +#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header) + +bool intel_hdcp_gsc_cs_required(struct intel_display *display) { - return true; + return DISPLAY_VER(display) >= 14; } -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915) +bool intel_hdcp_gsc_check_status(struct intel_display *display) { - return false; + struct xe_device *xe = to_xe_device(display->drm); + struct xe_tile *tile = xe_device_get_root_tile(xe); + struct xe_gt *gt = tile->media_gt; + struct xe_gsc *gsc = >->uc.gsc; + bool ret = true; + unsigned int fw_ref; + + if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) { + drm_dbg_kms(&xe->drm, + "GSC Components not ready for HDCP2.x\n"); + return false; + } + + xe_pm_runtime_get(xe); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) { + drm_dbg_kms(&xe->drm, + "failed to get forcewake to check proxy status\n"); + ret = false; + goto out; + } + + if (!xe_gsc_proxy_init_done(gsc)) + ret = false; + + xe_force_wake_put(gt_to_fw(gt), fw_ref); +out: + xe_pm_runtime_put(xe); + return ret; } -int intel_hdcp_gsc_init(struct drm_i915_private *i915) +/*This function helps allocate memory for the command that we will send to gsc cs */ +static int intel_hdcp_gsc_initialize_message(struct intel_display *display, + struct intel_hdcp_gsc_message *hdcp_message) { - drm_info(&i915->drm, "HDCP support not yet implemented\n"); - return -ENODEV; + struct xe_device *xe = to_xe_device(display->drm); + struct xe_bo *bo = NULL; + u64 cmd_in, cmd_out; + int ret = 0; + + /* allocate object of two page for HDCP command memory and store it */ + bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT); + + if (IS_ERR(bo)) { + drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n"); + ret = PTR_ERR(bo); + goto out; + } + + cmd_in = xe_bo_ggtt_addr(bo); + cmd_out = cmd_in + PAGE_SIZE; + xe_map_memset(xe, &bo->vmap, 0, 0, bo->size); + + hdcp_message->hdcp_bo = bo; + hdcp_message->hdcp_cmd_in = cmd_in; + hdcp_message->hdcp_cmd_out = cmd_out; +out: + return ret; } -void intel_hdcp_gsc_fini(struct drm_i915_private *i915) +static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display) { + struct intel_hdcp_gsc_message *hdcp_message; + int ret; + + hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL); + + if (!hdcp_message) + return -ENOMEM; + + /* + * NOTE: No need to lock the comp mutex here as it is already + * going to be taken before this 
function called + */ + ret = intel_hdcp_gsc_initialize_message(display, hdcp_message); + if (ret) { + drm_err(display->drm, "Could not initialize hdcp_message\n"); + kfree(hdcp_message); + return ret; + } + + display->hdcp.hdcp_message = hdcp_message; + return ret; } -ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, +static const struct i915_hdcp_ops gsc_hdcp_ops = { + .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session, + .verify_receiver_cert_prepare_km = + intel_hdcp_gsc_verify_receiver_cert_prepare_km, + .verify_hprime = intel_hdcp_gsc_verify_hprime, + .store_pairing_info = intel_hdcp_gsc_store_pairing_info, + .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check, + .verify_lprime = intel_hdcp_gsc_verify_lprime, + .get_session_key = intel_hdcp_gsc_get_session_key, + .repeater_check_flow_prepare_ack = + intel_hdcp_gsc_repeater_check_flow_prepare_ack, + .verify_mprime = intel_hdcp_gsc_verify_mprime, + .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication, + .close_hdcp_session = intel_hdcp_gsc_close_session, +}; + +int intel_hdcp_gsc_init(struct intel_display *display) +{ + struct i915_hdcp_arbiter *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = data; + display->hdcp.arbiter->hdcp_dev = display->drm->dev; + display->hdcp.arbiter->ops = &gsc_hdcp_ops; + ret = intel_hdcp_gsc_hdcp2_init(display); + if (ret) + kfree(data); + + mutex_unlock(&display->hdcp.hdcp_mutex); + + return ret; +} + +void intel_hdcp_gsc_fini(struct intel_display *display) +{ + struct intel_hdcp_gsc_message *hdcp_message = + display->hdcp.hdcp_message; + struct i915_hdcp_arbiter *arb = display->hdcp.arbiter; + + if (hdcp_message) { + xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); + kfree(hdcp_message); + display->hdcp.hdcp_message = NULL; + } + + kfree(arb); + display->hdcp.arbiter = NULL; +} + +static int xe_gsc_send_sync(struct xe_device *xe, + struct intel_hdcp_gsc_message *hdcp_message, + u32 msg_size_in, u32 msg_size_out, + u32 addr_out_off) +{ + struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt; + struct iosys_map *map = &hdcp_message->hdcp_bo->vmap; + struct xe_gsc *gsc = >->uc.gsc; + int ret; + + ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in, + hdcp_message->hdcp_cmd_out, msg_size_out); + if (ret) { + drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret); + return ret; + } + + if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off)) + return -EAGAIN; + + ret = xe_gsc_read_out_header(xe, map, addr_out_off, + sizeof(struct hdcp_cmd_header), NULL); + + return ret; +} + +ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in, size_t msg_in_len, u8 *msg_out, size_t msg_out_len) { - return -ENODEV; + const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE; + struct intel_hdcp_gsc_message *hdcp_message; + u64 host_session_id; + u32 msg_size_in, msg_size_out; + u32 addr_out_off, addr_in_wr_off = 0; + int ret, tries = 0; + + if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) { + ret = -ENOSPC; + goto out; + } + + msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE; + msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE; + hdcp_message = xe->display.hdcp.hdcp_message; + addr_out_off = PAGE_SIZE; + + host_session_id = xe_gsc_create_host_session_id(); + xe_pm_runtime_get_noresume(xe); + addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap, + addr_in_wr_off, 
HECI_MEADDRESS_HDCP, + host_session_id, msg_in_len); + xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off, + msg_in, msg_in_len); + /* + * Keep sending request in case the pending bit is set no need to add + * message handle as we are using same address hence loc. of header is + * same and it will contain the message handle. we will send the message + * 20 times each message 50 ms apart + */ + do { + ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out, + addr_out_off); + + /* Only try again if gsc says so */ + if (ret != -EAGAIN) + break; + + msleep(50); + + } while (++tries < 20); + + if (ret) + goto out; + + xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap, + addr_out_off + HDCP_GSC_HEADER_SIZE, + msg_out_len); + +out: + xe_pm_runtime_put(xe); + return ret; } diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index ccf83c12b545..2a2f250fa495 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -6,31 +6,37 @@ /* for ioread64 */ #include <linux/io-64-nonatomic-lo-hi.h> +#include "regs/xe_gtt_defs.h" #include "xe_ggtt.h" -#include "i915_drv.h" #include "intel_atomic_plane.h" +#include "intel_crtc.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" #include "intel_plane_initial.h" +#include "xe_bo.h" +#include "xe_wa.h" + +#include <generated/xe_wa_oob.h> static bool -intel_reuse_initial_plane_obj(struct drm_i915_private *i915, - const struct intel_initial_plane_config *plane_config, +intel_reuse_initial_plane_obj(struct intel_crtc *this, + const struct intel_initial_plane_config plane_configs[], struct drm_framebuffer **fb) { + struct xe_device *xe = to_xe_device(this->base.dev); struct intel_crtc *crtc; - for_each_intel_crtc(&i915->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); + for_each_intel_crtc(&xe->drm, crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_plane_state *plane_state = + const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); if (!crtc_state->uapi.active) continue; @@ -38,7 +44,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915, if (!plane_state->ggtt_vma) continue; - if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { + if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) { *fb = plane_state->hw.fb; return true; } @@ -60,7 +66,7 @@ initial_plane_bo(struct xe_device *xe, if (plane_config->size == 0) return NULL; - flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT; + flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT; base = round_down(plane_config->base, page_size); if (IS_DGFX(xe)) { @@ -77,7 +83,7 @@ initial_plane_bo(struct xe_device *xe, } phys_base = pte & ~(page_size - 1); - flags |= XE_BO_CREATE_VRAM0_BIT; + flags |= XE_BO_FLAG_VRAM0; /* * We don't currently expect this to ever be placed in the @@ -99,7 +105,10 @@ initial_plane_bo(struct xe_device *xe, if (!stolen) return NULL; phys_base = base; - flags |= XE_BO_CREATE_STOLEN_BIT; + flags |= XE_BO_FLAG_STOLEN; + + if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) + return NULL; /* * If the FB is too big, just don't use it since fbdev is not very @@ -131,8 +140,7 @@ static bool 
intel_alloc_initial_plane_obj(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct xe_device *xe = to_xe_device(crtc->base.dev); struct drm_mode_fb_cmd2 mode_cmd = { 0 }; struct drm_framebuffer *fb = &plane_config->fb->base; struct xe_bo *bo; @@ -144,9 +152,9 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, case I915_FORMAT_MOD_4_TILED: break; default: - drm_dbg(&dev_priv->drm, - "Unsupported modifier for initial FB: 0x%llx\n", - fb->modifier); + drm_dbg_kms(&xe->drm, + "Unsupported modifier for initial FB: 0x%llx\n", + fb->modifier); return false; } @@ -157,13 +165,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.modifier[0] = fb->modifier; mode_cmd.flags = DRM_MODE_FB_MODIFIERS; - bo = initial_plane_bo(dev_priv, plane_config); + bo = initial_plane_bo(xe, plane_config); if (!bo) return false; if (intel_framebuffer_init(to_intel_framebuffer(fb), - bo, &mode_cmd)) { - drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); + &bo->ttm.base, &mode_cmd)) { + drm_dbg_kms(&xe->drm, "intel fb init failed\n"); goto err_bo; } /* Reference handed over to fb */ @@ -178,16 +186,14 @@ err_bo: static void intel_find_initial_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) + struct intel_initial_plane_config plane_configs[]) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_initial_plane_config *plane_config = + &plane_configs[crtc->pipe]; struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct drm_framebuffer *fb; struct i915_vma *vma; @@ -201,15 +207,15 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, if (intel_alloc_initial_plane_obj(crtc, plane_config)) fb = &plane_config->fb->base; - else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb)) + else if (!intel_reuse_initial_plane_obj(crtc, plane_configs, &fb)) goto nofb; plane_state->uapi.rotation = plane_config->rotation; intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->uapi.rotation, &plane_state->view); - vma = intel_pin_and_fence_fb_obj(fb, false, &plane_state->view.gtt, - false, &plane_state->flags); + vma = intel_fb_pin_to_ggtt(fb, &plane_state->view.gtt, + 0, 0, false, &plane_state->flags); if (IS_ERR(vma)) goto nofb; @@ -233,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); plane_config->vma = vma; - - /* - * Flip to the newly created mapping ASAP, so we can re-use the - * first part of GGTT for WOPCM, prevent flickering, and prevent - * the lookup of sysmem scratch pages. 
-	 */
-	plane->check_plane(crtc_state, plane_state);
-	plane->async_flip(plane, crtc_state, plane_state, true);
 
 	return;
 
 nofb:
@@ -267,25 +265,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
 	}
 }
 
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+void intel_initial_plane_config(struct intel_display *display)
 {
-	struct xe_device *xe = to_xe_device(crtc->base.dev);
-	struct intel_initial_plane_config plane_config = {};
+	struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
+	struct intel_crtc *crtc;
 
-	/*
-	 * Note that reserving the BIOS fb up front prevents us
-	 * from stuffing other stolen allocations like the ring
-	 * on top. This prevents some ugliness at boot time, and
-	 * can even allow for smooth boot transitions if the BIOS
-	 * fb is large enough for the active pipe configuration.
-	 */
-	xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+	for_each_intel_crtc(display->drm, crtc) {
+		struct intel_initial_plane_config *plane_config =
+			&plane_configs[crtc->pipe];
 
-	/*
-	 * If the fb is shared between multiple heads, we'll
-	 * just get the first one.
-	 */
-	intel_find_initial_plane_obj(crtc, &plane_config);
+		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+			continue;
+
+		/*
+		 * Note that reserving the BIOS fb up front prevents us
+		 * from stuffing other stolen allocations like the ring
+		 * on top. This prevents some ugliness at boot time, and
+		 * can even allow for smooth boot transitions if the BIOS
+		 * fb is large enough for the active pipe configuration.
+		 */
+		display->funcs.display->get_initial_plane_config(crtc, plane_config);
+
+		/*
+		 * If the fb is shared between multiple heads, we'll
+		 * just get the first one.
+		 */
+		intel_find_initial_plane_obj(crtc, plane_configs);
+
+		if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
+			intel_crtc_wait_for_next_vblank(crtc);
 
-	plane_config_fini(&plane_config);
+		plane_config_fini(plane_config);
+	}
 }
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
new file mode 100644
index 000000000000..2c0d4e144e09
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_device.h"
+#include "intel_display_types.h"
+#include "intel_tdf.h"
+
+void intel_td_flush(struct drm_i915_private *i915)
+{
+	xe_device_td_flush(i915);
+}
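
The ext/i915_irq.c hunks at the top of this diff replace gen3_irq_reset()/gen3_irq_init(), which took IMR, IIR and IER as three loose i915_reg_t parameters (in a different order in each function), with gen2_* variants taking a single struct i915_irq_regs. The following is a minimal, self-contained C sketch of that pattern, not the kernel's API: struct irq_regs, the mmio_read()/mmio_write() helpers and the register offsets are hypothetical stand-ins for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct i915_irq_regs: pass the IMR/IER/IIR
 * triple as one value instead of three loose register parameters. */
struct irq_regs {
	uint32_t imr, ier, iir;	/* offsets into the fake MMIO space below */
};

static uint32_t mmio[0x100];	/* fake register file, indexed by offset */

static void mmio_write(uint32_t reg, uint32_t val) { mmio[reg] = val; }
static uint32_t mmio_read(uint32_t reg) { return mmio[reg]; }

/* Mirrors the gen2_irq_reset() sequence above: mask all interrupts,
 * disable delivery, then clear IIR twice since it can queue two events. */
static void irq_reset(struct irq_regs regs)
{
	mmio_write(regs.imr, 0xffffffff);
	(void)mmio_read(regs.imr);	/* posting read */

	mmio_write(regs.ier, 0);

	mmio_write(regs.iir, 0xffffffff);
	(void)mmio_read(regs.iir);
	mmio_write(regs.iir, 0xffffffff);
	(void)mmio_read(regs.iir);
}

/* Mirrors gen2_irq_init(): enable delivery, then unmask, with a posting
 * read so the writes have landed before interrupts are relied upon. */
static void irq_init(struct irq_regs regs, uint32_t imr_val, uint32_t ier_val)
{
	mmio_write(regs.ier, ier_val);
	mmio_write(regs.imr, imr_val);
	(void)mmio_read(regs.imr);
}

int main(void)
{
	/* Offsets chosen arbitrarily for the demo. */
	struct irq_regs de_regs = { .imr = 0x44, .ier = 0x48, .iir = 0x4c };

	irq_reset(de_regs);
	irq_init(de_regs, ~0x1u, 0x1);
	printf("imr=%08" PRIx32 " ier=%08" PRIx32 "\n",
	       mmio_read(de_regs.imr), mmio_read(de_regs.ier));
	return 0;
}

Packing the triple into one struct keeps call sites short and removes the argument-order hazard visible in the old signatures, where reset and init accepted the same three registers in different orders.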