-rw-r--r--  Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r--  Documentation/gpu/driver-uapi.rst | 11
-rw-r--r--  Documentation/gpu/drm-mm.rst | 2
-rw-r--r--  drivers/accel/ivpu/Makefile | 3
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 35
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 23
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 41
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c | 10
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h | 12
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c (renamed from drivers/accel/ivpu/ivpu_hw_mtl.c) | 485
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx_reg.h | 281
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 1178
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx_reg.h | 267
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_mtl_reg.h | 281
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c | 84
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.c | 8
-rw-r--r--  drivers/gpu/drm/Kconfig | 16
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 25
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 84
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 64
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 74
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_clock_gating.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 10
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 2
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_pch.c | 18
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/mcde/mcde_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_aal.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ccorr.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_merge.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 366
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 104
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 16
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mdp_rdma.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/if000c.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/vmm.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 219
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 93
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.c | 411
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.h | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.c | 419
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.h | 127
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 1916
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.h | 108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/vmm.c | 100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 197
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c | 27
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 99
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 16
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c | 6
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c | 6
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c | 57
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/tests/drm_exec_test.c | 28
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/ttm/tests/.kunitconfig | 4
-rw-r--r--  drivers/gpu/drm/ttm/tests/Makefile | 6
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_device_test.c | 212
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c | 113
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h | 41
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c | 437
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c | 2
-rw-r--r--  drivers/media/test-drivers/vivid/Kconfig | 4
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-osd.c | 4
-rw-r--r--  drivers/video/fbdev/Kconfig | 160
-rw-r--r--  drivers/video/fbdev/acornfb.c | 4
-rw-r--r--  drivers/video/fbdev/asiliantfb.c | 4
-rw-r--r--  drivers/video/fbdev/atmel_lcdfb.c | 4
-rw-r--r--  drivers/video/fbdev/aty/aty128fb.c | 4
-rw-r--r--  drivers/video/fbdev/carminefb.c | 5
-rw-r--r--  drivers/video/fbdev/chipsfb.c | 4
-rw-r--r--  drivers/video/fbdev/da8xx-fb.c | 4
-rw-r--r--  drivers/video/fbdev/efifb.c | 4
-rw-r--r--  drivers/video/fbdev/fm2fb.c | 4
-rw-r--r--  drivers/video/fbdev/fsl-diu-fb.c | 4
-rw-r--r--  drivers/video/fbdev/g364fb.c | 4
-rw-r--r--  drivers/video/fbdev/geode/Kconfig | 12
-rw-r--r--  drivers/video/fbdev/geode/gx1fb_core.c | 5
-rw-r--r--  drivers/video/fbdev/geode/gxfb_core.c | 5
-rw-r--r--  drivers/video/fbdev/geode/lxfb_core.c | 5
-rw-r--r--  drivers/video/fbdev/goldfishfb.c | 4
-rw-r--r--  drivers/video/fbdev/grvga.c | 4
-rw-r--r--  drivers/video/fbdev/gxt4500.c | 4
-rw-r--r--  drivers/video/fbdev/i740fb.c | 4
-rw-r--r--  drivers/video/fbdev/imxfb.c | 4
-rw-r--r--  drivers/video/fbdev/kyro/fbdev.c | 4
-rw-r--r--  drivers/video/fbdev/macfb.c | 4
-rw-r--r--  drivers/video/fbdev/maxinefb.c | 4
-rw-r--r--  drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 4
-rw-r--r--  drivers/video/fbdev/mmp/fb/Kconfig | 4
-rw-r--r--  drivers/video/fbdev/mmp/fb/mmpfb.c | 4
-rw-r--r--  drivers/video/fbdev/mx3fb.c | 4
-rw-r--r--  drivers/video/fbdev/ocfb.c | 4
-rw-r--r--  drivers/video/fbdev/offb.c | 4
-rw-r--r--  drivers/video/fbdev/omap/Kconfig | 4
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c | 4
-rw-r--r--  drivers/video/fbdev/platinumfb.c | 4
-rw-r--r--  drivers/video/fbdev/pmag-aa-fb.c | 4
-rw-r--r--  drivers/video/fbdev/pmag-ba-fb.c | 4
-rw-r--r--  drivers/video/fbdev/pmagb-b-fb.c | 4
-rw-r--r--  drivers/video/fbdev/pxa168fb.c | 4
-rw-r--r--  drivers/video/fbdev/pxafb.c | 4
-rw-r--r--  drivers/video/fbdev/q40fb.c | 4
-rw-r--r--  drivers/video/fbdev/s3c-fb.c | 4
-rw-r--r--  drivers/video/fbdev/sh7760fb.c | 4
-rw-r--r--  drivers/video/fbdev/simplefb.c | 4
-rw-r--r--  drivers/video/fbdev/sstfb.c | 4
-rw-r--r--  drivers/video/fbdev/sunxvr1000.c | 4
-rw-r--r--  drivers/video/fbdev/sunxvr2500.c | 4
-rw-r--r--  drivers/video/fbdev/uvesafb.c | 4
-rw-r--r--  drivers/video/fbdev/valkyriefb.c | 4
-rw-r--r--  drivers/video/fbdev/vesafb.c | 4
-rw-r--r--  drivers/video/fbdev/xilinxfb.c | 4
-rw-r--r--  include/drm/drm_exec.h | 14
-rw-r--r--  include/drm/drm_gem.h | 15
-rw-r--r--  include/drm/ttm/ttm_bo.h | 2
-rw-r--r--  include/uapi/drm/ivpu_accel.h | 9
-rw-r--r--  include/uapi/drm/nouveau_drm.h | 265
-rw-r--r--  samples/Kconfig | 4
-rw-r--r--  samples/vfio-mdev/mdpy-fb.c | 4
214 files changed, 8330 insertions(+), 1844 deletions(-)
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
index 905c064cd106..ef162b51d010 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
@@ -18,6 +18,7 @@ properties:
enum:
- edt,et028013dma
- inanbo,t28cp45tn89-v17
+ - jasonic,jt240mhqs-hwt-ek-e3
- sitronix,st7789v
reg: true
@@ -25,6 +26,7 @@ properties:
power-supply: true
backlight: true
port: true
+ rotation: true
spi-cpha: true
spi-cpol: true
@@ -58,6 +60,7 @@ examples:
reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
backlight = <&pwm_bl>;
power-supply = <&power>;
+ rotation = <180>;
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 1e2e51401dc5..1dfafc339ddd 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -677,6 +677,8 @@ patternProperties:
description: iWave Systems Technologies Pvt. Ltd.
"^jadard,.*":
description: Jadard Technology Inc.
+ "^jasonic,.*":
+ description: Jasonic Technology Ltd.
"^jdi,.*":
description: Japan Display Inc.
"^jedec,.*":
diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst
index 4411e6919a3d..c08bcbb95fb3 100644
--- a/Documentation/gpu/driver-uapi.rst
+++ b/Documentation/gpu/driver-uapi.rst
@@ -6,3 +6,14 @@ drm/i915 uAPI
=============
.. kernel-doc:: include/uapi/drm/i915_drm.h
+
+drm/nouveau uAPI
+================
+
+VM_BIND / EXEC uAPI
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/nouveau/nouveau_exec.c
+ :doc: Overview
+
+.. kernel-doc:: include/uapi/drm/nouveau_drm.h
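[Editor's note] The uAPI documented above is the new nouveau VM_BIND/EXEC interface whose header lands later in this diff (include/uapi/drm/nouveau_drm.h). As a reading aid, here is a minimal userspace sketch of the intended call flow; the ioctl and struct names follow this series, but field layouts should be verified against the final header — this is an illustration, not a reference implementation.

/* Hedged sketch: map a GEM object into a userspace-managed VA space
 * using the new nouveau VM_BIND uAPI. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static int map_bo(int fd, uint32_t bo_handle, uint64_t gpu_va, uint64_t size)
{
	struct drm_nouveau_vm_init init = {
		.kernel_managed_addr = 0,	/* VA range left to the kernel */
		.kernel_managed_size = 1ull << 20,
	};
	struct drm_nouveau_vm_bind_op op = {
		.op        = DRM_NOUVEAU_VM_BIND_OP_MAP,
		.handle    = bo_handle,		/* GEM handle backing the mapping */
		.addr      = gpu_va,		/* userspace-chosen virtual address */
		.bo_offset = 0,
		.range     = size,
	};
	struct drm_nouveau_vm_bind bind = {
		.op_count = 1,
		.op_ptr   = (uintptr_t)&op,	/* synchronous bind, no syncobjs */
	};

	/* VM_INIT must be issued once per client before any VM_BIND/EXEC. */
	if (ioctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init))
		return -1;
	return ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
}

Execution then goes through DRM_IOCTL_NOUVEAU_EXEC, whose push buffers reference addresses mapped this way.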
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 513197359aba..c19b34b1c0ed 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -520,7 +520,7 @@ DRM Cache Handling and Fast WC memcpy()
.. _drm_sync_objects:
DRM Sync Objects
-===========================
+================
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:doc: Overview
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index 9858d9fea36e..e4328b430564 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -7,7 +7,8 @@ intel_vpu-y := \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
- ivpu_hw_mtl.o \
+ ivpu_hw_37xx.o \
+ ivpu_hw_40xx.o \
ivpu_ipc.o \
ivpu_job.o \
ivpu_jsm_msg.o \
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 815dd9b588e9..ba79f397c9e8 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -115,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
+static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+{
+ switch (args->index) {
+ case DRM_IVPU_CAP_METRIC_STREAMER:
+ args->value = 0;
+ break;
+ case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
+ args->value = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
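[Editor's note] The new DRM_IVPU_PARAM_CAPABILITIES query above is index-based: userspace passes a capability ID in args->index and reads a boolean out of args->value, with -EINVAL for unknown indices. A hedged userspace sketch follows; the constant names are taken from include/uapi/drm/ivpu_accel.h as extended by this series.

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Probe one ivpu capability; an unknown index fails with -EINVAL, which
 * we simply treat as "not supported by this kernel". */
static bool ivpu_has_cap(int fd, uint32_t cap)
{
	struct drm_ivpu_param args = {
		.param = DRM_IVPU_PARAM_CAPABILITIES,
		.index = cap,	/* e.g. DRM_IVPU_CAP_DMA_MEMORY_RANGE */
	};

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args))
		return false;
	return args.value != 0;
}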
@@ -144,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_get_context_count(vdev);
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
- args->value = vdev->hw->ranges.user_low.start;
+ args->value = vdev->hw->ranges.user.start;
break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority;
@@ -174,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_SKU:
args->value = vdev->hw->sku;
break;
+ case DRM_IVPU_PARAM_CAPABILITIES:
+ ret = ivpu_get_capabilities(vdev, args);
+ break;
default:
ret = -EINVAL;
break;
@@ -443,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
- /* VPU MTL does not require PCI spec 10m D3hot delay */
- if (ivpu_is_mtl(vdev))
+ /* VPU 37XX does not require 10m D3hot delay */
+ if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
@@ -482,8 +501,13 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm)
return -ENOMEM;
- vdev->hw->ops = &ivpu_hw_mtl_ops;
- vdev->hw->dma_bits = 38;
+ if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
+ vdev->hw->ops = &ivpu_hw_40xx_ops;
+ vdev->hw->dma_bits = 48;
+ } else {
+ vdev->hw->ops = &ivpu_hw_37xx_ops;
+ vdev->hw->dma_bits = 38;
+ }
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
@@ -610,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 8fe8cda2e39d..9e8c075fe9ef 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -23,6 +23,10 @@
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_LNL 0x643e
+
+#define IVPU_HW_37XX 37
+#define IVPU_HW_40XX 40
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent invalid context */
@@ -76,6 +80,7 @@ struct ivpu_wa_table {
bool clear_runtime_mem;
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
+ bool disable_clock_relinquish;
};
struct ivpu_hw_info;
@@ -146,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
-static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
-{
- return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
-}
-
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->revision;
@@ -161,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device;
}
+static inline int ivpu_hw_gen(struct ivpu_device *vdev)
+{
+ switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_MTL:
+ return IVPU_HW_37XX;
+ case PCI_DEVICE_ID_LNL:
+ return IVPU_HW_40XX;
+ default:
+ ivpu_err(vdev, "Unknown VPU device\n");
+ return 0;
+ }
+}
+
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{
return container_of(dev, struct ivpu_device, drm);
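[Editor's note] ivpu_hw_gen() replaces the removed ivpu_is_mtl() check with a device-ID-to-generation mapping, so call sites compare generations instead of individual SKUs. A standalone sketch of that pattern, with a deliberately hypothetical future entry (ID_FUTURE and generation 50 do not exist in this series) to show how it extends:

#include <stdio.h>

#define ID_MTL    0x7d1d	/* PCI_DEVICE_ID_MTL from this diff */
#define ID_LNL    0x643e	/* PCI_DEVICE_ID_LNL from this diff */
#define ID_FUTURE 0x0000	/* hypothetical placeholder */

static int hw_gen(unsigned int device_id)
{
	switch (device_id) {
	case ID_MTL:
		return 37;
	case ID_LNL:
		return 40;
	case ID_FUTURE:		/* hypothetical future generation */
		return 50;
	default:
		return 0;	/* unknown device; caller must handle */
	}
}

int main(void)
{
	printf("gen %d\n", hw_gen(ID_LNL));	/* prints "gen 40" */
	return 0;
}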
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 317716482a15..9827ea4d7b83 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -43,12 +43,20 @@ static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
+static struct {
+ int gen;
+ const char *name;
+} fw_names[] = {
+ { IVPU_HW_37XX, "vpu_37xx.bin" },
+ { IVPU_HW_37XX, "mtl_vpu.bin" },
+ { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+ { IVPU_HW_40XX, "vpu_40xx.bin" },
+ { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+};
+
static int ivpu_fw_request(struct ivpu_device *vdev)
{
- static const char * const fw_names[] = {
- "mtl_vpu.bin",
- "intel/vpu/mtl_vpu_v0.0.bin"
- };
int ret = -ENOENT;
int i;
@@ -60,9 +68,12 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
}
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
- ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
+ if (fw_names[i].gen != ivpu_hw_gen(vdev))
+ continue;
+
+ ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
if (!ret) {
- vdev->fw->name = fw_names[i];
+ vdev->fw->name = fw_names[i].name;
return 0;
}
}
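[Editor's note] The firmware table above is generation-filtered and ordered: for a given ivpu_hw_gen() the loop tries each matching name in turn and keeps the first one that firmware_request_nowarn() can actually load, so the new vpu_37xx.bin takes precedence over the legacy mtl_vpu.bin when both are installed. A standalone sketch of that selection logic, with an availability stub standing in for the firmware loader:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fw_entry { int gen; const char *name; };

/* Stand-in for firmware_request_nowarn(): pretend only the legacy
 * name is present under /lib/firmware. */
static bool fw_available(const char *name)
{
	return !strcmp(name, "mtl_vpu.bin");
}

static const char *pick_fw(const struct fw_entry *tbl, int n, int gen)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].gen != gen)
			continue;		/* skip other generations */
		if (fw_available(tbl[i].name))
			return tbl[i].name;	/* first loadable match wins */
	}
	return NULL;
}

int main(void)
{
	static const struct fw_entry tbl[] = {
		{ 37, "vpu_37xx.bin" },
		{ 37, "mtl_vpu.bin" },	/* legacy fallback, still accepted */
		{ 40, "vpu_40xx.bin" },
	};

	printf("%s\n", pick_fw(tbl, 3, 37));	/* prints "mtl_vpu.bin" here */
	return 0;
}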
@@ -195,7 +206,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL;
}
- ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+ ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0;
}
@@ -236,7 +247,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
}
if (fw->shave_nn_size) {
- fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+ fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
@@ -434,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
- boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
- boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
- vdev->hw->ranges.global_low.start;
+ boot_params->shared_region_base = vdev->hw->ranges.global.start;
+ boot_params->shared_region_size = vdev->hw->ranges.global.end -
+ vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
@@ -444,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
- boot_params->global_aliased_pio_base =
- vdev->hw->ranges.global_aliased_pio.start;
- boot_params->global_aliased_pio_size =
- ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+ boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+ boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@@ -455,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
/* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
- ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+ ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 52b339aefadc..2981bb32c755 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
int ret;
if (!range) {
- if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
- range = &vdev->hw->ranges.user_high;
+ if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+ range = &vdev->hw->ranges.shave;
+ else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+ range = &vdev->hw->ranges.dma;
else
- range = &vdev->hw->ranges.user_low;
+ range = &vdev->hw->ranges.user;
}
mutex_lock(&ctx->lock);
@@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
fixed_range.end = vpu_addr + size;
range = &fixed_range;
} else {
- range = &vdev->hw->ranges.global_low;
+ range = &vdev->hw->ranges.global;
}
bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
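[Editor's note] From userspace, the range selection above is driven purely by BO creation flags: DRM_IVPU_BO_SHAVE_MEM picks the SHAVE range, the new DRM_IVPU_BO_DMA_MEM picks the DMA range, and no flag falls back to the default user range. A hedged sketch; struct and ioctl names follow include/uapi/drm/ivpu_accel.h and should be checked against the installed header.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Allocate a BO whose VPU address is taken from the 8 GB DMA range. */
static int ivpu_bo_create_dma(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_ivpu_bo_create args = {
		.size  = size,
		.flags = DRM_IVPU_BO_DMA_MEM,	/* new flag from this series */
	};
	int ret = ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args);

	if (!ret)
		*handle = args.handle;	/* args.vpu_addr is also filled in */
	return ret;
}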
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 0393901be492..ab341237bcf9 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -38,11 +38,10 @@ struct ivpu_addr_range {
struct ivpu_hw_info {
const struct ivpu_hw_ops *ops;
struct {
- struct ivpu_addr_range global_low;
- struct ivpu_addr_range global_high;
- struct ivpu_addr_range user_low;
- struct ivpu_addr_range user_high;
- struct ivpu_addr_range global_aliased_pio;
+ struct ivpu_addr_range global;
+ struct ivpu_addr_range user;
+ struct ivpu_addr_range shave;
+ struct ivpu_addr_range dma;
} ranges;
struct {
u8 min_ratio;
@@ -60,7 +59,8 @@ struct ivpu_hw_info {
int dma_bits;
};
-extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
+extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
+extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
{
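[Editor's note] Read together with ivpu_hw_37xx_info_init() further down in this diff, the renamed ranges give the following 37xx address map; the values below are taken from this diff itself, not from separate hardware documentation.

/*
 * 37xx VPU address map implied by ivpu_hw_37xx_info_init() below:
 *
 *   ranges.global: 0x0_8000_0000, 512 MB  - kernel/global allocations
 *   ranges.user:   0x0_c000_0000, 255 MB  - default user BOs (aliased PIO)
 *   ranges.shave:  0x1_8000_0000, 2 GB    - DRM_IVPU_BO_SHAVE_MEM
 *   ranges.dma:    0x2_0000_0000, 8 GB    - DRM_IVPU_BO_DMA_MEM
 */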
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index f1211e74017d..9eae1c241bc0 100644
--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -5,7 +5,7 @@
#include "ivpu_drv.h"
#include "ivpu_fw.h"
-#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@@ -39,34 +39,34 @@
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
-#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
-#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
- (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
- (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
-#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
- (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
@@ -84,8 +84,8 @@ static char *ivpu_platform_to_str(u32 platform)
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
- u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
- u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
+ u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
+ u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
vdev->platform = platform;
@@ -123,7 +123,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
- return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
/* Send KMD initiated workpoint change */
@@ -139,23 +139,23 @@ static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ra
return ret;
}
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
- val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
- val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
- val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
- REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret)
@@ -171,7 +171,7 @@ static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
if (IVPU_WA(punit_disabled))
return 0;
- return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
@@ -179,7 +179,7 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
- return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
@@ -188,21 +188,21 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
u32 fmin_fuse, fmax_fuse;
- fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
- fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
- fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+ fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
+ fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
- fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
- fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+ fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
+ fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
-static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
- return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+ return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
@@ -248,7 +248,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
return ret;
}
- ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
+ ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
return ret;
@@ -272,52 +272,52 @@ static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
u32 val = 0;
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
- REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
if (enable) {
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
} else {
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
}
- REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
if (enable) {
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
} else {
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
}
- REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
+ REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
- if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@@ -325,9 +325,9 @@ static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
- if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@@ -335,9 +335,9 @@ static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
- if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
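[Editor's note] The renames in this hunk and the ones that follow preserve the same read-modify-write idiom throughout: read a register, set or clear one named field, write the value back. Stripped of the ivpu register plumbing, REG_SET_FLD/REG_CLR_FLD reduce to ordinary mask arithmetic; a generic sketch with made-up field masks:

#include <stdint.h>

#define FLD_TOP_NOC (1u << 0)	/* illustrative masks, not real ivpu fields */
#define FLD_DSS_MAS (1u << 1)

/* Mirror of the *_drive() helpers above: compute the new register value;
 * the caller writes it back (REGV_WR32() in the driver). */
static uint32_t drive_fields(uint32_t val, int enable)
{
	if (enable) {
		val |= FLD_TOP_NOC;	/* REG_SET_FLD */
		val |= FLD_DSS_MAS;
	} else {
		val &= ~FLD_TOP_NOC;	/* REG_CLR_FLD */
		val &= ~FLD_DSS_MAS;
	}
	return val;
}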
@@ -385,7 +385,7 @@ static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
- REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
@@ -393,12 +393,12 @@ static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
- val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+ val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
- REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
@@ -453,26 +453,26 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
@@ -481,32 +481,32 @@ static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 ex
if (ivpu_is_fpga(vdev))
return 0;
- return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+ return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
+ u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
if (enable)
- val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
else
- val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
- REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
+ REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
@@ -538,25 +538,25 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
- REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
+ REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
- val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
- REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
+ REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
@@ -576,10 +576,10 @@ static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = vdev->fw->entry_point >> 9;
- REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+ REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
- val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
- REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+ REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
@@ -590,27 +590,27 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
- val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
if (enable)
- val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
else
- val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
- REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
+ val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
-static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
@@ -620,16 +620,15 @@ static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
ivpu_pll_init_frequency_ratios(vdev);
- ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
- ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
- ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
- ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
- hw->ranges.global_aliased_pio = hw->ranges.user_low;
+ ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
return 0;
}
-static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@@ -637,24 +636,24 @@ static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
- val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
- val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
- REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
- ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
-static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@@ -667,7 +666,7 @@ static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
@@ -678,7 +677,7 @@ static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
@@ -686,11 +685,11 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
- ret = ivpu_hw_mtl_reset(vdev);
+ ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
- ret = ivpu_hw_mtl_d0i3_disable(vdev);
+ ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -732,7 +731,7 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
ivpu_boot_no_snoop_enable(vdev);
ivpu_boot_tbu_mmu_enable(vdev);
@@ -741,32 +740,31 @@ static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
return 0;
}
-static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
+static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
- val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
- return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
- REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
+ return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
-static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
- if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
+ if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
ivpu_err(vdev, "Failed to reset the VPU\n");
- }
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
- if (ivpu_hw_mtl_d0i3_enable(vdev)) {
+ if (ivpu_hw_37xx_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
@@ -774,7 +772,7 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
return ret;
}
-static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
u32 val;
@@ -792,7 +790,7 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}
-static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
@@ -806,35 +804,35 @@ static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
}
/* Register indirect accesses */
-static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
- pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
- pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+ pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
+ pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}
-static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
- return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
+ return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
-static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
- return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
+ return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
-static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
- return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
+ return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
-static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
@@ -842,52 +840,52 @@ static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
-static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
- return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
+ return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
-static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
- return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+ return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
-static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
-static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
- REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
-static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
- REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
- REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
- REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+ REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+ REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
-static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
- REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
- REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+ REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
-static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
ivpu_pm_schedule_recovery(vdev);
}
-static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
@@ -895,7 +893,7 @@ static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
ivpu_pm_schedule_recovery(vdev);
}
-static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
@@ -903,65 +901,66 @@ static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
}
/* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
+static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
- u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
+ REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
- ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
- ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
- ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
return status;
}
/* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
- u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+ u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
if (status == 0)
return 0;
/* Disable global interrupt before handling local buttress interrupts */
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
+ REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
- ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
- REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
+ REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
- u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+ u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
- ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
- REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+ ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
@@ -971,12 +970,12 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
* Writing 1 triggers an interrupt, so we can't perform read update write.
* Clear local interrupt status by writing 0 to all bits.
*/
- REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
else
- REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
/* Re-enable global interrupt */
- REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@@ -984,65 +983,65 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
return status;
}
-static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
- ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
- ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+ ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
+ ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
-static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
- u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+ u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
- if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
+ if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
ivpu_err(vdev, "WDT MSS timeout detected\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
ivpu_err(vdev, "WDT NCE timeout detected\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+ if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
ivpu_err(vdev, "NOC Firewall irq detected\n");
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
- ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
- if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
- u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+ if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+ u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
- ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
- REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
}
}
-const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
- .info_init = ivpu_hw_mtl_info_init,
- .power_up = ivpu_hw_mtl_power_up,
- .is_idle = ivpu_hw_mtl_is_idle,
- .power_down = ivpu_hw_mtl_power_down,
- .boot_fw = ivpu_hw_mtl_boot_fw,
- .wdt_disable = ivpu_hw_mtl_wdt_disable,
- .diagnose_failure = ivpu_hw_mtl_diagnose_failure,
- .reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
- .reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
- .reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
- .reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
- .reg_db_set = ivpu_hw_mtl_reg_db_set,
- .reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
- .reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
- .reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
- .irq_clear = ivpu_hw_mtl_irq_clear,
- .irq_enable = ivpu_hw_mtl_irq_enable,
- .irq_disable = ivpu_hw_mtl_irq_disable,
- .irq_handler = ivpu_hw_mtl_irq_handler,
+const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
+ .info_init = ivpu_hw_37xx_info_init,
+ .power_up = ivpu_hw_37xx_power_up,
+ .is_idle = ivpu_hw_37xx_is_idle,
+ .power_down = ivpu_hw_37xx_power_down,
+ .boot_fw = ivpu_hw_37xx_boot_fw,
+ .wdt_disable = ivpu_hw_37xx_wdt_disable,
+ .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
+ .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
+ .reg_db_set = ivpu_hw_37xx_reg_db_set,
+ .reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
+ .reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
+ .reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
+ .irq_clear = ivpu_hw_37xx_irq_clear,
+ .irq_enable = ivpu_hw_37xx_irq_enable,
+ .irq_disable = ivpu_hw_37xx_irq_disable,
+ .irq_handler = ivpu_hw_37xx_irq_handler,
};
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
new file mode 100644
index 000000000000..6e4e915948f9
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_37XX_REG_H__
+#define __IVPU_HW_37XX_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
+
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
+#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
+#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
+
+#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
+#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
+#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+
+#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
+#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
+
+#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
+#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
+
+#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
+#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
+
+#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
+#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
+#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
+
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+
+#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
+
+#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_CPR_RST_SET 0x00000094u
+#define VPU_37XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_CPR_RST_CLR 0x00000098u
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_HW_VERSION 0x00000108u
+#define VPU_37XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
+#define VPU_37XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
+#define VPU_37XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
+
+#define VPU_37XX_HOST_SS_GEN_CTRL 0x00000118u
+#define VPU_37XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
+
+#define VPU_37XX_HOST_SS_NOC_QREQN 0x00000154u
+#define VPU_37XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_NOC_QACCEPTN 0x00000158u
+#define VPU_37XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_NOC_QDENY 0x0000015cu
+#define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
+#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
+#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
+#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
+
+#define VPU_37XX_HOST_SS_ICB_STATUS_0 0x00010210u
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
+
+#define VPU_37XX_HOST_SS_ICB_STATUS_1 0x00010214u
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
+
+#define VPU_37XX_HOST_SS_ICB_CLEAR_0 0x00010220u
+#define VPU_37XX_HOST_SS_ICB_CLEAR_1 0x00010224u
+#define VPU_37XX_HOST_SS_ICB_ENABLE_0 0x00010240u
+
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
+
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
+#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
+#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
+
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
+
+#define VPU_37XX_HOST_MMU_IDR0 0x00200000u
+#define VPU_37XX_HOST_MMU_IDR1 0x00200004u
+#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu
+#define VPU_37XX_HOST_MMU_IDR5 0x00200014u
+#define VPU_37XX_HOST_MMU_CR0 0x00200020u
+#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u
+#define VPU_37XX_HOST_MMU_CR1 0x00200028u
+#define VPU_37XX_HOST_MMU_CR2 0x0020002cu
+#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u
+#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u
+
+#define VPU_37XX_HOST_MMU_GERROR 0x00200060u
+#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
+
+#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u
+
+#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u
+#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
+#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u
+#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u
+#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu
+#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u
+#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u
+#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu
+#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
+#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
+
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
+
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
+
+#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
+#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
+#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
+#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
+
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
+#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
+
+#endif /* __IVPU_HW_37XX_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
new file mode 100644
index 000000000000..34626d66fa10
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#include <linux/dmi.h>
+
+#define TILE_MAX_NUM 6
+#define TILE_MAX_MASK 0x3f
+
+#define LNL_HW_ID 0x4040
+
+#define SKU_TILE_SHIFT 0u
+#define SKU_TILE_MASK 0x0000ffffu
+#define SKU_HW_ID_SHIFT 16u
+#define SKU_HW_ID_MASK 0xffff0000u
+
+#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CDYN_DEFAULT 0x80
+#define PLL_EPP_DEFAULT 0x80
+#define PLL_REF_CLK_FREQ (50 * 1000000)
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
+#define PLL_PROFILING_FREQ_DEFAULT 38400000
+#define PLL_PROFILING_FREQ_HIGH 400000000
+
+#define TIM_SAFE_ENABLE 0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
+
+#define TIMEOUT_US (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+
+#define WEIGHTS_DEFAULT 0xf711f711u
+#define WEIGHTS_ATS_DEFAULT 0x0000f711u
+
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
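+/* ICB registers 0 and 1 are adjacent, so one 64-bit access can clear or enable both at once */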
+
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \
+ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR)))
+
+#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
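+/* LOCAL_INT_MASK is a mask register (1 = masked): enable unmasks only the handled IRQ bits, disable masks everything */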
+
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static const char *ivpu_platform_to_str(u32 platform)
+{
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ return "IVPU_PLATFORM_SILICON";
+ case IVPU_PLATFORM_SIMICS:
+ return "IVPU_PLATFORM_SIMICS";
+ case IVPU_PLATFORM_FPGA:
+ return "IVPU_PLATFORM_FPGA";
+ default:
+ return "Invalid platform";
+ }
+}
+
+static const struct dmi_system_id ivpu_dmi_platform_simulation[] = {
+ {
+ .ident = "Intel Simics",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
+ },
+ },
+ {
+ .ident = "Intel Simics",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Simics"),
+ },
+ },
+ { }
+};
+
+static void ivpu_hw_read_platform(struct ivpu_device *vdev)
+{
+ if (dmi_check_system(ivpu_dmi_platform_simulation))
+ vdev->platform = IVPU_PLATFORM_SIMICS;
+ else
+ vdev->platform = IVPU_PLATFORM_SILICON;
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
+ ivpu_platform_to_str(vdev->platform), vdev->platform);
+}
+
+static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+{
+ vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.clear_runtime_mem = false;
+
+ if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
+ vdev->wa.disable_clock_relinquish = true;
+}
+
+static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+{
+ if (ivpu_is_fpga(vdev)) {
+ vdev->timeout.boot = 100000;
+ vdev->timeout.jsm = 50000;
+ vdev->timeout.tdr = 2000000;
+ vdev->timeout.reschedule_suspend = 1000;
+ } else if (ivpu_is_simics(vdev)) {
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 10000;
+ vdev->timeout.reschedule_suspend = 10;
+ } else {
+ vdev->timeout.boot = 1000;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 2000;
+ vdev->timeout.reschedule_suspend = 10;
+ }
+}
+
+static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
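+/*
+ * Workpoint request handshake: wait for any previous request to be consumed
+ * (SEND clear), program the payload registers, set SEND, then wait for the
+ * command to be latched.
+ */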
+static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
+ u16 target_ratio, u16 epp, u16 config, u16 cdyn)
+{
+ int ret;
+ u32 val;
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+ val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD);
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val);
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+}
+
+static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio;
+ u32 fmin_fuse, fmax_fuse;
+
+ fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE);
+ fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+ fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE);
+ fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+
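+ /* Clamp the requested ratios against the fused limits so that min_ratio <= pn_ratio <= max_ratio */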
+ hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
+ hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
+ hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+{
+ u16 config = enable ? PLL_CONFIG_DEFAULT : 0;
+ u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0;
+ u16 epp = enable ? PLL_EPP_DEFAULT : 0;
+ struct ivpu_hw_info *hw = vdev->hw;
+ u16 target_ratio = hw->pll.pn_ratio;
+ int ret;
+
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
+ PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);
+
+ ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
+ target_ratio, epp, config, cdyn);
+ if (ret) {
+ ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+ return ret;
+ }
+
+ if (enable) {
+ ret = ivpu_pll_wait_for_status_ready(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
+
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+ }
+
+ REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
+}
+
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
+}
+
+static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
+
+ ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+ return ret;
+ }
+
+ if (enable) {
+ REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
+ REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
+ }
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+ if (enable) {
+ val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ } else {
+ val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ }
+ REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
+
+ ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+
+ if (enable)
+ ndelay(500);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
+
+ if (!enable)
+ ndelay(500);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+ if (ivpu_is_fpga(vdev))
+ return 0;
+
+ return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU,
+ exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+
+ REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
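+/*
+ * Power-up ordering matters: enable trickle then full island power, wait for
+ * the island status, and only then ungate clocks/resets and drop isolation.
+ */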
+static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_boot_pwr_island_trickle_drive(vdev, true);
+ ivpu_boot_pwr_island_drive(vdev, true);
+
+ ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for power island status\n");
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_boot_host_ss_clk_drive(vdev, true);
+ ivpu_boot_host_ss_rst_drive(vdev, true);
+ ivpu_boot_pwr_island_isolation_drive(vdev, false);
+
+ return ret;
+}
+
+static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+ REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
+
+ ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_soc_cpu_drive(vdev, true);
+}
+
+static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+ u64 val64;
+
+ ret = ivpu_boot_soc_cpu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
+ return ret;
+ }
+
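+ /* Program the firmware entry point into the IMAGE_LOCATION field; ffs() gives the field's bit offset */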
+ val64 = vdev->fw->entry_point;
+ val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
+ REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
+
+ val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
+ val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
+ REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
+
+ ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+ ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
+
+ return 0;
+}
+
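+/* Poll INPROGRESS before and after flipping the I3 bit so D0i3 transitions never overlap */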
+static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL);
+ if (enable)
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
+ else
+ val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val);
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool ivpu_tile_disable_check(u32 config)
+{
+ /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
+ if (config == 0)
+ return true;
+
+ if (config > BIT(TILE_MAX_NUM - 1))
+ return false;
+
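+ /* config & (config - 1) clears the lowest set bit, so zero here means exactly one bit was set */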
+ if ((config & (config - 1)) == 0)
+ return true;
+
+ return false;
+}
+
+static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 tile_disable;
+ u32 tile_enable;
+ u32 fuse;
+
+ fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
+ if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) {
+ ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
+ return -EIO;
+ }
+
+ tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse);
+ if (!ivpu_tile_disable_check(tile_disable)) {
+ ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable);
+ return -EIO;
+ }
+
+ if (tile_disable)
+ ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
+ TILE_MAX_NUM - 1, ffs(tile_disable) - 1);
+ else
+ ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
+
+ tile_enable = (~tile_disable) & TILE_MAX_MASK;
+
+ hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
+ hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
+ hw->tile_fuse = tile_disable;
+ hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+
+ ivpu_pll_init_frequency_ratios(vdev);
+
+ ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
+ ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
+ ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+
+ return 0;
+}
+
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET);
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
+static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = ivpu_boot_d0i3_drive(vdev, true);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+ udelay(5); /* VPU requires 5 us to complete the transition */
+
+ return ret;
+}
+
+static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = ivpu_boot_d0i3_drive(vdev, false);
+ if (ret)
+ ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+ return ret;
+}
+
+static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev)
+{
+ u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+
+ if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
+ val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
+ else
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
+
+ REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
+}
+
+static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable");
+}
+
+static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev)
+{
+ u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+
+ val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
+ REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
+}
+
+static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_hw_40xx_reset(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_hw_read_platform(vdev);
+ ivpu_hw_wa_init(vdev);
+ ivpu_hw_timeouts_init(vdev);
+
+ ret = ivpu_hw_40xx_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+ ret = ivpu_pll_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+ return ret;
+ }
+
+ if (IVPU_WA(disable_clock_relinquish))
+ ivpu_hw_40xx_clock_relinquish_disable(vdev);
+ ivpu_hw_40xx_profiling_freq_reg_set(vdev);
+ ivpu_hw_40xx_ats_print(vdev);
+
+ ret = ivpu_boot_host_ss_check(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_boot_idle_gen_drive(vdev, false);
+
+ ret = ivpu_boot_pwr_domain_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_axi_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_boot_no_snoop_enable(vdev);
+ ivpu_boot_tbu_mmu_enable(vdev);
+
+ ret = ivpu_boot_soc_cpu_boot(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
+
+ return ret;
+}
+
+static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return true;
+
+ val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+ return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+ ivpu_warn(vdev, "Failed to reset the VPU\n");
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_hw_40xx_d0i3_enable(vdev)) {
+ ivpu_err(vdev, "Failed to enter D0I3\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
+{
+ u32 val;
+
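+ /* TIM_SAFE must be re-armed with the unlock value before each watchdog register write */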
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
+
+ val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
+ val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+ u32 pll_curr_ratio;
+
+ pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ);
+ pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK;
+
+ return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
+}
+
+static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
+ u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
+
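+ /* Doorbell registers sit at a fixed stride; the indexed write rings doorbell db_id by setting its SET bit */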
+ REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+
+ return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
+
+static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev)
+{
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+}
+
+static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev)
+{
+ REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+ REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+}
+
+static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
+{
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+ REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
+}
+
+static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+ /* TODO: For LNL hang consider engine reset instead of full recovery */
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+ ivpu_hw_wdt_disable(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+/* Handler for IRQs from VPU core (irqV) */
+static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (!status)
+ return IRQ_NONE;
+
+ REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ ivpu_mmu_irq_evtq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ ret |= ivpu_ipc_irq_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ ivpu_mmu_irq_gerr_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ ivpu_hw_40xx_irq_wdt_mss_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ ivpu_hw_40xx_irq_wdt_nce_handler(vdev);
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
+
+ return ret;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
+{
+ bool schedule_recovery = false;
+ u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+ REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) {
+ ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+ REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) {
+ ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+ REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) {
+ ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+ REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) {
+ ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+ REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) {
+ ivpu_err(vdev, "Survivability error detected\n");
+ schedule_recovery = true;
+ }
+
+ if (schedule_recovery)
+ ivpu_pm_schedule_recovery(vdev);
+
+ return IRQ_HANDLED;
+}
+
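+/* Dispatch to both IRQ sources and propagate IRQ_WAKE_THREAD if either handler requested it */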
+static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
+{
+ struct ivpu_device *vdev = ptr;
+ irqreturn_t ret = IRQ_NONE;
+
+ ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
+ ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+
+ if (ret & IRQ_WAKE_THREAD)
+ return IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
+{
+ u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+ if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev))
+ ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+ ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+ ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+ ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
+ ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+ REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+ }
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
+ ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
+ ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
+ ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
+ ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+ REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+
+ if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
+ ivpu_err(vdev, "Survivability error detected\n");
+}
+
+const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
+ .info_init = ivpu_hw_40xx_info_init,
+ .power_up = ivpu_hw_40xx_power_up,
+ .is_idle = ivpu_hw_40xx_is_idle,
+ .power_down = ivpu_hw_40xx_power_down,
+ .boot_fw = ivpu_hw_40xx_boot_fw,
+ .wdt_disable = ivpu_hw_40xx_wdt_disable,
+ .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
+ .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
+ .reg_db_set = ivpu_hw_40xx_reg_db_set,
+ .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
+ .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
+ .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
+ .irq_clear = ivpu_hw_40xx_irq_clear,
+ .irq_enable = ivpu_hw_40xx_irq_enable,
+ .irq_disable = ivpu_hw_40xx_irq_disable,
+ .irq_handler = ivpu_hw_40xx_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
new file mode 100644
index 000000000000..5139cfe88532
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_40XX_REG_H__
+#define __IVPU_HW_40XX_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
+
+#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
+#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
+
+#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
+
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
+#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
+
+#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
+#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
+
+#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
+
+#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
+#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
+
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
+
+#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
+
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
+#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
+#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
+
+#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
+#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
+
+#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
+#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
+
+#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
+#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
+
+#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
+#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
+#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_CSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_CLK_SET 0x00000084u
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_EN 0x00000090u
+#define VPU_40XX_HOST_SS_CPR_RST_EN_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_EN_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_EN_CSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_SET 0x00000094u
+#define VPU_40XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_CLR 0x00000098u
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_HW_VERSION 0x00000108u
+#define VPU_40XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
+#define VPU_40XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
+#define VPU_40XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
+
+#define VPU_40XX_HOST_SS_SW_VERSION 0x0000010cu
+
+#define VPU_40XX_HOST_SS_GEN_CTRL 0x00000118u
+#define VPU_40XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
+
+#define VPU_40XX_HOST_SS_NOC_QREQN 0x00000154u
+#define VPU_40XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_NOC_QACCEPTN 0x00000158u
+#define VPU_40XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_NOC_QDENY 0x0000015cu
+#define VPU_40XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_TOP_NOC_QREQN 0x00000160u
+#define VPU_40XX_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
+
+#define VPU_40XX_TOP_NOC_QACCEPTN 0x00000164u
+#define VPU_40XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
+
+#define VPU_40XX_TOP_NOC_QDENY 0x00000168u
+#define VPU_40XX_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(2)
+
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
+
+#define VPU_40XX_HOST_SS_ICB_STATUS_0 0x00010210u
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
+
+#define VPU_40XX_HOST_SS_ICB_STATUS_1 0x00010214u
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
+
+#define VPU_40XX_HOST_SS_ICB_CLEAR_0 0x00010220u
+#define VPU_40XX_HOST_SS_ICB_CLEAR_1 0x00010224u
+#define VPU_40XX_HOST_SS_ICB_ENABLE_0 0x00010240u
+#define VPU_40XX_HOST_SS_ICB_ENABLE_1 0x00010244u
+
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
+
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
+
+#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
+#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
+
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
+
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_SNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AW_SNOOP_OVERRIDE_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AR_SNOOP_OVERRIDE_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
+
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define VPU_40XX_CPU_SS_TIM_WATCHDOG 0x0102009cu
+#define VPU_40XX_CPU_SS_TIM_WDOG_EN 0x010200a4u
+#define VPU_40XX_CPU_SS_TIM_SAFE 0x010200a8u
+
+#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG 0x01021008u
+#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QREQN 0x01010030u
+#define VPU_40XX_CPU_SS_CPR_NOC_QREQN_TOP_MMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN 0x01010034u
+#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN_TOP_MMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QDENY 0x01010038u
+#define VPU_40XX_CPU_SS_CPR_NOC_QDENY_TOP_MMIO_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_TIM_IPC_FIFO 0x010200f0u
+#define VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT 0x01029008u
+
+#define VPU_40XX_CPU_SS_DOORBELL_0 0x01300000u
+#define VPU_40XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_DOORBELL_1 0x01301000u
+
+#endif /* __IVPU_HW_40XX_REG_H__ */
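As a point of reference for the header above: these GENMASK()/BIT_MASK() constants are meant to be consumed with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h> (or the driver's REG_* wrappers). A minimal, hypothetical sketch, not part of this patch — the helper name is invented for illustration:

	#include <linux/bitfield.h>

	/* Hypothetical example: decode the tile CONFIG field, which is
	 * only meaningful while the VALID fuse bit is set, per the masks
	 * defined above.
	 */
	static inline u32 example_tile_config(u32 tile_fuse)
	{
		if (!FIELD_GET(VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK, tile_fuse))
			return 0;

		return FIELD_GET(VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK, tile_fuse);
	}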
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
deleted file mode 100644
index 593b8ff07417..000000000000
--- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-
-#ifndef __IVPU_HW_MTL_REG_H__
-#define __IVPU_HW_MTL_REG_H__
-
-#include <linux/bits.h>
-
-#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
-
-#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
-#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
-#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
-
-#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
-#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
-
-#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
-#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
-
-#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
-#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
-
-#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
-
-#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
-#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
-#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
-
-#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
-#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
-
-#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
-#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
-
-#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
-#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
-
-#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
-#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
-
-#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
-#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
-
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
-
-#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
-#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
-
-#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
-#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
-#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
-
-#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
-#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
-#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
-
-#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
-#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
-#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
-#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
-
-#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
-
-#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u
-#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u
-#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
-#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
-#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
-
-#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u
-#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
-
-#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u
-#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u
-#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu
-#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
-
-#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
-#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
-
-#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
-#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
-
-#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
-#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
-
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
-
-#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
-
-#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
-
-#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u
-#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u
-#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u
-
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
-
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
-#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
-#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u
-#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
-
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
-
-#define MTL_VPU_HOST_MMU_IDR0 0x00200000u
-#define MTL_VPU_HOST_MMU_IDR1 0x00200004u
-#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu
-#define MTL_VPU_HOST_MMU_IDR5 0x00200014u
-#define MTL_VPU_HOST_MMU_CR0 0x00200020u
-#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u
-#define MTL_VPU_HOST_MMU_CR1 0x00200028u
-#define MTL_VPU_HOST_MMU_CR2 0x0020002cu
-#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u
-#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u
-
-#define MTL_VPU_HOST_MMU_GERROR 0x00200060u
-#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
-
-#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u
-
-#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u
-#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
-#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u
-#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u
-#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu
-#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u
-#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u
-#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu
-#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
-#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
-
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
-
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
-
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
-
-#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
-#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
-#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
-#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
-
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
-
-#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
-#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
-
-#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
-
-#endif /* __IVPU_HW_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 53878e77aad3..baefaf7bb3cb 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,7 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
-#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@@ -186,13 +186,13 @@
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
-#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
- (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
+ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
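REG_FLD() is defined in ivpu_hw_reg_io.h, which this patch does not touch; going by the naming convention in the headers above, it is assumed to token-paste its arguments into the corresponding *_MASK constant. A hedged restatement, given an EXAMPLE_ prefix so as not to imply the exact upstream definition:

	/* EXAMPLE_REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ) resolves to
	 * VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK, so the OR above is simply a
	 * bitmask of every fatal GERROR source.
	 */
	#define EXAMPLE_REG_FLD(REG, FLD) (REG##_##FLD##_MASK)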
static char *ivpu_mmu_event_to_str(u32 cmd)
{
@@ -250,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR0_REF;
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
@@ -269,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR5_REF;
- val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
+ val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
@@ -396,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
if (ret)
return ret;
- return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
+ return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
- return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
+ return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
}
@@ -447,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
- REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
+ REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret)
@@ -495,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
if (ret)
return ret;
@@ -505,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
- REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
+ REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
- REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
- REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+ REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
+ REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
- REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
- REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
- REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
+ REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
+ REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
+ REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@@ -531,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
- REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
- REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
+ REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
- ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@@ -550,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
- return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@@ -801,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
- evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
+ evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
- REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
@@ -841,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
ivpu_dbg(vdev, IRQ, "MMU error\n");
- gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
- gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
+ gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
+ gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return;
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
- if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
+ if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
- REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
+ REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
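The XOR in ivpu_mmu_irq_gerr_handler() follows the SMMU GERROR/GERRORN toggle convention: a bit position reports an outstanding error for as long as it differs between the two registers, and writing the GERROR value back into GERRORN acknowledges everything observed so far. A minimal sketch of the pending-error test, with a hypothetical helper name:

	/* True while at least one error covered by err_mask is unacknowledged. */
	static inline bool example_gerror_pending(u32 gerror, u32 gerrorn, u32 err_mask)
	{
		return (gerror ^ gerrorn) & err_mask;
	}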
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 465a82298476..1d2e554e2c4a 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -431,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
return ret;
if (!context_id) {
- start = vdev->hw->ranges.global_low.start;
- end = vdev->hw->ranges.global_high.end;
+ start = vdev->hw->ranges.global.start;
+ end = vdev->hw->ranges.shave.end;
} else {
- start = vdev->hw->ranges.user_low.start;
- end = vdev->hw->ranges.user_high.end;
+ start = vdev->hw->ranges.user.start;
+ end = vdev->hw->ranges.dma.end;
}
drm_mm_init(&ctx->mm, start, end - start);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b51c6a141dfa..9d1f0e04fd56 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -135,7 +135,6 @@ config DRM_DEBUG_MODESET_LOCK
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
- select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y
help
@@ -196,6 +195,21 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT
+ select DRM_TTM
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers.
+
+ If in doubt, say "N".
+
config DRM_EXEC
tristate
depends on DRM
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 58184cd6ab0b..cc5cf4c2faf7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
struct drm_display_mode *adj = &c->state->adjusted_mode;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_connector *connector = NULL;
struct atmel_hlcdc_crtc_state *state;
+ struct drm_device *ddev = c->dev;
+ struct drm_connector_list_iter iter;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
@@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned int cfg = 0;
int div, ret;
+ /* Get the encoder attached to this CRTC */
+ drm_for_each_encoder(en_iter, ddev) {
+ if (en_iter->crtc == c) {
+ encoder = en_iter;
+ break;
+ }
+ }
+
+ if (encoder) {
+ /* Get the connector from the encoder */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ if (connector->encoder == encoder)
+ break;
+ drm_connector_list_iter_end(&iter);
+ }
+
ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
if (ret)
return;
@@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg |= ATMEL_HLCDC_CLKDIV(div);
+ if (connector &&
+ connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ cfg |= ATMEL_HLCDC_CLKPOL;
+
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);
state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index b23214b0c496..6c1d79474505 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2449,15 +2449,7 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
enum drm_connector_status result;
result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
-
- mutex_lock(&hdmi->mutex);
- if (result != hdmi->last_connector_result) {
- dev_dbg(hdmi->dev, "read_hpd result: %d", result);
- handle_plugged_change(hdmi,
- result == connector_status_connected);
- hdmi->last_connector_result = result;
- }
- mutex_unlock(&hdmi->mutex);
+ hdmi->last_connector_result = result;
return result;
}
@@ -2958,6 +2950,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
hdmi->curr_conn = NULL;
dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi);
+ handle_plugged_change(hdmi, false);
mutex_unlock(&hdmi->mutex);
}
@@ -2976,6 +2969,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
hdmi->curr_conn = connector;
dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi);
+ handle_plugged_change(hdmi, true);
mutex_unlock(&hdmi->mutex);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 4153f302de7c..d19e796c2061 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
if (exynos_crtc->ops->atomic_disable)
exynos_crtc->ops->atomic_disable(exynos_crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event && !crtc->state->active) {
- spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
-
crtc->state->event = NULL;
}
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 964dceb28c1e..34cdabc30b4f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1426,6 +1426,6 @@ struct platform_driver gsc_driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
.pm = &gsc_pm_ops,
- .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
+ .of_match_table = exynos_drm_gsc_of_match,
},
};
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index f7ebc146f96d..ad6488e9c2b2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -444,7 +444,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
- if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
+ (DISPLAY_VER(dev_priv) >= 12)) {
intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
@@ -553,7 +554,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
}
}
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys)
intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index dcc1f6941b60..2fb030b1ff1d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -470,7 +470,7 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
cdclk_config->cdclk = 450000;
- else if (IS_HSW_ULT(dev_priv))
+ else if (IS_HASWELL_ULT(dev_priv))
cdclk_config->cdclk = 337500;
else
cdclk_config->cdclk = 540000;
@@ -3155,7 +3155,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
if (dev_priv->display.cdclk.hw.ref == 24000)
dev_priv->display.cdclk.max_cdclk_freq = 552000;
else
@@ -3200,9 +3200,9 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->display.cdclk.max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev_priv))
+ else if (IS_BROADWELL_ULX(dev_priv))
dev_priv->display.cdclk.max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev_priv))
+ else if (IS_BROADWELL_ULT(dev_priv))
dev_priv->display.cdclk.max_cdclk_freq = 540000;
else
dev_priv->display.cdclk.max_cdclk_freq = 675000;
@@ -3567,10 +3567,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
/* Wa_22011320316:adl-p[a0] */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
- } else if (IS_ADLP_RPLU(dev_priv)) {
+ } else if (IS_RAPTORLAKE_U(dev_priv)) {
dev_priv->display.cdclk.table = rplu_cdclk_table;
dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
} else {
@@ -3583,7 +3583,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
} else if (DISPLAY_VER(dev_priv) >= 12) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
dev_priv->display.cdclk.table = icl_cdclk_table;
- } else if (IS_JSL_EHL(dev_priv)) {
+ } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 922a6d87b553..e2a220cf2e57 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -141,7 +141,7 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
if (IS_ALDERLAKE_S(i915))
return phy == PHY_A;
- else if (IS_JSL_EHL(i915) ||
+ else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) ||
IS_ROCKETLAKE(i915) ||
IS_DG1(i915))
return phy < PHY_C;
@@ -242,7 +242,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
if (ehl_vbt_ddi_d_present(dev_priv))
expected_val = ICL_PHY_MISC_MUX_DDID;
@@ -333,7 +333,8 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* "internal" child devices.
*/
val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- if (IS_JSL_EHL(dev_priv) && phy == PHY_A) {
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ phy == PHY_A) {
val &= ~ICL_PHY_MISC_MUX_DDID;
if (ehl_vbt_ddi_d_present(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 3cd2191fa794..84bbf854337a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3583,7 +3583,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
{
if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
- else if (IS_JSL_EHL(dev_priv) && crtc_state->port_clock > 594000)
+ else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 3;
else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
@@ -4878,7 +4879,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JSL_EHL(dev_priv)) {
+ } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
if (intel_ddi_is_tc(dev_priv, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
@@ -4949,7 +4950,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) >= 12)
encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) == 11)
encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 5b2665a9d86d..de809e2d9cac 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1410,7 +1410,7 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_TGL_UY(dev_priv)) {
+ if (IS_TIGERLAKE_UY(dev_priv)) {
return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
n_entries);
} else {
@@ -1740,15 +1740,15 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
encoder->get_buf_trans = icl_get_mg_buf_trans;
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
encoder->get_buf_trans = bxt_get_buf_trans;
- } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) {
+ } else if (IS_COMETLAKE_ULX(i915) || IS_COFFEELAKE_ULX(i915) || IS_KABYLAKE_ULX(i915)) {
encoder->get_buf_trans = kbl_y_get_buf_trans;
- } else if (IS_CML_ULT(i915) || IS_CFL_ULT(i915) || IS_KBL_ULT(i915)) {
+ } else if (IS_COMETLAKE_ULT(i915) || IS_COFFEELAKE_ULT(i915) || IS_KABYLAKE_ULT(i915)) {
encoder->get_buf_trans = kbl_u_get_buf_trans;
} else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
encoder->get_buf_trans = kbl_get_buf_trans;
- } else if (IS_SKL_ULX(i915)) {
+ } else if (IS_SKYLAKE_ULX(i915)) {
encoder->get_buf_trans = skl_y_get_buf_trans;
- } else if (IS_SKL_ULT(i915)) {
+ } else if (IS_SKYLAKE_ULT(i915)) {
encoder->get_buf_trans = skl_u_get_buf_trans;
} else if (IS_SKYLAKE(i915)) {
encoder->get_buf_trans = skl_get_buf_trans;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 43cba98f7753..763ab569d8f3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1749,7 +1749,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
return phy <= PHY_C;
else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
return phy <= PHY_B;
@@ -1801,7 +1801,8 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
return PHY_B + port - PORT_TC1;
else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
- else if (IS_JSL_EHL(i915) && port == PORT_D)
+ else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ port == PORT_D)
return PHY_A;
return PHY_A + port - PORT_A;
@@ -7377,7 +7378,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 9)
return false;
- if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 8286e79522d1..c39f8a15d8aa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -724,10 +724,24 @@ static const struct intel_display_device_info xe_lpdp_display = {
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
+/*
+ * Separate detection for the no-display cases to keep the display id array simple.
+ *
+ * IVB Q requires subvendor and subdevice matching to differentiate from IVB D
+ * GT2 server.
+ */
+static bool has_no_display(struct pci_dev *pdev)
+{
+ static const struct pci_device_id ids[] = {
+ INTEL_IVB_Q_IDS(0),
+ {}
+ };
+
+ return pci_match_id(ids, pdev);
+}
+
#undef INTEL_VGA_DEVICE
-#undef INTEL_QUANTA_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) { id, info }
-#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info }
static const struct {
u32 devid;
@@ -752,7 +766,6 @@ static const struct {
INTEL_IRONLAKE_M_IDS(&ilk_m_display),
INTEL_SNB_D_IDS(&snb_display),
INTEL_SNB_M_IDS(&snb_display),
- INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */
INTEL_IVB_M_IDS(&ivb_display),
INTEL_IVB_D_IDS(&ivb_display),
INTEL_HSW_IDS(&hsw_display),
@@ -800,6 +813,15 @@ probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step
u32 val;
int i;
+ /* The caller expects ver, rel and step to be initialized
+ * here, and there's no good way to check when there was a
+ * failure and no_display was returned. So initialize all these
+ * values to zero here, to be sure.
+ */
+ *ver = 0;
+ *rel = 0;
+ *step = 0;
+
addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32));
if (!addr) {
drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n");
@@ -809,9 +831,10 @@ probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step
val = ioread32(addr);
pci_iounmap(pdev, addr);
- if (val == 0)
- /* Platform doesn't have display */
+ if (val == 0) {
+ drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
return &no_display;
+ }
*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
*rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
@@ -837,6 +860,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
if (has_gmdid)
return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step);
+ if (has_no_display(pdev)) {
+ drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ return &no_display;
+ }
+
for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) {
if (intel_display_ids[i].devid == pdev->device)
return intel_display_ids[i].info;
@@ -858,7 +886,7 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS);
/* Wa_14011765242: adl-s A0,A1 */
- if (IS_ADLS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
+ if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
for_each_pipe(i915, pipe)
display_runtime->num_scalers[pipe] = 0;
else if (DISPLAY_VER(i915) >= 11) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 3324bd453ca7..215e682bd8b7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -54,7 +54,7 @@ struct drm_printer;
#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
#define HAS_IPC(i915) (DISPLAY_INFO(i915)->has_ipc)
-#define HAS_IPS(i915) (IS_HSW_ULT(i915) || IS_BROADWELL(i915))
+#define HAS_IPS(i915) (IS_HASWELL_ULT(i915) || IS_BROADWELL(i915))
#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10))
#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index b909814ae02b..8f144d4d3c39 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -28,6 +28,7 @@
#include "intel_crtc.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
+#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
@@ -177,6 +178,7 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
intel_color_init_hooks(i915);
intel_init_cdclk_hooks(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index e6f172cc665a..62ce55475554 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -1537,7 +1537,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask;
@@ -1583,6 +1583,50 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
vlv_display_irq_reset(dev_priv);
}
+void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 display_mask, extra_mask;
+
+ if (GRAPHICS_VER(i915) >= 7) {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
+ extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
+ DE_DP_A_HOTPLUG_IVB);
+ } else {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
+ DE_PIPEA_CRC_DONE | DE_POISON);
+ extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PLANE_FLIP_DONE(PLANE_A) |
+ DE_PLANE_FLIP_DONE(PLANE_B) |
+ DE_DP_A_HOTPLUG);
+ }
+
+ if (IS_HASWELL(i915)) {
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ display_mask |= DE_EDP_PSR_INT_HSW;
+ }
+
+ if (IS_IRONLAKE_M(i915))
+ extra_mask |= DE_PCU_EVENT;
+
+ i915->irq_mask = ~display_mask;
+
+ ibx_irq_postinstall(i915);
+
+ GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
+ display_mask | extra_mask);
+}
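GEN3_IRQ_INIT() pairs an interrupt-mask value with an enable value, so the call above unmasks display_mask while enabling display_mask | extra_mask. A hedged sketch of roughly what that amounts to for the DE registers — the register names are real, the helper is hypothetical and simplified:

	static inline void example_gen3_de_irq_init(struct intel_uncore *uncore,
						    u32 imr_val, u32 ier_val)
	{
		/* The real helper first asserts that DEIIR is already clear. */
		intel_uncore_write(uncore, DEIER, ier_val);
		intel_uncore_write(uncore, DEIMR, imr_val);
		intel_uncore_posting_read(uncore, DEIMR);
	}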
+
+static void mtp_irq_postinstall(struct drm_i915_private *i915);
+static void icp_irq_postinstall(struct drm_i915_private *i915);
+
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1600,6 +1644,13 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
+ if (DISPLAY_VER(dev_priv) >= 14)
+ mtp_irq_postinstall(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_postinstall(dev_priv);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ ibx_irq_postinstall(dev_priv);
+
if (DISPLAY_VER(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
@@ -1666,7 +1717,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
struct intel_uncore *uncore = &i915->uncore;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
@@ -1680,7 +1731,7 @@ void mtp_irq_postinstall(struct drm_i915_private *i915)
GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
}
-void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask = SDE_GMBUS_ICP;
@@ -1699,3 +1750,30 @@ void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN11_DISPLAY_IRQ_ENABLE);
}
+void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ gen8_de_irq_postinstall(i915);
+ intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
+ GEN11_DISPLAY_IRQ_ENABLE);
+}
+
+void intel_display_irq_init(struct drm_i915_private *i915)
+{
+ i915->drm.vblank_disable_immediate = true;
+
+ /*
+ * Most platforms treat the display irq block as an always-on power
+ * domain. vlv/chv can disable it at runtime and need special care to
+ * avoid writing any of the display block registers outside of the power
+ * domain. We defer setting up the display irqs in this case to the
+ * runtime pm.
+ */
+ i915->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display_irqs_enabled = false;
+
+ intel_hotplug_irq_init(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index 874893f4f16d..2a090dd6abd7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -58,12 +58,11 @@ void vlv_display_irq_reset(struct drm_i915_private *i915);
void gen8_display_irq_reset(struct drm_i915_private *i915);
void gen11_display_irq_reset(struct drm_i915_private *i915);
-void ibx_irq_postinstall(struct drm_i915_private *i915);
void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void icp_irq_postinstall(struct drm_i915_private *i915);
+void ilk_de_irq_postinstall(struct drm_i915_private *i915);
void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void mtp_irq_postinstall(struct drm_i915_private *i915);
void gen11_de_irq_postinstall(struct drm_i915_private *i915);
+void dg1_de_irq_postinstall(struct drm_i915_private *i915);
u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe);
void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
@@ -78,4 +77,6 @@ void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_
void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void intel_display_irq_init(struct drm_i915_private *i915);
+
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 38225e5d311e..9e01054c2430 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1609,7 +1609,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
return;
if (IS_ALDERLAKE_S(dev_priv) ||
- IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 03675620e3ea..12bd2f322e62 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -500,7 +500,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
max_rate = 810000;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -510,7 +510,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
} else if (DISPLAY_VER(dev_priv) == 9) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
IS_BROADWELL(dev_priv)) {
source_rates = hsw_rates;
size = ARRAY_SIZE(hsw_rates);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 6b2d8a1e2aa9..a9b19e80bff7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -191,7 +191,8 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
{
if (IS_DG1(i915))
return DG1_DPLL_ENABLE(pll->info->id);
- else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
+ else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ (pll->info->id == DPLL_ID_EHL_DPLL4))
return MG_PLL_ENABLE(0);
return ICL_DPLL_ENABLE(pll->info->id);
@@ -927,7 +928,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
switch (wrpll & WRPLL_REF_MASK) {
case WRPLL_REF_SPECIAL_HSW:
/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) {
refclk = dev_priv->display.dpll.ref_clks.nssc;
break;
}
@@ -2460,8 +2461,8 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
- IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
+ return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
}
@@ -3226,7 +3227,8 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
BIT(DPLL_ID_ICL_DPLL0);
- } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
+ } else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ port != PORT_A) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
@@ -3567,7 +3569,8 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
}
} else {
- if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ id == DPLL_ID_EHL_DPLL4) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
ICL_DPLL_CFGCR0(4));
hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3623,7 +3626,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
div0_reg = TGL_DPLL0_DIV0(id);
} else {
- if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ id == DPLL_ID_EHL_DPLL4) {
cfgcr0_reg = ICL_DPLL_CFGCR0(4);
cfgcr1_reg = ICL_DPLL_CFGCR1(4);
} else {
@@ -3781,7 +3785,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
{
u32 val;
- if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
+ if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
pll->info->id != DPLL_ID_ICL_DPLL0)
return;
/*
@@ -3806,7 +3810,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
- if (IS_JSL_EHL(dev_priv) &&
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
/*
@@ -3914,7 +3918,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
- if (IS_JSL_EHL(dev_priv) &&
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
pll->info->id == DPLL_ID_EHL_DPLL4)
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
pll->wakeref);
@@ -4150,7 +4154,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
dpll_mgr = &rkl_pll_mgr;
else if (DISPLAY_VER(dev_priv) >= 12)
dpll_mgr = &tgl_pll_mgr;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (DISPLAY_VER(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
@@ -4335,7 +4339,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
- if (IS_JSL_EHL(i915) && pll->on &&
+ if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ pll->on &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
pll->wakeref = intel_display_power_get(i915,
POWER_DOMAIN_DC_OFF);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 7f8b2d7713c7..25382022cd27 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -47,6 +47,7 @@
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
+#include "i915_vma.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
@@ -94,8 +95,7 @@ struct intel_fbc {
struct mutex lock;
unsigned int busy_bits;
- struct drm_mm_node compressed_fb;
- struct drm_mm_node compressed_llb;
+ struct i915_stolen_fb compressed_fb, compressed_llb;
enum intel_fbc_id id;
@@ -332,15 +332,16 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
- fbc->compressed_fb.start, U32_MAX));
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
- fbc->compressed_llb.start, U32_MAX));
-
+ GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ i915_gem_stolen_node_offset(&fbc->compressed_fb),
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ i915_gem_stolen_node_offset(&fbc->compressed_llb),
+ U32_MAX));
intel_de_write(i915, FBC_CFB_BASE,
- i915->dsm.stolen.start + fbc->compressed_fb.start);
+ i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
intel_de_write(i915, FBC_LL_BASE,
- i915->dsm.stolen.start + fbc->compressed_llb.start);
+ i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -447,7 +448,8 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(i915, DPFC_CB_BASE,
+ i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
static const struct intel_fbc_funcs g4x_fbc_funcs = {
@@ -498,7 +500,8 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
+ intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
+ i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
static const struct intel_fbc_funcs ilk_fbc_funcs = {
@@ -605,7 +608,7 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
else if (DISPLAY_VER(i915) == 9)
skl_fbc_program_cfb_stride(fbc);
- if (to_gt(i915)->ggtt->num_fences)
+ if (intel_gt_support_legacy_fencing(to_gt(i915)))
snb_fbc_program_fence(fbc);
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
@@ -713,7 +716,7 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
(DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
- end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
+ end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -770,9 +773,9 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
int ret;
drm_WARN_ON(&i915->drm,
- drm_mm_node_allocated(&fbc->compressed_fb));
+ i915_gem_stolen_node_allocated(&fbc->compressed_fb));
drm_WARN_ON(&i915->drm,
- drm_mm_node_allocated(&fbc->compressed_llb));
+ i915_gem_stolen_node_allocated(&fbc->compressed_llb));
if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
@@ -792,15 +795,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_dbg_kms(&i915->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
- fbc->compressed_fb.size, fbc->limit);
-
+ i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
return 0;
err_llb:
- if (drm_mm_node_allocated(&fbc->compressed_llb))
+ if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
- if (drm_mm_initialized(&i915->mm.stolen))
+ if (i915_gem_stolen_initialized(i915))
drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -825,9 +827,9 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
- if (drm_mm_node_allocated(&fbc->compressed_llb))
+ if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
- if (drm_mm_node_allocated(&fbc->compressed_fb))
+ if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
@@ -990,11 +992,10 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
- !plane_state->ggtt_vma->fence);
+ !intel_gt_support_legacy_fencing(to_gt(i915)));
- if (plane_state->flags & PLANE_HAS_FENCE &&
- plane_state->ggtt_vma->fence)
- fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
+ if (plane_state->flags & PLANE_HAS_FENCE)
+ fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
else
fbc_state->fence_id = -1;
@@ -1021,7 +1022,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
*/
return DISPLAY_VER(i915) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
- plane_state->ggtt_vma->fence);
+ i915_vma_fence_id(plane_state->ggtt_vma) != -1);
}
static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
@@ -1030,7 +1031,8 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
struct intel_fbc *fbc = plane->fbc;
return intel_fbc_min_limit(plane_state) <= fbc->limit &&
- intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
+ intel_fbc_cfb_size(plane_state) <= fbc->limit *
+ i915_gem_stolen_node_size(&fbc->compressed_fb);
}
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@@ -1054,6 +1056,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (!fbc)
return 0;
+ if (!i915_gem_stolen_initialized(i915)) {
+ plane_state->no_fbc_reason = "stolen memory not initialised";
+ return 0;
+ }
+
if (intel_vgpu_active(i915)) {
plane_state->no_fbc_reason = "VGPU active";
return 0;
@@ -1707,9 +1714,6 @@ void intel_fbc_init(struct drm_i915_private *i915)
{
enum intel_fbc_id fbc_id;
- if (!drm_mm_initialized(&i915->mm.stolen))
- DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
-
if (need_fbc_vtd_wa(i915))
DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
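
With the stolen check moved out of intel_fbc_init(), an uninitialized stolen area no longer clears fbc_mask globally; instead each plane records a human-readable no_fbc_reason during atomic check, so the cause stays observable (e.g. via debugfs). A stripped-down sketch of that reporting pattern, with invented types:

#include <stdbool.h>
#include <stdio.h>

struct plane_state { const char *no_fbc_reason; };

static int fbc_check_plane(struct plane_state *ps, bool stolen_ok)
{
	if (!stolen_ok) {
		ps->no_fbc_reason = "stolen memory not initialised";
		return 0;  /* not an error: FBC is simply unavailable */
	}
	ps->no_fbc_reason = NULL;
	return 0;
}

int main(void)
{
	struct plane_state ps;

	fbc_check_plane(&ps, false);
	printf("%s\n", ps.no_fbc_reason ? ps.no_fbc_reason : "fbc ok");
	return 0;
}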
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index ad0405375881..d753db3eef15 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -6,6 +6,7 @@
#include <drm/i915_hdcp_interface.h>
#include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "i915_utils.h"
@@ -632,7 +633,7 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
return PTR_ERR(obj);
}
- cmd_in = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
if (IS_ERR(cmd_in)) {
drm_err(&i915->drm, "Failed to map gsc message page!\n");
err = PTR_ERR(cmd_in);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 8d1c8abfcffa..94a7e1537f42 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2894,7 +2894,8 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
- else if (IS_JSL_EHL(dev_priv) && HAS_PCH_TGP(dev_priv))
+ else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ HAS_PCH_TGP(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index f4c09cc37a5e..9583e86b602a 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -423,7 +423,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+ if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 04ab034a8d57..97d5eef10130 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -748,7 +748,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -918,7 +918,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
return;
/*
@@ -1074,7 +1074,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only supports eDP 1.3 */
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
return false;
}
@@ -1086,7 +1086,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
return false;
}
@@ -1144,7 +1144,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 8298a86d1334..7d25a64698e2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2097,7 +2097,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
const struct edid *edid = drm_edid_raw(drm_edid);
/* DDC bus is shared, match EDID to connector type */
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
&conn_state->base.base);
- INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+ intel_panel_init_alloc(&sdvo_connector->base);
return sdvo_connector;
}
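
The first SDVO hunk above is a straightforward NULL guard: drm_edid_raw() may return NULL on a shared DDC bus, so the digital-input bit must not be dereferenced unconditionally. The guard in isolation, with toy types standing in for the real EDID structures:

#include <stdbool.h>
#include <stdio.h>

struct edid { unsigned char input; };       /* toy EDID, not drm_edid.h */
#define DRM_EDID_INPUT_DIGITAL 0x80

static bool sink_is_digital(const struct edid *edid)
{
	/* A shared DDC bus may yield no EDID at all, hence the NULL check. */
	return edid && (edid->input & DRM_EDID_INPUT_DIGITAL);
}

int main(void)
{
	struct edid digital = { .input = DRM_EDID_INPUT_DIGITAL };

	printf("%d %d\n", sink_is_digital(NULL), sink_is_digital(&digital));
	return 0;
}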
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 6b01a0b68b97..ffc15d278a39 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2174,7 +2174,7 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
return false;
/* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
if (DISPLAY_VER(i915) >= 11)
@@ -2196,11 +2196,11 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
/* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
- IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
+ (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0)))
return false;
/* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
/* Wa_14013215631 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 148468098082..ef9346ed6d0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -226,7 +226,7 @@ bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
* it, but since i915 takes the stance of always zeroing memory before
* handing it to userspace, we need to prevent this.
*/
- return IS_JSL_EHL(i915);
+ return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915));
}
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 112c130cfaaa..f607b87890dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -716,10 +716,6 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
enum i915_map_type type);
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- bool always_coherent);
-
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index eae40e8f52ed..6b6d22c19411 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -468,21 +468,6 @@ void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
return ret;
}
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- bool always_coherent)
-{
- /*
- * Wa_22016122933: always return I915_MAP_WC for MTL
- */
- if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
- return I915_MAP_WC;
- if (HAS_LLC(i915) || always_coherent)
- return I915_MAP_WB;
- else
- return I915_MAP_WC;
-}
-
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 5b0a5cf9a98a..1a766d8e7cce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -974,3 +974,39 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_object_stolen_ops;
}
+
+bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
+{
+ return drm_mm_initialized(&i915->mm.stolen);
+}
+
+u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
+{
+ return i915->dsm.stolen.start;
+}
+
+u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
+{
+ return resource_size(&i915->dsm.stolen);
+}
+
+u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
+ const struct drm_mm_node *node)
+{
+ return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
+}
+
+bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
+{
+ return drm_mm_node_allocated(node);
+}
+
+u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
+{
+ return node->start;
+}
+
+u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
+{
+ return node->size;
+}
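
The helpers added here draw a line between a node's offset inside the stolen area and its absolute address (area base plus offset), which is the distinction the FBC hunks above depend on: DPFC_CB_BASE takes an offset, FBC_CFB_BASE an address. A toy model of the same split, with an assumed base address standing in for i915->dsm.stolen.start:

#include <stdio.h>

/* Toy stand-ins for drm_mm_node and the stolen area; values invented. */
struct node { unsigned long long start, size; };

static const unsigned long long stolen_base = 0x80000000ull;

static unsigned long long node_offset(const struct node *n)
{
	return n->start;                     /* i915_gem_stolen_node_offset() */
}

static unsigned long long node_address(const struct node *n)
{
	return stolen_base + node_offset(n); /* i915_gem_stolen_node_address() */
}

int main(void)
{
	struct node cfb = { .start = 0x1000, .size = 0x8000 };

	printf("offset=%#llx address=%#llx size=%#llx\n",
	       node_offset(&cfb), node_address(&cfb), cfb.size);
	return 0;
}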
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index d5005a39d130..258381d1c054 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -12,6 +12,8 @@ struct drm_i915_private;
struct drm_mm_node;
struct drm_i915_gem_object;
+#define i915_stolen_fb drm_mm_node
+
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment);
@@ -36,4 +38,15 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
#define I915_GEM_STOLEN_BIAS SZ_128K
+bool i915_gem_stolen_initialized(const struct drm_i915_private *i915);
+u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
+u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
+
+u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
+ const struct drm_mm_node *node);
+
+bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node);
+u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node);
+u64 i915_gem_stolen_node_size(const struct drm_mm_node *node);
+
#endif /* __I915_GEM_STOLEN_H__ */
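
The i915_stolen_fb alias above is a compile-time indirection: FBC declares struct i915_stolen_fb members while this header decides what that expands to, so the backing type can change later without touching users; only the accessor functions would follow. The same trick in miniature, with invented names:

#include <stdio.h>

#define backing_node concrete_node  /* mirrors the i915_stolen_fb alias */

struct concrete_node { unsigned long start, size; };
struct consumer { struct backing_node fb; };  /* expands to concrete_node */

int main(void)
{
	struct consumer c = { .fb = { .start = 0, .size = 4096 } };

	printf("size=%lu\n", c.fb.size);
	return 0;
}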
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index a93a90b15907..d8f4a10d71de 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -13,12 +13,12 @@
#include "selftests/igt_spinner.h"
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
+ struct intel_gt *gt,
bool fill)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int i, count = obj->base.size / sizeof(u32);
enum i915_map_type map_type =
- i915_coherent_map_type(i915, obj, false);
+ intel_gt_coherent_map_type(gt, obj, false);
u32 *cur;
int err = 0;
@@ -66,7 +66,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
if (err)
continue;
- err = igt_fill_check_buffer(obj, true);
+ err = igt_fill_check_buffer(obj, gt, true);
if (err)
continue;
@@ -86,7 +86,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
if (err)
continue;
- err = igt_fill_check_buffer(obj, false);
+ err = igt_fill_check_buffer(obj, gt, false);
}
i915_gem_object_put(obj);
@@ -233,7 +233,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
continue;
if (!vma) {
- err = igt_fill_check_buffer(obj, true);
+ err = igt_fill_check_buffer(obj, gt, true);
if (err)
continue;
}
@@ -276,7 +276,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
if (err)
goto out_unlock;
} else {
- err = igt_fill_check_buffer(obj, false);
+ err = igt_fill_check_buffer(obj, gt, false);
}
out_unlock:
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 8a3cbb01bcbe..a4ff55aa5e55 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -43,7 +43,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
+ if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
dc_flush_wa = true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 21af0ec52223..b538b5c04948 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -39,7 +39,7 @@ static void dbg_poison_ce(struct intel_context *ce)
if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
- int type = i915_coherent_map_type(ce->engine->i915, obj, true);
+ int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
void *map;
if (!i915_gem_object_trylock(obj, NULL))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 18177a8e4aad..449f0b7fc843 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -179,7 +179,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
HSW_MI_PREDICATE_RESULT_2,
- IS_HSW_GT3(i915) ?
+ IS_HASWELL_GT3(i915) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
@@ -1003,3 +1003,19 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
+
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+ struct drm_i915_gem_object *obj,
+ bool always_coherent)
+{
+ /*
+ * Wa_22016122933: always return I915_MAP_WC for Media
+ * version 13.0 when the object is on the Media GT
+ */
+ if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
+ return I915_MAP_WC;
+ if (HAS_LLC(gt->i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
+}
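
Ordering is the subtle part of intel_gt_coherent_map_type(): local memory and the Wa_22016122933 media workaround force write-combining before the LLC/always_coherent check gets a chance to pick write-back. A small standalone sketch of that decision tree, with boolean stand-ins for the real object and GT queries:

#include <stdbool.h>
#include <stdio.h>

enum map_type { MAP_WB, MAP_WC };  /* stand-ins for I915_MAP_WB/I915_MAP_WC */

static enum map_type coherent_map_type(bool is_lmem, bool needs_wa_22016122933,
				       bool has_llc, bool always_coherent)
{
	if (is_lmem || needs_wa_22016122933)  /* these take priority */
		return MAP_WC;
	if (has_llc || always_coherent)
		return MAP_WB;
	return MAP_WC;
}

int main(void)
{
	/* The workaround wins even on an LLC platform asking for coherency. */
	printf("%s\n",
	       coherent_map_type(false, true, true, true) == MAP_WC ? "WC" : "WB");
	return 0;
}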
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 6549e28fa219..6c34547b58b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_GT__
#define __INTEL_GT__
+#include "i915_drv.h"
#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"
@@ -24,6 +25,11 @@ static inline bool gt_is_root(struct intel_gt *gt)
return !gt->info.id;
}
+static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
+{
+ return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
+}
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -107,4 +113,8 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+ struct drm_i915_gem_object *obj,
+ bool always_coherent);
+
#endif /* __INTEL_GT_H__ */
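
intel_gt_needs_wa_22016122933() compares MEDIA_VER_FULL() against IP_VER(13, 0), a packed major/release pair, and additionally requires the media GT, so render/compute GTs on the same device are unaffected. A sketch of that packed comparison, assuming the conventional shift-by-8 encoding for IP_VER():

#include <stdio.h>

#define IP_VER(ver, rel) (((ver) << 8) | (rel))  /* assumed packing */

int main(void)
{
	int media_ver_full = IP_VER(13, 0);
	int is_media_gt = 1;  /* gt->type == GT_MEDIA in the real predicate */

	printf("needs Wa_22016122933: %d\n",
	       media_ver_full == IP_VER(13, 0) && is_media_gt);
	return 0;
}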
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 1b22d7a50665..def7dd0eb6f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -309,4 +309,6 @@ enum intel_gt_scratch_field {
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
};
+#define intel_gt_support_legacy_fencing(gt) ((gt)->ggtt->num_fences > 0)
+
#endif /* __INTEL_GT_TYPES_H__ */
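
intel_gt_support_legacy_fencing() turns a raw field access (ggtt->num_fences) into a named capability query; GTs without legacy fence registers report zero fences, which is what the FBC hunk earlier now keys on. The same macro in miniature:

#include <stdio.h>

struct ggtt { int num_fences; };
struct gt { struct ggtt *ggtt; };

#define gt_supports_legacy_fencing(gt) ((gt)->ggtt->num_fences > 0)

int main(void)
{
	struct ggtt g = { .num_fences = 0 };  /* e.g. a modern discrete GT */
	struct gt t = { .ggtt = &g };

	printf("%s\n", gt_supports_legacy_fencing(&t) ? "fences" : "no fences");
	return 0;
}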
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 731d9f2bbc56..13944a14ea2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -89,7 +89,7 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
enum i915_map_type type;
void *vaddr;
- type = i915_coherent_map_type(vm->i915, obj, true);
+ type = intel_gt_coherent_map_type(vm->gt, obj, true);
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -103,7 +103,7 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
enum i915_map_type type;
void *vaddr;
- type = i915_coherent_map_type(vm->i915, obj, true);
+ type = intel_gt_coherent_map_type(vm->gt, obj, true);
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 119deb9f938c..957d0aeb0c02 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1095,10 +1095,11 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
/*
- * Wa_22016122933: For MTL the shared memory needs to be mapped
- * as WC on CPU side and UC (PAT index 2) on GPU side
+ * Wa_22016122933: For Media version 13.0, all Media GT shared
+ * memory needs to be mapped as WC on CPU side and UC (PAT
+ * index 2) on GPU side.
*/
- if (IS_METEORLAKE(engine->i915))
+ if (intel_gt_needs_wa_22016122933(engine->gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
if (IS_ERR(obj))
@@ -1191,9 +1192,9 @@ lrc_pre_pin(struct intel_context *ce,
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
*vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915,
- ce->state->obj,
- false) |
+ intel_gt_coherent_map_type(ce->engine->gt,
+ ce->state->obj,
+ false) |
I915_MAP_OVERRIDE);
return PTR_ERR_OR_ZERO(*vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index fb99143be98e..59da4b7bd262 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -13,6 +13,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
+#include "intel_gt.h"
#include "intel_timeline.h"
unsigned int intel_ring_update_space(struct intel_ring *ring)
@@ -56,7 +57,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
} else {
- int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+ int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
addr = i915_gem_object_pin_map(vma->obj, type);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 1141f875f5bd..f602895f6d0d 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -302,7 +302,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
u8 eu_en;
u8 s_en;
- if (IS_JSL_EHL(gt->i915))
+ if (IS_JASPERLAKE(gt->i915) || IS_ELKHARTLAKE(gt->i915))
intel_sseu_set_info(sseu, 1, 4, 8);
else
intel_sseu_set_info(sseu, 1, 8, 8);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 589d009032fc..3ae0dbd39eaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -420,7 +420,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -600,7 +600,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -1192,7 +1192,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
+ if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
@@ -1204,7 +1204,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
+ if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1460,7 +1460,8 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
- IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
wa_write_or(wal,
GEN11_SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -2945,7 +2946,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 76fbae358072..47070cba7eb1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -88,8 +88,9 @@ static int __live_context_size(struct intel_engine_cs *engine)
goto err;
vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915,
- ce->state->obj, false));
+ intel_gt_coherent_map_type(engine->gt,
+ ce->state->obj,
+ false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 8b0d84f2aad2..0dd4d00ee894 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -73,7 +73,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map_unlocked(h->obj,
- i915_coherent_map_type(gt->i915, h->obj, false));
+ intel_gt_coherent_map_type(gt, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -119,7 +119,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}
- vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index a78a3d2c2e16..5f826b6dcf5d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1292,9 +1292,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
}
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915,
- ce->state->obj,
- false));
+ intel_gt_coherent_map_type(engine->gt,
+ ce->state->obj,
+ false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 449c9ed44382..bccc3a1200bc 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,6 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
enum i915_map_type map_type;
struct file *file;
void *ptr;
@@ -44,7 +43,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- map_type = i915_coherent_map_type(i915, obj, true);
+ map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 163021705210..e2e42b3e0d5d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -282,7 +282,6 @@ out_rq:
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- struct drm_i915_private *i915 = gt->i915;
void *src;
if (!gsc->local)
@@ -292,17 +291,13 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
return -ENOSPC;
src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
- i915_coherent_map_type(i915, gsc->fw.obj, true));
+ intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
if (IS_ERR(src))
return PTR_ERR(src);
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- /*
- * Wa_22016122933: Making sure the data in dst is
- * visible to GSC right away
- */
intel_guc_write_barrier(&gt->uc.guc);
i915_gem_object_unpin_map(gsc->fw.obj);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2eb891b270ae..569b5fe94c41 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -745,10 +745,11 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
return ERR_CAST(obj);
/*
- * Wa_22016122933: For MTL the shared memory needs to be mapped
- * as WC on CPU side and UC (PAT index 2) on GPU side
+ * Wa_22016122933: For Media version 13.0, all Media GT shared
+ * memory needs to be mapped as WC on CPU side and UC (PAT
+ * index 2) on GPU side.
*/
- if (IS_METEORLAKE(gt->i915))
+ if (intel_gt_needs_wa_22016122933(gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
@@ -792,8 +793,8 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
- i915_coherent_map_type(guc_to_gt(guc)->i915,
- vma->obj, true));
+ intel_gt_coherent_map_type(guc_to_gt(guc),
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index f28a3a83742d..97eadd08181d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -960,10 +960,6 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
/* now update descriptor */
WRITE_ONCE(desc->head, head);
- /*
- * Wa_22016122933: Making sure the head update is
- * visible to GuC right away
- */
intel_guc_write_barrier(ct_to_guc(ct));
return available - len;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index 852bea0208ce..cc9569af7f0c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -94,7 +94,7 @@ static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig
static bool has_table(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
+ if (IS_ALDERLAKE_P(i915) && !IS_ALDERLAKE_P_N(i915))
return true;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return true;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 48f506a26e6d..b648238cc675 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -27,7 +27,6 @@ struct mtl_huc_auth_msg_out {
int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct mtl_huc_auth_msg_in *msg_in;
struct mtl_huc_auth_msg_out *msg_out;
@@ -43,7 +42,7 @@ int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
pkt_offset = i915_ggtt_offset(huc->heci_pkt);
pkt_vaddr = i915_gem_object_pin_map_unlocked(obj,
- i915_coherent_map_type(i915, obj, true));
+ intel_gt_coherent_map_type(gt, obj, true));
if (IS_ERR(pkt_vaddr))
return PTR_ERR(pkt_vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 18250fb64bd8..98b103375b7a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -43,7 +43,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
}
/* Intermediate platforms are HuC authentication only */
- if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
+ if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 3621963df6b5..8be005de1d28 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
@@ -279,7 +280,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
* ADL-S, otherwise the GuC might attempt to fetch a config table that
* does not exist.
*/
- if (IS_ADLP_N(i915))
+ if (IS_ALDERLAKE_P_N(i915))
p = INTEL_ALDERLAKE_S;
GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
@@ -1197,7 +1198,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
- i915_coherent_map_type(gt->i915, vma->obj, true));
+ intel_gt_coherent_map_type(gt, vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 1fd760539f77..bfb72143566f 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -204,9 +204,9 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
- if (ret != -EAGAIN) {
- guc_err(guc, "Failed to create request %d: %pe\n",
- context_index, ERR_PTR(ret));
+ if ((ret != -EAGAIN) || !last) {
+ guc_err(guc, "Failed to create %srequest %d: %pe\n",
+ last ? "" : "first ", context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 294b022de22b..b870c0df081a 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -175,7 +175,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
bool pre = false;
- pre |= IS_HSW_EARLY_SDV(dev_priv);
+ pre |= IS_HASWELL_EARLY_SDV(dev_priv);
pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a1a2fe31f434..7a8ce7239bc9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -561,8 +561,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_COFFEELAKE(i915) IS_PLATFORM(i915, INTEL_COFFEELAKE)
#define IS_COMETLAKE(i915) IS_PLATFORM(i915, INTEL_COMETLAKE)
#define IS_ICELAKE(i915) IS_PLATFORM(i915, INTEL_ICELAKE)
-#define IS_JSL_EHL(i915) (IS_PLATFORM(i915, INTEL_JASPERLAKE) || \
- IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+#define IS_JASPERLAKE(i915) IS_PLATFORM(i915, INTEL_JASPERLAKE)
+#define IS_ELKHARTLAKE(i915) IS_PLATFORM(i915, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(i915) IS_PLATFORM(i915, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(i915) IS_PLATFORM(i915, INTEL_ROCKETLAKE)
#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
@@ -583,105 +583,77 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
-#define IS_ADLS_RPLS(i915) \
+#define IS_RAPTORLAKE_S(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_N(i915) \
+#define IS_ALDERLAKE_P_N(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
-#define IS_ADLP_RPLP(i915) \
+#define IS_RAPTORLAKE_P(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_RPLU(i915) \
+#define IS_RAPTORLAKE_U(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
-#define IS_HSW_EARLY_SDV(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_EARLY_SDV(i915) (IS_HASWELL(i915) && \
(INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(i915) \
+#define IS_BROADWELL_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_BDW_ULX(i915) \
+#define IS_BROADWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_BDW_GT3(i915) (IS_BROADWELL(i915) && \
+#define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_HSW_ULT(i915) \
+#define IS_HASWELL_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_HSW_GT3(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_GT3(i915) (IS_HASWELL(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_HSW_GT1(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_GT1(i915) (IS_HASWELL(i915) && \
INTEL_INFO(i915)->gt == 1)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(i915) \
+#define IS_HASWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_ULT(i915) \
+#define IS_SKYLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_SKL_ULX(i915) \
+#define IS_SKYLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_KBL_ULT(i915) \
+#define IS_KABYLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_KBL_ULX(i915) \
+#define IS_KABYLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT2(i915) (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT2(i915) (IS_SKYLAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
-#define IS_SKL_GT3(i915) (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT3(i915) (IS_SKYLAKE(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_SKL_GT4(i915) (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT4(i915) (IS_SKYLAKE(i915) && \
INTEL_INFO(i915)->gt == 4)
-#define IS_KBL_GT2(i915) (IS_KABYLAKE(i915) && \
+#define IS_KABYLAKE_GT2(i915) (IS_KABYLAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
-#define IS_KBL_GT3(i915) (IS_KABYLAKE(i915) && \
+#define IS_KABYLAKE_GT3(i915) (IS_KABYLAKE(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_CFL_ULT(i915) \
+#define IS_COFFEELAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CFL_ULX(i915) \
+#define IS_COFFEELAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CFL_GT2(i915) (IS_COFFEELAKE(i915) && \
+#define IS_COFFEELAKE_GT2(i915) (IS_COFFEELAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
-#define IS_CFL_GT3(i915) (IS_COFFEELAKE(i915) && \
+#define IS_COFFEELAKE_GT3(i915) (IS_COFFEELAKE(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_CML_ULT(i915) \
+#define IS_COMETLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CML_ULX(i915) \
+#define IS_COMETLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CML_GT2(i915) (IS_COMETLAKE(i915) && \
+#define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
#define IS_ICL_WITH_PORT_F(i915) \
IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
-#define IS_TGL_UY(i915) \
+#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
-#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_KBL_GRAPHICS_STEP(i915, since, until) \
- (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, since, until))
-#define IS_KBL_DISPLAY_STEP(i915, since, until) \
- (IS_KABYLAKE(i915) && IS_DISPLAY_STEP(i915, since, until))
-#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
- (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
- (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
- (IS_TIGERLAKE(__i915) && \
- IS_DISPLAY_STEP(__i915, since, until))
-
-#define IS_RKL_DISPLAY_STEP(p, since, until) \
- (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
- (IS_ALDERLAKE_S(__i915) && \
- IS_DISPLAY_STEP(__i915, since, until))
-#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
- (IS_ALDERLAKE_S(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
- (IS_ALDERLAKE_P(__i915) && \
- IS_DISPLAY_STEP(__i915, since, until))
-
-#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
- (IS_ALDERLAKE_P(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
@@ -799,7 +771,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
- (IS_SKL_GT3(i915) || IS_SKL_GT4(i915))
+ (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -860,7 +832,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
-#define NUM_L3_SLICES(i915) (IS_HSW_GT3(i915) ? \
+#define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \
2 : HAS_L3_DPF(i915))
/* Only valid when HAS_DISPLAY() is true */
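
Dropping the fused IS_*_GRAPHICS_STEP()/IS_*_DISPLAY_STEP() wrappers means every workaround site now composes two orthogonal predicates, platform and stepping, which is exactly the rewrite seen throughout the earlier hunks. A sketch of the composed form, with stub predicates and the usual inclusive-since/exclusive-until convention assumed for stepping ranges:

#include <stdbool.h>
#include <stdio.h>

enum step { STEP_A0, STEP_B0, STEP_C0, STEP_FOREVER };

struct dev { bool is_kabylake; enum step graphics_step; };

/* Assumed convention: since is inclusive, until is exclusive. */
static bool is_graphics_step(const struct dev *d, enum step since, enum step until)
{
	return d->graphics_step >= since && d->graphics_step < until;
}

int main(void)
{
	struct dev d = { .is_kabylake = true, .graphics_step = STEP_B0 };

	/* Open-coded replacement for the old IS_KBL_GRAPHICS_STEP(d, 0, STEP_C0) */
	if (d.is_kabylake && is_graphics_step(&d, STEP_A0, STEP_C0))
		printf("apply WaForGAMHang\n");
	return 0;
}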
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 512fc0ef94a4..1bfcfbe6e30b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -772,45 +772,9 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 display_mask, extra_mask;
-
- if (GRAPHICS_VER(dev_priv) >= 7) {
- display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
- extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
- DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
- DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
- DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
- DE_DP_A_HOTPLUG_IVB);
- } else {
- display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
- DE_PIPEA_CRC_DONE | DE_POISON);
- extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
- DE_PLANE_FLIP_DONE(PLANE_A) |
- DE_PLANE_FLIP_DONE(PLANE_B) |
- DE_DP_A_HOTPLUG);
- }
-
- if (IS_HASWELL(dev_priv)) {
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- display_mask |= DE_EDP_PSR_INT_HSW;
- }
-
- if (IS_IRONLAKE_M(dev_priv))
- extra_mask |= DE_PCU_EVENT;
-
- dev_priv->irq_mask = ~display_mask;
-
- ibx_irq_postinstall(dev_priv);
-
gen5_gt_irq_postinstall(to_gt(dev_priv));
- GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
- display_mask | extra_mask);
+ ilk_de_irq_postinstall(dev_priv);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -828,11 +792,6 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
-
gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(dev_priv);
@@ -845,9 +804,6 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
-
gen11_gt_irq_postinstall(gt);
gen11_de_irq_postinstall(dev_priv);
@@ -869,16 +825,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- if (HAS_DISPLAY(dev_priv)) {
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else
- icp_irq_postinstall(dev_priv);
-
- gen8_de_irq_postinstall(dev_priv);
- intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
- }
+ dg1_de_irq_postinstall(dev_priv);
dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
@@ -1343,23 +1290,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- dev_priv->drm.vblank_disable_immediate = true;
-
- /* Most platforms treat the display irq block as an always-on
- * power domain. vlv/chv can disable it at runtime and need
- * special care to avoid writing any of the display block registers
- * outside of the power domain. We defer setting up the display irqs
- * in this case to the runtime pm.
- */
- dev_priv->display_irqs_enabled = true;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display_irqs_enabled = false;
-
- intel_hotplug_irq_init(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index eaa310864370..e356dfb883d3 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -418,6 +418,11 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
+static inline int i915_vma_fence_id(const struct i915_vma *vma)
+{
+ return vma->fence ? vma->fence->id : -1;
+}
+
void i915_vma_parked(struct intel_gt *gt);
static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
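
i915_vma_fence_id() folds the "has a fence" test and the id read into one NULL-safe accessor, with -1 as the no-fence sentinel the FBC code already expected. The pattern in isolation, with toy types:

#include <stdio.h>

struct fence { int id; };
struct vma { struct fence *fence; };

/* NULL-safe accessor; -1 is the "no fence" sentinel. */
static int vma_fence_id(const struct vma *vma)
{
	return vma->fence ? vma->fence->id : -1;
}

int main(void)
{
	struct fence f = { .id = 3 };
	struct vma a = { .fence = &f }, b = { .fence = NULL };

	printf("%d %d\n", vma_fence_id(&a), vma_fence_id(&b));
	return 0;
}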
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index d9600cd1ab06..81a4d32734e9 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -456,12 +456,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0))
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, 0, STEP_C0))
intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6,
0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0))
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, 0, STEP_C0))
intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1,
0, GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
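The change above open-codes what the retired platform-specific macro presumably bundled; if so, the old definition would have read roughly as below (an assumption, shown for contrast; the patch simply inlines the two checks at each call site):

#define IS_KBL_GRAPHICS_STEP(i915, since, until) \
	(IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, since, until))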
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 8a9ff6227e53..c02a6f156a00 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -192,16 +192,16 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_XEHPSDV(i915)) {
revids = xehpsdv_revids;
size = ARRAY_SIZE(xehpsdv_revids);
- } else if (IS_ADLP_N(i915)) {
+ } else if (IS_ALDERLAKE_P_N(i915)) {
revids = adlp_n_revids;
size = ARRAY_SIZE(adlp_n_revids);
- } else if (IS_ADLP_RPLP(i915)) {
+ } else if (IS_RAPTORLAKE_P(i915)) {
revids = adlp_rplp_revids;
size = ARRAY_SIZE(adlp_rplp_revids);
} else if (IS_ALDERLAKE_P(i915)) {
revids = adlp_revids;
size = ARRAY_SIZE(adlp_revids);
- } else if (IS_ADLS_RPLS(i915)) {
+ } else if (IS_RAPTORLAKE_S(i915)) {
revids = adls_rpls_revids;
size = ARRAY_SIZE(adls_rpls_revids);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -213,13 +213,13 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_ROCKETLAKE(i915)) {
revids = rkl_revids;
size = ARRAY_SIZE(rkl_revids);
- } else if (IS_TGL_UY(i915)) {
+ } else if (IS_TIGERLAKE_UY(i915)) {
revids = tgl_uy_revids;
size = ARRAY_SIZE(tgl_uy_revids);
} else if (IS_TIGERLAKE(i915)) {
revids = tgl_revids;
size = ARRAY_SIZE(tgl_revids);
- } else if (IS_JSL_EHL(i915)) {
+ } else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
revids = jsl_ehl_revids;
size = ARRAY_SIZE(jsl_ehl_revids);
} else if (IS_ICELAKE(i915)) {
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
index c7df47364013..2a600184a077 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_internal.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_fw.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
@@ -336,7 +337,7 @@ gsccs_create_buffer(struct intel_gt *gt,
}
/* return a virtual pointer */
- *map = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ *map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
if (IS_ERR(*map)) {
drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname);
err = PTR_ERR(*map);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 1ce07d7e8769..80bb00189865 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -11,6 +11,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "gt/intel_gt.h"
#include "intel_pxp.h"
#include "intel_pxp_cmd_interface_42.h"
@@ -245,7 +246,9 @@ static int alloc_streaming_command(struct intel_pxp *pxp)
}
/* map the lmem into the virtual memory pointer */
- cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ cmd = i915_gem_object_pin_map_unlocked(obj,
+ intel_gt_coherent_map_type(pxp->ctrl_gt,
+ obj, true));
if (IS_ERR(cmd)) {
drm_err(&i915->drm, "Failed to map gsc message page!\n");
err = PTR_ERR(cmd);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 3c5e0952f1b8..0f064930ef11 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -97,7 +97,7 @@ int igt_spinner_pin(struct igt_spinner *spin,
if (!spin->batch) {
unsigned int mode;
- mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
+ mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index ba9843cb1b13..19a8f27c404e 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -32,21 +32,21 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
/* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
@@ -54,7 +54,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
/* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
@@ -115,7 +115,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+ IS_ELKHARTLAKE(dev_priv)));
/* MCC is TGP compatible */
return PCH_TGP;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
@@ -127,7 +128,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+ IS_ELKHARTLAKE(dev_priv)));
/* JSP is ICP compatible */
return PCH_ICP;
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
@@ -177,7 +179,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
@@ -186,7 +188,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index bb0c8fd43a75..bf79dc55afa4 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -496,6 +496,8 @@ struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
int ret;
lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
+ if (IS_ERR(lbo))
+ return ERR_CAST(lbo);
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret)) {
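The added check applies the standard ERR_PTR idiom: never touch a potential error pointer, and use ERR_CAST() to forward it across pointer types without losing the encoded errno. A minimal sketch with hypothetical types and helpers alloc_bar() and wrap_bar():

#include <linux/err.h>

struct bar *alloc_bar(struct device *dev);
struct foo *wrap_bar(struct bar *b);

static struct foo *alloc_foo(struct device *dev)
{
	struct bar *b = alloc_bar(dev);	/* returns ERR_PTR() on failure */

	if (IS_ERR(b))
		return ERR_CAST(b);	/* propagate the encoded errno */

	return wrap_bar(b);
}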
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index db82b38600b2..e2fad1a048b5 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1226,7 +1226,7 @@ static const struct of_device_id mcde_dsi_of_match[] = {
struct platform_driver mcde_dsi_driver = {
.driver = {
.name = "mcde-dsi",
- .of_match_table = of_match_ptr(mcde_dsi_of_match),
+ .of_match_table = mcde_dsi_of_match,
},
.probe = mcde_dsi_probe,
.remove_new = mcde_dsi_remove,
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index b451dee64d34..76cab28e010c 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -26,6 +26,7 @@ config DRM_MEDIATEK_DP
select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
+ select DRM_DP_AUX_BUS
help
DRM/KMS Display Port driver for MediaTek SoCs.
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index b640bc0559e7..f47f417d8ba6 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -235,13 +235,12 @@ static int mtk_cec_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_cec_remove(struct platform_device *pdev)
+static void mtk_cec_remove(struct platform_device *pdev)
{
struct mtk_cec *cec = platform_get_drvdata(pdev);
mtk_cec_htplg_irq_disable(cec);
clk_disable_unprepare(cec->clk);
- return 0;
}
static const struct of_device_id mtk_cec_of_ids[] = {
@@ -252,7 +251,7 @@ MODULE_DEVICE_TABLE(of, mtk_cec_of_ids);
struct platform_driver mtk_cec_driver = {
.probe = mtk_cec_probe,
- .remove = mtk_cec_remove,
+ .remove_new = mtk_cec_remove,
.driver = {
.name = "mediatek-cec",
.of_match_table = mtk_cec_of_ids,
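This is the first of many identical conversions in this patch: the platform core effectively ignores the int returned from .remove(), so the void-returning .remove_new() callback removes the temptation to report unhandled errors. The shape of the change, using a hypothetical driver:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* teardown only; there is no meaningful status to return */
}

static struct platform_driver example_driver = {
	.probe      = example_probe,
	.remove_new = example_remove,	/* was: .remove, returning int */
};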
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index cdbec79474d1..4da9ac93b29e 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -25,11 +25,6 @@ struct mtk_disp_aal_data {
bool has_gamma;
};
-/**
- * struct mtk_disp_aal - DISP_AAL driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
- */
struct mtk_disp_aal {
struct clk *clk;
void __iomem *regs;
@@ -139,11 +134,9 @@ static int mtk_disp_aal_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_aal_remove(struct platform_device *pdev)
+static void mtk_disp_aal_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_aal_component_ops);
-
- return 0;
}
static const struct mtk_disp_aal_data mt8173_aal_driver_data = {
@@ -160,7 +153,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match);
struct platform_driver mtk_disp_aal_driver = {
.probe = mtk_disp_aal_probe,
- .remove = mtk_disp_aal_remove,
+ .remove_new = mtk_disp_aal_remove,
.driver = {
.name = "mediatek-disp-aal",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 7c5e04ee5b9f..4234ff7485e8 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -33,11 +33,6 @@ struct mtk_disp_ccorr_data {
u32 matrix_bits;
};
-/**
- * struct mtk_disp_ccorr - DISP_CCORR driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
- */
struct mtk_disp_ccorr {
struct clk *clk;
void __iomem *regs;
@@ -194,11 +189,9 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_ccorr_remove(struct platform_device *pdev)
+static void mtk_disp_ccorr_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ccorr_component_ops);
-
- return 0;
}
static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
@@ -220,7 +213,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
struct platform_driver mtk_disp_ccorr_driver = {
.probe = mtk_disp_ccorr_probe,
- .remove = mtk_disp_ccorr_remove,
+ .remove_new = mtk_disp_ccorr_remove,
.driver = {
.name = "mediatek-disp-ccorr",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 0b9b4b06d19c..78ea99f1444f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -131,11 +131,9 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_color_remove(struct platform_device *pdev)
+static void mtk_disp_color_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_color_component_ops);
-
- return 0;
}
static const struct mtk_disp_color_data mt2701_color_driver_data = {
@@ -163,7 +161,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
struct platform_driver mtk_disp_color_driver = {
.probe = mtk_disp_color_probe,
- .remove = mtk_disp_color_remove,
+ .remove_new = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index 7746dceadb20..673f9a5738f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -182,11 +182,9 @@ static int mtk_disp_gamma_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_gamma_remove(struct platform_device *pdev)
+static void mtk_disp_gamma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_gamma_component_ops);
-
- return 0;
}
static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = {
@@ -208,7 +206,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
struct platform_driver mtk_disp_gamma_driver = {
.probe = mtk_disp_gamma_probe,
- .remove = mtk_disp_gamma_remove,
+ .remove_new = mtk_disp_gamma_remove,
.driver = {
.name = "mediatek-disp-gamma",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index bea8a0d8040a..e525a6b9e5b0 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -294,11 +294,9 @@ static int mtk_disp_merge_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_merge_remove(struct platform_device *pdev)
+static void mtk_disp_merge_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_merge_component_ops);
-
- return 0;
}
static const struct of_device_id mtk_disp_merge_driver_dt_match[] = {
@@ -310,7 +308,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);
struct platform_driver mtk_disp_merge_driver = {
.probe = mtk_disp_merge_probe,
- .remove = mtk_disp_merge_remove,
+ .remove_new = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 73320a7425cd..2bffe4245466 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -561,12 +561,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_ovl_remove(struct platform_device *pdev)
+static void mtk_disp_ovl_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
@@ -658,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
struct platform_driver mtk_disp_ovl_driver = {
.probe = mtk_disp_ovl_probe,
- .remove = mtk_disp_ovl_remove,
+ .remove_new = mtk_disp_ovl_remove,
.driver = {
.name = "mediatek-disp-ovl",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 878575bb9671..6bf6367853fb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -427,7 +427,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
continue;
}
- type = (enum mtk_ovl_adaptor_comp_type)of_id->data;
+ type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data;
id = ovl_adaptor_comp_get_id(dev, node, type);
if (id < 0) {
dev_warn(dev, "Skipping unknown component %pOF\n",
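The double cast above addresses a compiler warning: of_id->data is a void *, and casting a pointer directly to a (narrower) enum trips clang's -Wvoid-pointer-to-enum-cast. Going through uintptr_t makes the width change explicit. A distilled example with a hypothetical enum:

enum widget_type { WIDGET_A, WIDGET_B };

static enum widget_type data_to_type(const void *data)
{
	/* void * -> uintptr_t -> enum: the truncation is now explicit */
	return (enum widget_type)(uintptr_t)data;
}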
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index a12c05786d28..faa907f2f443 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -379,13 +379,11 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_rdma_remove(struct platform_device *pdev)
+static void mtk_disp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
@@ -427,7 +425,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
struct platform_driver mtk_disp_rdma_driver = {
.probe = mtk_disp_rdma_probe,
- .remove = mtk_disp_rdma_remove,
+ .remove_new = mtk_disp_rdma_remove,
.driver = {
.name = "mediatek-disp-rdma",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 64eee77452c0..2cb47f663756 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022 BayLibre
*/
+#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -100,6 +101,7 @@ struct mtk_dp_efuse_fmt {
struct mtk_dp {
bool enabled;
bool need_debounce;
+ int irq;
u8 max_lanes;
u8 max_linkrate;
u8 rx_cap[DP_RECEIVER_CAP_SIZE];
@@ -847,7 +849,7 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
- drm_err(mtk_dp->drm_dev,
+ dev_err(mtk_dp->dev,
"AUX Rx Aux hang, need SW reset\n");
return -EIO;
}
@@ -1009,6 +1011,11 @@ static void mtk_dp_initialize_aux_settings(struct mtk_dp *mtk_dp)
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_37C8,
MTK_ATOP_EN_AUX_TX_P0,
MTK_ATOP_EN_AUX_TX_P0);
+
+ /* Set complete reply mode for AUX */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
}
static void mtk_dp_initialize_digital_settings(struct mtk_dp *mtk_dp)
@@ -1251,6 +1258,29 @@ static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute)
val[2], AU_TS_CFG_DP_ENC0_P0_MASK);
}
+static void mtk_dp_aux_panel_poweron(struct mtk_dp *mtk_dp, bool pwron)
+{
+ if (pwron) {
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ /* power on panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ } else {
+ /* power off panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ }
+}
+
static void mtk_dp_power_enable(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
@@ -1284,9 +1314,11 @@ static void mtk_dp_power_disable(struct mtk_dp *mtk_dp)
static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp)
{
+ bool plugged_in = (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP);
+
mtk_dp->train_info.link_rate = DP_LINK_BW_5_4;
mtk_dp->train_info.lane_count = mtk_dp->max_lanes;
- mtk_dp->train_info.cable_plugged_in = false;
+ mtk_dp->train_info.cable_plugged_in = plugged_in;
mtk_dp->info.format = DP_PIXELFORMAT_RGB;
memset(&mtk_dp->info.vm, 0, sizeof(struct videomode));
@@ -1588,7 +1620,19 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
u8 val;
ssize_t ret;
- drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+ /*
+	 * If this is eDP and the capabilities were already parsed, we can
+	 * skip reading them again: eDP panels aren't hotpluggable, so the
+	 * caps and training information never change within one boot.
+ */
+ if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP &&
+ mtk_dp->rx_cap[DP_MAX_LINK_RATE] &&
+ mtk_dp->train_info.sink_ssc)
+ return 0;
+
+ ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+ if (ret < 0)
+ return ret;
if (drm_dp_tps4_supported(mtk_dp->rx_cap))
mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
@@ -1615,10 +1659,13 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
return ret == 0 ? -EIO : ret;
}
- if (val)
- drm_dp_dpcd_writeb(&mtk_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
- val);
+ if (val) {
+ ret = drm_dp_dpcd_writeb(&mtk_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ val);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
@@ -1798,10 +1845,6 @@ static void mtk_dp_init_port(struct mtk_dp *mtk_dp)
mtk_dp_initialize_settings(mtk_dp);
mtk_dp_initialize_aux_settings(mtk_dp);
mtk_dp_initialize_digital_settings(mtk_dp);
-
- mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
- RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
- RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
mtk_dp_initialize_hpd_detect_settings(mtk_dp);
mtk_dp_digital_sw_reset(mtk_dp);
@@ -1877,6 +1920,31 @@ static irqreturn_t mtk_dp_hpd_event(int hpd, void *dev)
return IRQ_WAKE_THREAD;
}
+static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wait_us)
+{
+ struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(mtk_dp->regs, MTK_DP_TRANS_P0_3414,
+ val, !!(val & HPD_DB_DP_TRANS_P0_MASK),
+ wait_us / 100, wait_us);
+ if (ret) {
+ mtk_dp->train_info.cable_plugged_in = false;
+ return ret;
+ }
+
+ mtk_dp->train_info.cable_plugged_in = true;
+
+ ret = mtk_dp_parse_capabilities(mtk_dp);
+ if (ret) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
struct platform_device *pdev)
{
@@ -1918,6 +1986,9 @@ static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
{
+ if (!mtk_dp->data->audio_supported || !mtk_dp->audio_enable)
+ return;
+
mutex_lock(&mtk_dp->update_plugged_status_lock);
if (mtk_dp->plugged_cb && mtk_dp->codec_dev)
mtk_dp->plugged_cb(mtk_dp->codec_dev,
@@ -1936,16 +2007,9 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
- if (!enabled) {
- /* power on aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL_LANE,
- DP_PWR_STATE_MASK);
+ if (!enabled)
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
- /* power on panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(2000, 5000);
- }
/*
* Some dongles still source HPD when they do not connect to any
* sink device. To avoid this, we need to read the sink count
@@ -1957,16 +2021,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
if (DP_GET_SINK_COUNT(sink_count))
ret = connector_status_connected;
- if (!enabled) {
- /* power off panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
- usleep_range(2000, 3000);
-
- /* power off aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL,
- DP_PWR_STATE_MASK);
- }
+ if (!enabled)
+ mtk_dp_aux_panel_poweron(mtk_dp, false);
return ret;
}
@@ -1982,15 +2038,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
if (!enabled) {
drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
-
- /* power on aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL_LANE,
- DP_PWR_STATE_MASK);
-
- /* power on panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(2000, 5000);
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
}
new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
@@ -2010,15 +2058,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
}
if (!enabled) {
- /* power off panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
- usleep_range(2000, 3000);
-
- /* power off aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL,
- DP_PWR_STATE_MASK);
-
+ mtk_dp_aux_panel_poweron(mtk_dp, false);
drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
@@ -2028,15 +2068,14 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
struct drm_dp_aux_msg *msg)
{
- struct mtk_dp *mtk_dp;
+ struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
bool is_read;
u8 request;
size_t accessed_bytes = 0;
int ret;
- mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
-
- if (!mtk_dp->train_info.cable_plugged_in) {
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
+ !mtk_dp->train_info.cable_plugged_in) {
ret = -EAGAIN;
goto err;
}
@@ -2057,7 +2096,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
is_read = true;
break;
default:
- drm_err(mtk_aux->drm_dev, "invalid aux cmd = %d\n",
+ dev_err(mtk_dp->dev, "invalid aux cmd = %d\n",
msg->request);
ret = -EINVAL;
goto err;
@@ -2073,7 +2112,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
to_access, &msg->reply);
if (ret) {
- drm_info(mtk_dp->drm_dev,
+ dev_info(mtk_dp->dev,
"Failed to do AUX transfer: %d\n", ret);
goto err;
}
@@ -2143,7 +2182,11 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
mtk_dp->drm_dev = bridge->dev;
- mtk_dp_hwirq_enable(mtk_dp, true);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) {
+ irq_clear_status_flags(mtk_dp->irq, IRQ_NOAUTOEN);
+ enable_irq(mtk_dp->irq);
+ mtk_dp_hwirq_enable(mtk_dp, true);
+ }
return 0;
@@ -2158,7 +2201,10 @@ static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
- mtk_dp_hwirq_enable(mtk_dp, false);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) {
+ mtk_dp_hwirq_enable(mtk_dp, false);
+ disable_irq(mtk_dp->irq);
+ }
mtk_dp->drm_dev = NULL;
mtk_dp_poweroff(mtk_dp);
drm_dp_aux_unregister(&mtk_dp->aux);
@@ -2178,15 +2224,7 @@ static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
return;
}
- /* power on aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL_LANE,
- DP_PWR_STATE_MASK);
-
- if (mtk_dp->train_info.cable_plugged_in) {
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(2000, 5000);
- }
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
/* Training */
ret = mtk_dp_training(mtk_dp);
@@ -2481,11 +2519,62 @@ static int mtk_dp_register_audio_driver(struct device *dev)
return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev);
}
+static int mtk_dp_register_phy(struct mtk_dp *mtk_dp)
+{
+ struct device *dev = mtk_dp->dev;
+
+ mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
+ PLATFORM_DEVID_AUTO,
+ &mtk_dp->regs,
+ sizeof(struct regmap *));
+ if (IS_ERR(mtk_dp->phy_dev))
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
+ "Failed to create device mediatek-dp-phy\n");
+
+ mtk_dp_get_calibration_data(mtk_dp);
+
+ mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
+ if (IS_ERR(mtk_dp->phy)) {
+ platform_device_unregister(mtk_dp->phy_dev);
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy), "Failed to get phy\n");
+ }
+
+ return 0;
+}
+
+static int mtk_dp_edp_link_panel(struct drm_dp_aux *mtk_aux)
+{
+ struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+ struct device *dev = mtk_aux->dev;
+ int ret;
+
+ mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+
+ /* Power off the DP and AUX: either detection is done, or no panel present */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ mtk_dp_power_disable(mtk_dp);
+
+ if (IS_ERR(mtk_dp->next_bridge)) {
+ ret = PTR_ERR(mtk_dp->next_bridge);
+ mtk_dp->next_bridge = NULL;
+ return ret;
+ }
+
+ /* For eDP, we add the bridge only if the panel was found */
+ ret = devm_drm_bridge_add(dev, &mtk_dp->bridge);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int mtk_dp_probe(struct platform_device *pdev)
{
struct mtk_dp *mtk_dp;
struct device *dev = &pdev->dev;
- int ret, irq_num;
+ int ret;
mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
if (!mtk_dp)
@@ -2494,42 +2583,49 @@ static int mtk_dp_probe(struct platform_device *pdev)
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
- irq_num = platform_get_irq(pdev, 0);
- if (irq_num < 0)
- return dev_err_probe(dev, irq_num,
- "failed to request dp irq resource\n");
-
- mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
- if (IS_ERR(mtk_dp->next_bridge) &&
- PTR_ERR(mtk_dp->next_bridge) == -ENODEV)
- mtk_dp->next_bridge = NULL;
- else if (IS_ERR(mtk_dp->next_bridge))
- return dev_err_probe(dev, PTR_ERR(mtk_dp->next_bridge),
- "Failed to get bridge\n");
-
ret = mtk_dp_dt_parse(mtk_dp, pdev);
if (ret)
return dev_err_probe(dev, ret, "Failed to parse dt\n");
- drm_dp_aux_init(&mtk_dp->aux);
- mtk_dp->aux.name = "aux_mtk_dp";
- mtk_dp->aux.transfer = mtk_dp_aux_transfer;
-
- spin_lock_init(&mtk_dp->irq_thread_lock);
+ /*
+	 * Request the interrupt and install the service routine only if
+	 * we are on full DisplayPort.
+ * For eDP, polling the HPD instead is more convenient because we
+ * don't expect any (un)plug events during runtime, hence we can
+ * avoid some locking.
+ */
+ if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) {
+ mtk_dp->irq = platform_get_irq(pdev, 0);
+ if (mtk_dp->irq < 0)
+ return dev_err_probe(dev, mtk_dp->irq,
+ "failed to request dp irq resource\n");
+
+ spin_lock_init(&mtk_dp->irq_thread_lock);
+
+ irq_set_status_flags(mtk_dp->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, mtk_dp->irq, mtk_dp_hpd_event,
+ mtk_dp_hpd_event_thread,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
+ mtk_dp);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to request mediatek dptx irq\n");
- ret = devm_request_threaded_irq(dev, irq_num, mtk_dp_hpd_event,
- mtk_dp_hpd_event_thread,
- IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
- mtk_dp);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to request mediatek dptx irq\n");
+ mtk_dp->need_debounce = true;
+ timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+ }
- mutex_init(&mtk_dp->update_plugged_status_lock);
+ mtk_dp->aux.name = "aux_mtk_dp";
+ mtk_dp->aux.dev = dev;
+ mtk_dp->aux.transfer = mtk_dp_aux_transfer;
+ mtk_dp->aux.wait_hpd_asserted = mtk_dp_wait_hpd_asserted;
+ drm_dp_aux_init(&mtk_dp->aux);
platform_set_drvdata(pdev, mtk_dp);
if (mtk_dp->data->audio_supported) {
+ mutex_init(&mtk_dp->update_plugged_status_lock);
+
ret = mtk_dp_register_audio_driver(dev);
if (ret) {
dev_err(dev, "Failed to register audio driver: %d\n",
@@ -2538,35 +2634,59 @@ static int mtk_dp_probe(struct platform_device *pdev)
}
}
- mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
- PLATFORM_DEVID_AUTO,
- &mtk_dp->regs,
- sizeof(struct regmap *));
- if (IS_ERR(mtk_dp->phy_dev))
- return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
- "Failed to create device mediatek-dp-phy\n");
-
- mtk_dp_get_calibration_data(mtk_dp);
-
- mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
-
- if (IS_ERR(mtk_dp->phy)) {
- platform_device_unregister(mtk_dp->phy_dev);
- return dev_err_probe(dev, PTR_ERR(mtk_dp->phy),
- "Failed to get phy\n");
- }
+ ret = mtk_dp_register_phy(mtk_dp);
+ if (ret)
+ return ret;
mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
-
- mtk_dp->bridge.ops =
- DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
- drm_bridge_add(&mtk_dp->bridge);
+ if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP) {
+ /*
+		 * Set the data lanes to idle, in case the bootloader didn't
+		 * properly close the eDP port, to avoid stalls; then
+		 * reinitialize, reset and power on the AUX block.
+ */
+ mtk_dp_set_idle_pattern(mtk_dp, true);
+ mtk_dp_initialize_aux_settings(mtk_dp);
+ mtk_dp_power_enable(mtk_dp);
- mtk_dp->need_debounce = true;
- timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+ /* Disable HW interrupts: we don't need any for eDP */
+ mtk_dp_hwirq_enable(mtk_dp, false);
+
+ /*
+ * Power on the AUX to allow reading the EDID from aux-bus:
+		 * note that the power off must happen in the .done_probing()
+		 * callback (mtk_dp_edp_link_panel), as only there can we
+		 * safely assume that reading the EDID has finished.
+ */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ ret = devm_of_dp_aux_populate_bus(&mtk_dp->aux, mtk_dp_edp_link_panel);
+ if (ret) {
+			/* -ENODEV means that the panel is not on the aux-bus */
+ if (ret == -ENODEV) {
+ ret = mtk_dp_edp_link_panel(&mtk_dp->aux);
+ if (ret)
+ return ret;
+ } else {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ mtk_dp_power_disable(mtk_dp);
+ return ret;
+ }
+ }
+ } else {
+ mtk_dp->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ ret = devm_drm_bridge_add(dev, &mtk_dp->bridge);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add bridge\n");
+ }
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -2574,19 +2694,17 @@ static int mtk_dp_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_dp_remove(struct platform_device *pdev)
+static void mtk_dp_remove(struct platform_device *pdev)
{
struct mtk_dp *mtk_dp = platform_get_drvdata(pdev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- del_timer_sync(&mtk_dp->debounce_timer);
- drm_bridge_remove(&mtk_dp->bridge);
+ if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP)
+ del_timer_sync(&mtk_dp->debounce_timer);
platform_device_unregister(mtk_dp->phy_dev);
if (mtk_dp->audio_pdev)
platform_device_unregister(mtk_dp->audio_pdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2595,7 +2713,8 @@ static int mtk_dp_suspend(struct device *dev)
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
mtk_dp_power_disable(mtk_dp);
- mtk_dp_hwirq_enable(mtk_dp, false);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP)
+ mtk_dp_hwirq_enable(mtk_dp, false);
pm_runtime_put_sync(dev);
return 0;
@@ -2607,7 +2726,8 @@ static int mtk_dp_resume(struct device *dev)
pm_runtime_get_sync(dev);
mtk_dp_init_port(mtk_dp);
- mtk_dp_hwirq_enable(mtk_dp, true);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP)
+ mtk_dp_hwirq_enable(mtk_dp, true);
mtk_dp_power_enable(mtk_dp);
return 0;
@@ -2645,7 +2765,7 @@ MODULE_DEVICE_TABLE(of, mtk_dp_of_match);
static struct platform_driver mtk_dp_driver = {
.probe = mtk_dp_probe,
- .remove = mtk_dp_remove,
+ .remove_new = mtk_dp_remove,
.driver = {
.name = "mediatek-drm-dp",
.of_match_table = mtk_dp_of_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 28bdb1f427ff..2f931e4e2b60 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -1006,7 +1006,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
- struct resource *mem;
int ret;
dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
@@ -1037,49 +1036,34 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
}
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dpi->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(dpi->regs)) {
- ret = PTR_ERR(dpi->regs);
- dev_err(dev, "Failed to ioremap mem resource: %d\n", ret);
- return ret;
- }
+ dpi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dpi->regs))
+ return dev_err_probe(dev, PTR_ERR(dpi->regs),
+ "Failed to ioremap mem resource\n");
dpi->engine_clk = devm_clk_get(dev, "engine");
- if (IS_ERR(dpi->engine_clk)) {
- ret = PTR_ERR(dpi->engine_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(dpi->engine_clk))
+ return dev_err_probe(dev, PTR_ERR(dpi->engine_clk),
+ "Failed to get engine clock\n");
dpi->pixel_clk = devm_clk_get(dev, "pixel");
- if (IS_ERR(dpi->pixel_clk)) {
- ret = PTR_ERR(dpi->pixel_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get pixel clock: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(dpi->pixel_clk))
+ return dev_err_probe(dev, PTR_ERR(dpi->pixel_clk),
+ "Failed to get pixel clock\n");
dpi->tvd_clk = devm_clk_get(dev, "pll");
- if (IS_ERR(dpi->tvd_clk)) {
- ret = PTR_ERR(dpi->tvd_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(dpi->tvd_clk))
+ return dev_err_probe(dev, PTR_ERR(dpi->tvd_clk),
+ "Failed to get tvdpll clock\n");
dpi->irq = platform_get_irq(pdev, 0);
- if (dpi->irq <= 0)
- return -EINVAL;
+ if (dpi->irq < 0)
+ return dpi->irq;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- NULL, &dpi->next_bridge);
- if (ret)
- return ret;
+ dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dpi->next_bridge))
+ return dev_err_probe(dev, PTR_ERR(dpi->next_bridge),
+ "Failed to get bridge\n");
dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
@@ -1089,57 +1073,37 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
- drm_bridge_add(&dpi->bridge);
+ ret = devm_drm_bridge_add(dev, &dpi->bridge);
+ if (ret)
+ return ret;
ret = component_add(dev, &mtk_dpi_component_ops);
- if (ret) {
- drm_bridge_remove(&dpi->bridge);
- dev_err(dev, "Failed to add component: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add component.\n");
return 0;
}
-static int mtk_dpi_remove(struct platform_device *pdev)
+static void mtk_dpi_remove(struct platform_device *pdev)
{
- struct mtk_dpi *dpi = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &mtk_dpi_component_ops);
- drm_bridge_remove(&dpi->bridge);
-
- return 0;
}
static const struct of_device_id mtk_dpi_of_ids[] = {
- { .compatible = "mediatek,mt2701-dpi",
- .data = &mt2701_conf,
- },
- { .compatible = "mediatek,mt8173-dpi",
- .data = &mt8173_conf,
- },
- { .compatible = "mediatek,mt8183-dpi",
- .data = &mt8183_conf,
- },
- { .compatible = "mediatek,mt8186-dpi",
- .data = &mt8186_conf,
- },
- { .compatible = "mediatek,mt8188-dp-intf",
- .data = &mt8188_dpintf_conf,
- },
- { .compatible = "mediatek,mt8192-dpi",
- .data = &mt8192_conf,
- },
- { .compatible = "mediatek,mt8195-dp-intf",
- .data = &mt8195_dpintf_conf,
- },
- { },
+ { .compatible = "mediatek,mt2701-dpi", .data = &mt2701_conf },
+ { .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf },
+ { .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf },
+ { .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf },
+ { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8188_dpintf_conf },
+ { .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
+ { .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
struct platform_driver mtk_dpi_driver = {
.probe = mtk_dpi_probe,
- .remove = mtk_dpi_remove,
+ .remove_new = mtk_dpi_remove,
.driver = {
.name = "mediatek-dpi",
.of_match_table = mtk_dpi_of_ids,
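The clk and bridge lookups above lean on dev_err_probe() semantics: it prints the message at error level, except for -EPROBE_DEFER where it demotes the message to debug level and records the deferral reason for later inspection, and it always returns the error code passed in. Each open-coded three-branch pattern therefore collapses into a single statement, sketched here with a hypothetical wrapper:

static int example_get_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "engine");

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get engine clock\n");

	*out = clk;
	return 0;
}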
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 8a43656ecc30..b6fa4ad2f94d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -117,10 +117,9 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
dma_addr_t dma_addr;
pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base) {
- kfree(pkt);
+ if (!pkt->va_base)
return -ENOMEM;
- }
+
pkt->buf_size = size;
pkt->cl = (void *)client;
@@ -130,7 +129,6 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
kfree(pkt->va_base);
- kfree(pkt);
return -ENOMEM;
}
@@ -146,7 +144,6 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
DMA_TO_DEVICE);
kfree(pkt->va_base);
- kfree(pkt);
}
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index f114da4d36a9..771f4e173353 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -563,14 +563,15 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
/* Not all drm components have a DTS device node, such as ovl_adaptor,
* which is the drm bring up sub driver
*/
- if (node) {
- comp_pdev = of_find_device_by_node(node);
- if (!comp_pdev) {
- DRM_INFO("Waiting for device %s\n", node->full_name);
- return -EPROBE_DEFER;
- }
- comp->dev = &comp_pdev->dev;
+ if (!node)
+ return 0;
+
+ comp_pdev = of_find_device_by_node(node);
+ if (!comp_pdev) {
+ DRM_INFO("Waiting for device %s\n", node->full_name);
+ return -EPROBE_DEFER;
}
+ comp->dev = &comp_pdev->dev;
if (type == MTK_DISP_AAL ||
type == MTK_DISP_BLS ||
@@ -580,7 +581,6 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
type == MTK_DISP_MERGE ||
type == MTK_DISP_OVL ||
type == MTK_DISP_OVL_2L ||
- type == MTK_DISP_OVL_ADAPTOR ||
type == MTK_DISP_PWM ||
type == MTK_DISP_RDMA ||
type == MTK_DPI ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 961715dd5b11..93552d76b6e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -355,7 +355,7 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
const struct of_device_id *of_id;
struct device_node *node;
struct device *drm_dev;
- int cnt = 0;
+ unsigned int cnt = 0;
int i, j;
for_each_child_of_node(phandle->parent, node) {
@@ -376,6 +376,9 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
all_drm_priv[cnt] = dev_get_drvdata(drm_dev);
if (all_drm_priv[cnt] && all_drm_priv[cnt]->mtk_drm_bound)
cnt++;
+
+ if (cnt == MAX_CRTC)
+ break;
}
if (drm_priv->data->mmsys_dev_num == cnt) {
@@ -827,7 +830,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
continue;
}
- comp_type = (enum mtk_ddp_comp_type)of_id->data;
+ comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data;
if (comp_type == MTK_DISP_MUTEX) {
int id;
@@ -907,7 +910,7 @@ err_node:
return ret;
}
-static int mtk_drm_remove(struct platform_device *pdev)
+static void mtk_drm_remove(struct platform_device *pdev)
{
struct mtk_drm_private *private = platform_get_drvdata(pdev);
int i;
@@ -917,8 +920,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
of_node_put(private->mutex_node);
for (i = 0; i < DDP_COMPONENT_DRM_ID_MAX; i++)
of_node_put(private->comp_node[i]);
-
- return 0;
}
static int mtk_drm_sys_prepare(struct device *dev)
@@ -951,7 +952,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
- .remove = mtk_drm_remove,
+ .remove_new = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index a25b28d3ee90..9f364df52478 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -247,7 +247,11 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
-
+ if (!mtk_gem->kvaddr) {
+ kfree(sgt);
+ kfree(mtk_gem->pages);
+ return -ENOMEM;
+ }
out:
kfree(sgt);
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 31f9420aff6f..db2f70ae060d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -122,11 +122,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (ret)
return ret;
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
- else /* Special case for asynchronous cursor updates. */
- crtc_state = new_plane_state->crtc->state;
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 7d5250351193..d8bfc2cce54d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1178,14 +1178,12 @@ err_unregister_host:
return ret;
}
-static int mtk_dsi_remove(struct platform_device *pdev)
+static void mtk_dsi_remove(struct platform_device *pdev)
{
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
mipi_dsi_host_unregister(&dsi->host);
-
- return 0;
}
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
@@ -1223,7 +1221,7 @@ MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);
struct platform_driver mtk_dsi_driver = {
.probe = mtk_dsi_probe,
- .remove = mtk_dsi_remove,
+ .remove_new = mtk_dsi_remove,
.driver = {
.name = "mtk-dsi",
.of_match_table = mtk_dsi_of_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0a8e0a13f516..86133bf16326 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1746,13 +1746,12 @@ err_bridge_remove:
return ret;
}
-static int mtk_drm_hdmi_remove(struct platform_device *pdev)
+static void mtk_drm_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
drm_bridge_remove(&hdmi->bridge);
mtk_hdmi_clk_disable_audio(hdmi);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1806,7 +1805,7 @@ MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_of_ids);
static struct platform_driver mtk_hdmi_driver = {
.probe = mtk_drm_hdmi_probe,
- .remove = mtk_drm_hdmi_remove,
+ .remove_new = mtk_drm_hdmi_remove,
.driver = {
.name = "mediatek-drm-hdmi",
.of_match_table = mtk_drm_hdmi_of_ids,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 4d39ea0a05ca..d675c954befe 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -324,14 +324,12 @@ err_clk_disable:
return ret;
}
-static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
+static void mtk_hdmi_ddc_remove(struct platform_device *pdev)
{
struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
i2c_del_adapter(&ddc->adap);
clk_disable_unprepare(ddc->clk);
-
- return 0;
}
static const struct of_device_id mtk_hdmi_ddc_match[] = {
@@ -342,7 +340,7 @@ MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_match);
struct platform_driver mtk_hdmi_ddc_driver = {
.probe = mtk_hdmi_ddc_probe,
- .remove = mtk_hdmi_ddc_remove,
+ .remove_new = mtk_hdmi_ddc_remove,
.driver = {
.name = "mediatek-hdmi-ddc",
.of_match_table = mtk_hdmi_ddc_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index 4806bdd4b8fa..c3adaeefd551 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -314,11 +314,10 @@ static int mtk_mdp_rdma_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_mdp_rdma_remove(struct platform_device *pdev)
+static void mtk_mdp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_mdp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static const struct of_device_id mtk_mdp_rdma_driver_dt_match[] = {
@@ -329,7 +328,7 @@ MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match);
struct platform_driver mtk_mdp_rdma_driver = {
.probe = mtk_mdp_rdma_probe,
- .remove = mtk_mdp_rdma_remove,
+ .remove_new = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 5e5617006da5..cf6b3a80c0c8 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -47,6 +47,9 @@ nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
nouveau-y += nouveau_ttm.o
nouveau-y += nouveau_vmm.o
+nouveau-y += nouveau_exec.o
+nouveau-y += nouveau_sched.o
+nouveau-y += nouveau_uvmm.o
# DRM - modesetting
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index a70bd65e1400..c52e8096cca4 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,8 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select DRM_EXEC
+ select DRM_SCHED
select I2C
select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a6f2e681bde9..a34924523133 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1122,11 +1122,18 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
- ret = nouveau_fence_new(chan, false, pfence);
+ ret = nouveau_fence_new(pfence);
if (ret)
goto fail;
+ ret = nouveau_fence_emit(*pfence, chan);
+ if (ret)
+ goto fail_fence_unref;
+
return 0;
+
+fail_fence_unref:
+ nouveau_fence_unref(pfence);
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index 9c7ff56831c5..a5a182b3c28d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -3,7 +3,10 @@
struct nvif_vmm_v0 {
__u8 version;
__u8 page_nr;
- __u8 managed;
+#define NVIF_VMM_V0_TYPE_UNMANAGED 0x00
+#define NVIF_VMM_V0_TYPE_MANAGED 0x01
+#define NVIF_VMM_V0_TYPE_RAW 0x02
+ __u8 type;
__u8 pad03[5];
__u64 addr;
__u64 size;
@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06
+#define NVIF_VMM_V0_RAW 0x07
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 {
@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
__u64 addr;
};
+struct nvif_vmm_raw_v0 {
+ __u8 version;
+#define NVIF_VMM_RAW_V0_GET 0x0
+#define NVIF_VMM_RAW_V0_PUT 0x1
+#define NVIF_VMM_RAW_V0_MAP 0x2
+#define NVIF_VMM_RAW_V0_UNMAP 0x3
+#define NVIF_VMM_RAW_V0_SPARSE 0x4
+ __u8 op;
+ __u8 sparse;
+ __u8 ref;
+ __u8 shift;
+ __u32 argc;
+ __u8 pad01[7];
+ __u64 addr;
+ __u64 size;
+ __u64 offset;
+ __u64 memory;
+ __u64 argv;
+};
+
struct nvif_vmm_pfnmap_v0 {
__u8 version;
__u8 page;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
index a2ee92201ace..0ecedd0ee0a5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/vmm.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -4,6 +4,12 @@
struct nvif_mem;
struct nvif_mmu;
+enum nvif_vmm_type {
+ UNMANAGED,
+ MANAGED,
+ RAW,
+};
+
enum nvif_vmm_get {
ADDR,
PTES,
@@ -30,8 +36,9 @@ struct nvif_vmm {
int page_nr;
};
-int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
- u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
+int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
+ enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *);
@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
+
+int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+ void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
+int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift, bool sparse);
+int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif
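A hedged sketch of the call order implied by the raw ops declared above; the address, size and shift values are illustrative only, and error unwinding is kept minimal:

static int map_raw_range(struct nvif_vmm *vmm, struct nvif_mem *mem)
{
	const u64 addr = 0x100000, size = 0x10000;
	const u8 shift = 12;	/* 4 KiB pages */
	int ret;

	ret = nvif_vmm_raw_get(vmm, addr, size, shift);	/* reserve PTEs */
	if (ret)
		return ret;

	ret = nvif_vmm_raw_map(vmm, addr, size, shift, NULL, 0, mem, 0);
	if (ret)
		nvif_vmm_raw_put(vmm, addr, size, shift);

	return ret;
}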
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 70e7887ef4b4..2fd2f2433fc7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
+ bool no_comp:1; /* Force no memory compression. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
};
@@ -27,10 +28,26 @@ struct nvkm_vmm {
const char *name;
u32 debug;
struct kref kref;
- struct mutex mutex;
+
+ struct {
+ struct mutex vmm;
+ struct mutex ref;
+ struct mutex map;
+ } mutex;
u64 start;
u64 limit;
+ struct {
+ struct {
+ u64 addr;
+ u64 size;
+ } p;
+ struct {
+ u64 addr;
+ u64 size;
+ } n;
+ bool raw;
+ } managed;
struct nvkm_vmm_pt *pd;
struct list_head join;
@@ -70,6 +87,7 @@ struct nvkm_vmm_map {
const struct nvkm_vmm_page *page;
+ bool no_comp;
struct nvkm_tags *tags;
u64 next;
u64 type;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 82dab51d8aeb..30afbec9e3b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -35,6 +35,7 @@
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
+#include "nouveau_sched.h"
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
@@ -125,6 +126,17 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
+	/* When a client exits without waiting for its queued-up jobs to
+ * finish it might happen that we fault the channel. This is due to
+ * drm_file_free() calling drm_gem_release() before the postclose()
+ * callback. Hence, we can't tear down this scheduler entity before
+ * uvmm mappings are unmapped. Currently, we can't detect this case.
+ *
+ * However, this should be rare and harmless, since the channel isn't
+ * needed anymore.
+ */
+ nouveau_sched_entity_fini(&chan->sched_entity);
+
/* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
@@ -261,6 +273,13 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV);
+	/* If uvmm wasn't initialized until now, disable it completely to prevent
+ * userspace from mixing up UAPIs.
+ *
+ * The client lock is already acquired by nouveau_abi16_get().
+ */
+ __nouveau_cli_disable_uvmm_noinit(cli);
+
device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
@@ -304,6 +323,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
+ ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
+ drm->sched_wq);
+ if (ret)
+ goto done;
+
init->channel = chan->chan->chid;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 27eae85f33e6..9f538486c10e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,6 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
+ struct nouveau_sched_entity sched_entity;
};
struct nouveau_abi16 {
@@ -43,28 +44,6 @@ int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
-struct drm_nouveau_channel_alloc {
- uint32_t fb_ctxdma_handle;
- uint32_t tt_ctxdma_handle;
-
- int channel;
- uint32_t pushbuf_domains;
-
- /* Notifier memory */
- uint32_t notifier_handle;
-
- /* DRM-enforced subchannel assignments */
- struct {
- uint32_t handle;
- uint32_t grclass;
- } subchan[8];
- uint32_t nr_subchan;
-};
-
-struct drm_nouveau_channel_free {
- int channel;
-};
-
struct drm_nouveau_grobj_alloc {
int channel;
uint32_t handle;
@@ -83,31 +62,12 @@ struct drm_nouveau_gpuobj_free {
uint32_t handle;
};
-#define NOUVEAU_GETPARAM_PCI_VENDOR 3
-#define NOUVEAU_GETPARAM_PCI_DEVICE 4
-#define NOUVEAU_GETPARAM_BUS_TYPE 5
-#define NOUVEAU_GETPARAM_FB_SIZE 8
-#define NOUVEAU_GETPARAM_AGP_SIZE 9
-#define NOUVEAU_GETPARAM_CHIPSET_ID 11
-#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
-#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
-#define NOUVEAU_GETPARAM_PTIMER_TIME 14
-#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
-#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-struct drm_nouveau_getparam {
- uint64_t param;
- uint64_t value;
-};
-
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
};
-#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c2ec91cc845d..19cab37ac69c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
- u32 tile_mode, u32 tile_flags)
+ u32 tile_mode, u32 tile_flags, bool internal)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
- struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+ struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
int i, pi = -1;
if (!*size) {
@@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return ERR_PTR(-ENOMEM);
+
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
@@ -232,68 +233,103 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo->force_coherent = true;
}
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
+ nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+ if (!nouveau_cli_uvmm(cli) || internal) {
+ /* for BO noVM allocs, don't assign kinds */
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
}
+ nvbo->mode = tile_mode;
+
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+ * during buffer migration, we need to determine page
+ * size for the buffer up-front, and pre-allocate its
+ * page tables.
+ *
+ * Skip page sizes that can't support needed domains.
+ */
+ if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+ (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
- } else
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
- nvbo->comp = (tile_flags & 0x00030000) >> 16;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ /* Select this page size if it's the first that supports
+ * the potential memory domains, or when it's compatible
+ * with the requested compression settings.
+ */
+ if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+ pi = i;
+
+ /* Stop once the buffer is larger than the current page size. */
+ if (*size >= 1ULL << vmm->page[i].shift)
+ break;
+ }
+
+ if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
- } else {
- nvbo->zeta = (tile_flags & 0x00000007);
- }
- nvbo->mode = tile_mode;
- nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-
- /* Determine the desirable target GPU page size for the buffer. */
- for (i = 0; i < vmm->page_nr; i++) {
- /* Because we cannot currently allow VMM maps to fail
- * during buffer migration, we need to determine page
- * size for the buffer up-front, and pre-allocate its
- * page tables.
- *
- * Skip page sizes that can't support needed domains.
- */
- if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
- (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
- continue;
- if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
- (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
- continue;
- /* Select this page size if it's the first that supports
- * the potential memory domains, or when it's compatible
- * with the requested compression settings.
- */
- if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
- pi = i;
-
- /* Stop once the buffer is larger than the current page size. */
- if (*size >= 1ULL << vmm->page[i].shift)
- break;
- }
+ /* Disable compression if suitable settings couldn't be found. */
+ if (nvbo->comp && !vmm->page[pi].comp) {
+ if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+ nvbo->kind = mmu->kind[nvbo->kind];
+ nvbo->comp = 0;
+ }
+ nvbo->page = vmm->page[pi].shift;
+ } else {
+ /* reject other tile flags when in VM mode. */
+ if (tile_mode)
+ return ERR_PTR(-EINVAL);
+ if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
+ return ERR_PTR(-EINVAL);
- if (WARN_ON(pi < 0)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+ * during buffer migration, we need to determine page
+ * size for the buffer up-front, and pre-allocate its
+ * page tables.
+ *
+ * Skip page sizes that can't support needed domains.
+ */
+ if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
- /* Disable compression if suitable settings couldn't be found. */
- if (nvbo->comp && !vmm->page[pi].comp) {
- if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
- nvbo->kind = mmu->kind[nvbo->kind];
- nvbo->comp = 0;
+ if (pi < 0)
+ pi = i;
+ /* Stop once the buffer is larger than the current page size. */
+ if (*size >= 1ULL << vmm->page[i].shift)
+ break;
+ }
+ if (WARN_ON(pi < 0)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+ nvbo->page = vmm->page[pi].shift;
}
- nvbo->page = vmm->page[pi].shift;
nouveau_bo_fixup_align(nvbo, align, size);
@@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ .resv = robj,
+ };
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
- ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
- &nvbo->placement, align >> PAGE_SHIFT, false,
+ ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
+ &nvbo->placement, align >> PAGE_SHIFT, &ctx,
sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ if (!robj)
+ ttm_bo_unreserve(&nvbo->bo);
+
return 0;
}
@@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
int ret;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
- tile_flags);
+ tile_flags, true);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
@@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
dma_resv_init(&nvbo->bo.base._resv);
drm_vma_node_reset(&nvbo->bo.base.vma_node);
+ /* This must be called before ttm_bo_init_reserved(). Subsequent
+ * bo_move() callbacks might already iterate the GEM's GPUVA list.
+ */
+ drm_gem_gpuva_init(&nvbo->bo.base);
+
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
@@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock(&cli->mutex);
else
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
- if (ret == 0) {
- ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
- if (ret == 0) {
- ret = nouveau_fence_new(chan, false, &fence);
- if (ret == 0) {
- /* TODO: figure out a better solution here
- *
- * wait on the fence here explicitly as going through
- * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
- *
- * Without this the operation can timeout and we'll fallback to a
- * software copy, which might take several minutes to finish.
- */
- nouveau_fence_wait(fence, false, false);
- ret = ttm_bo_move_accel_cleanup(bo,
- &fence->base,
- evict, false,
- new_reg);
- nouveau_fence_unref(&fence);
- }
- }
+ if (ret)
+ goto out_unlock;
+
+ ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = nouveau_fence_new(&fence);
+ if (ret)
+ goto out_unlock;
+
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret) {
+ nouveau_fence_unref(&fence);
+ goto out_unlock;
}
+
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+ * Without this the operation can time out and we'll fall back to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
+ ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
+ new_reg);
+ nouveau_fence_unref(&fence);
+
+out_unlock:
mutex_unlock(&cli->mutex);
return ret;
}
@@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
list_for_each_entry(vma, &nvbo->vma_list, head) {
nouveau_vma_map(vma, mem);
}
+ nouveau_uvmm_bo_map_all(nvbo, mem);
} else {
list_for_each_entry(vma, &nvbo->vma_list, head) {
ret = dma_resv_wait_timeout(bo->base.resv,
@@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
WARN_ON(ret <= 0);
nouveau_vma_unmap(vma);
}
+ nouveau_uvmm_bo_unmap_all(nvbo);
}
if (new_reg)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 774dd93ca76b..07f671cf895e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,7 @@ struct nouveau_bo {
struct list_head entry;
int pbbo_index;
bool validate_mapped;
+ bool no_share;
/* GPU address space is independent of CPU word size */
uint64_t offset;
@@ -73,7 +74,7 @@ extern struct ttm_device_funcs nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
- u32 domain, u32 tile_mode, u32 tile_flags);
+ u32 domain, u32 tile_mode, u32 tile_flags, bool internal);
int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj);
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 3dfbc374478e..1fd5ccf41128 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -40,6 +40,14 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+void
+nouveau_channel_kill(struct nouveau_channel *chan)
+{
+ atomic_set(&chan->killed, 1);
+ if (chan->fence)
+ nouveau_fence_context_kill(chan->fence, -ENODEV);
+}
+
static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
@@ -47,9 +55,9 @@ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
- atomic_set(&chan->killed, 1);
- if (chan->fence)
- nouveau_fence_context_kill(chan->fence, -ENODEV);
+
+ if (unlikely(!atomic_read(&chan->killed)))
+ nouveau_channel_kill(chan);
return NVIF_EVENT_DROP;
}
@@ -62,9 +70,11 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, false, &fence);
+ ret = nouveau_fence_new(&fence);
if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
+ ret = nouveau_fence_emit(fence, chan);
+ if (!ret)
+ ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
@@ -149,7 +159,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->device = device;
chan->drm = drm;
- chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+ chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index bad7466bd0d5..5de2ef4e98c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -66,6 +66,7 @@ int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv,
u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
+void nouveau_channel_kill(struct nouveau_channel *);
extern int nouveau_vram_pushbuf;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 99d022a91afc..053f703f2f68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -203,6 +203,44 @@ nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
}
+static void
+nouveau_debugfs_gpuva_regions(struct seq_file *m, struct nouveau_uvmm *uvmm)
+{
+ MA_STATE(mas, &uvmm->region_mt, 0, 0);
+ struct nouveau_uvma_region *reg;
+
+ seq_puts (m, " VA regions | start | range | end \n");
+ seq_puts (m, "----------------------------------------------------------------------------\n");
+ mas_for_each(&mas, reg, ULONG_MAX)
+ seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx\n",
+ reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
+}
+
+static int
+nouveau_debugfs_gpuva(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ struct nouveau_cli *cli;
+
+ mutex_lock(&drm->clients_lock);
+ list_for_each_entry(cli, &drm->clients, head) {
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+
+ if (!uvmm)
+ continue;
+
+ nouveau_uvmm_lock(uvmm);
+ drm_debugfs_gpuva_info(m, &uvmm->umgr);
+ seq_puts(m, "\n");
+ nouveau_debugfs_gpuva_regions(m, uvmm);
+ nouveau_uvmm_unlock(uvmm);
+ }
+ mutex_unlock(&drm->clients_lock);
+
+ return 0;
+}
+
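For illustration, given the seq_puts()/seq_printf() format strings above, nouveau_debugfs_gpuva_regions() renders one region per line like this (addresses hypothetical):

   VA regions | start | range | end
  ----------------------------------------------------------------------------
   | 0x0000000000100000 | 0x0000000000100000 | 0x0000000000200000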
static const struct file_operations nouveau_pstate_fops = {
.owner = THIS_MODULE,
.open = nouveau_debugfs_pstate_open,
@@ -214,6 +252,7 @@ static const struct file_operations nouveau_pstate_fops = {
static struct drm_info_list nouveau_debugfs_list[] = {
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
+ DRM_DEBUGFS_GPUVA_INFO(nouveau_debugfs_gpuva, NULL),
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 789857faa048..61e84562094a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
- nouveau_fence_new(dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
- nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8325fcf35c5e..4396f501b16a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -68,6 +68,9 @@
#include "nouveau_platform.h"
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
+#include "nouveau_exec.h"
+#include "nouveau_uvmm.h"
+#include "nouveau_sched.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -196,6 +199,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
+ nouveau_uvmm_fini(&cli->uvmm);
+ nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@@ -301,6 +306,12 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}
cli->mem = &mems[ret];
+
+ ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
+ drm->sched_wq);
+ if (ret)
+ goto done;
+
return 0;
done:
if (ret)
@@ -568,10 +579,14 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
- ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+ ret = nouveau_sched_init(drm);
if (ret)
goto fail_alloc;
+ ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+ if (ret)
+ goto fail_sched;
+
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
goto fail_master;
@@ -628,7 +643,6 @@ nouveau_drm_device_init(struct drm_device *dev)
}
return 0;
-
fail_dispinit:
nouveau_display_destroy(dev);
fail_dispctor:
@@ -641,6 +655,8 @@ fail_ttm:
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
+fail_sched:
+ nouveau_sched_fini(drm);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@@ -692,6 +708,8 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
+ nouveau_sched_fini(drm);
+
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
nvif_parent_dtor(&drm->parent);
@@ -1193,6 +1211,9 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_VM_INIT, nouveau_uvmm_ioctl_vm_init, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_VM_BIND, nouveau_uvmm_ioctl_vm_bind, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
};
long
@@ -1240,6 +1261,8 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features = DRIVER_GEM |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
+ DRIVER_GEM_GPUVA |
DRIVER_MODESET |
DRIVER_RENDER,
.open = nouveau_drm_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b5de312a523f..1fe17ff95f5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -10,8 +10,8 @@
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 4
+#define DRIVER_PATCHLEVEL 0
/*
* 1.1.1:
@@ -63,7 +63,9 @@ struct platform_device;
#include "nouveau_fence.h"
#include "nouveau_bios.h"
+#include "nouveau_sched.h"
#include "nouveau_vmm.h"
+#include "nouveau_uvmm.h"
struct nouveau_drm_tile {
struct nouveau_fence *fence;
@@ -91,6 +93,10 @@ struct nouveau_cli {
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
struct nouveau_vmm svm;
+ struct nouveau_uvmm uvmm;
+
+ struct nouveau_sched_entity sched_entity;
+
const struct nvif_mclass *mem;
struct list_head head;
@@ -112,6 +118,59 @@ struct nouveau_cli_work {
struct dma_fence_cb cb;
};
+static inline struct nouveau_uvmm *
+nouveau_cli_uvmm(struct nouveau_cli *cli)
+{
+ if (!cli || !cli->uvmm.vmm.cli)
+ return NULL;
+
+ return &cli->uvmm;
+}
+
+static inline struct nouveau_uvmm *
+nouveau_cli_uvmm_locked(struct nouveau_cli *cli)
+{
+ struct nouveau_uvmm *uvmm;
+
+ mutex_lock(&cli->mutex);
+ uvmm = nouveau_cli_uvmm(cli);
+ mutex_unlock(&cli->mutex);
+
+ return uvmm;
+}
+
+static inline struct nouveau_vmm *
+nouveau_cli_vmm(struct nouveau_cli *cli)
+{
+ struct nouveau_uvmm *uvmm;
+
+ uvmm = nouveau_cli_uvmm(cli);
+ if (uvmm)
+ return &uvmm->vmm;
+
+ if (cli->svm.cli)
+ return &cli->svm;
+
+ return &cli->vmm;
+}
+
+static inline void
+__nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+
+ if (!uvmm)
+ cli->uvmm.disabled = true;
+}
+
+static inline void
+nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
+{
+ mutex_lock(&cli->mutex);
+ __nouveau_cli_disable_uvmm_noinit(cli);
+ mutex_unlock(&cli->mutex);
+}
+
void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
struct nouveau_cli_work *);
@@ -121,6 +180,32 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
+static inline void
+u_free(void *addr)
+{
+ kvfree(addr);
+}
+
+static inline void *
+u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
+{
+ void *mem;
+ void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+ size *= nmemb;
+
+ mem = kvmalloc(size, GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(mem, userptr, size)) {
+ u_free(mem);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return mem;
+}
+
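Worth noting: u_memcpya() computes size *= nmemb in 32-bit unsigned arithmetic, so a hostile nmemb/size pair can wrap and under-allocate before copy_from_user(). A hedged sketch of an overflow-checked variant; u_memcpya_checked is a hypothetical name, while check_mul_overflow() (linux/overflow.h) and u64_to_user_ptr() are existing kernel helpers:

static inline void *
u_memcpya_checked(u64 user, unsigned int nmemb, unsigned int size)
{
	void __user *userptr = u64_to_user_ptr(user);
	size_t bytes;
	void *mem;

	/* Reject multiplications that would wrap. */
	if (check_mul_overflow((size_t)nmemb, (size_t)size, &bytes))
		return ERR_PTR(-EOVERFLOW);

	mem = kvmalloc(bytes, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, bytes)) {
		kvfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}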
#include <nvif/object.h>
#include <nvif/parent.h>
@@ -222,6 +307,10 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
+
+ struct drm_gpu_scheduler sched;
+ struct workqueue_struct *sched_wq;
+
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
new file mode 100644
index 000000000000..0f927adda4ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_exec.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_dma.h"
+#include "nouveau_exec.h"
+#include "nouveau_abi16.h"
+#include "nouveau_chan.h"
+#include "nouveau_sched.h"
+#include "nouveau_uvmm.h"
+
+/**
+ * DOC: Overview
+ *
+ * Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
+ * DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
+ *
+ * To use the UAPI, a user client must first initialize the VA space using the
+ * DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space should be
+ * managed by the kernel and which by the UMD.
+ *
+ * The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
+ * userspace-manageable portion of the VA space. It provides operations to map
+ * and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
+ * backed by a GEM object and the kernel will ignore GEM handles provided
+ * alongside a sparse mapping.
+ *
+ * Userspace may request memory backed mappings either within or outside of the
+ * bounds (but not crossing those bounds) of a previously mapped sparse
+ * mapping. Subsequently requested memory backed mappings within a sparse
+ * mapping will take precedence over the corresponding range of the sparse
+ * mapping. If such memory backed mappings are unmapped, the kernel makes sure
+ * that the corresponding sparse mapping takes their place again.
+ * Requests to unmap a sparse mapping that still contains memory backed mappings
+ * will result in those memory backed mappings being unmapped first.
+ *
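+ * As a worked example (addresses hypothetical): after creating a sparse
+ * mapping over 0x100000-0x200000, a memory backed mapping at
+ * 0x140000-0x150000 takes precedence over that sub-range; once it is
+ * unmapped again, the sparse mapping covers the full 0x100000-0x200000
+ * range as before.
+ *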
+ * Unmap requests are not bound to the range of existing mappings and can even
+ * overlap the bounds of sparse mappings. For such a request the kernel will
+ * make sure to unmap all memory backed mappings within the given range,
+ * splitting up memory backed mappings which are only partially contained
+ * within the given range. Unmap requests with the sparse flag set must match
+ * the range of a previously mapped sparse mapping exactly though.
+ *
+ * While the kernel generally permits arbitrary sequences and ranges of memory
+ * backed mappings being mapped and unmapped, either within a single or multiple
+ * VM_BIND ioctl calls, there are some restrictions for sparse mappings.
+ *
+ * The kernel does not permit userspace to:
+ * - unmap non-existent sparse mappings
+ * - unmap a sparse mapping and map a new sparse mapping overlapping the range
+ * of the previously unmapped sparse mapping within the same VM_BIND ioctl
+ * - unmap a sparse mapping and map new memory backed mappings overlapping the
+ * range of the previously unmapped sparse mapping within the same VM_BIND
+ * ioctl
+ *
+ * When using the VM_BIND ioctl to request the kernel to map memory to a given
+ * virtual address in the GPU's VA space there is no guarantee that the actual
+ * mappings are created in the GPU's MMU. If the given memory is swapped out
+ * at the time the bind operation is executed, the kernel will stash the mapping
+ * details into its internal allocator and create the actual MMU mappings once
+ * the memory is swapped back in. While this is transparent for userspace, it is
+ * guaranteed that all the backing memory is swapped back in and all the memory
+ * mappings, as requested by userspace previously, are actually mapped once the
+ * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
+ *
+ * A VM_BIND job can be executed either synchronously or asynchronously. If
+ * executed asynchronously, userspace may provide a list of syncobjs this job
+ * will wait for and/or a list of syncobjs the kernel will signal once the
+ * VM_BIND job has finished execution. If executed synchronously, the ioctl
+ * will block until the bind job is finished. For synchronous jobs the kernel
+ * does not permit any syncobjs to be submitted.
+ *
+ * To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
+ * jobs are always executed asynchronously and, like VM_BIND jobs, provide
+ * the option to synchronize them with syncobjs.
+ *
+ * Besides that, EXEC jobs can be scheduled for a specified channel to execute on.
+ *
+ * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs have
+ * an up-to-date view of the VA space. However, the actual mappings might still
+ * be pending. Hence, EXEC jobs require the fences of the corresponding VM_BIND
+ * jobs they depend on to be attached to them.
+ */
+
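As a concrete illustration of the flow described above, here is a minimal userspace sketch. It is not part of this patch; the request structs come from the UAPI header added alongside this series. The EXEC fields match the usage in nouveau_exec_ucopy() below, while the VM_INIT/VM_BIND field and flag names used here (kernel_managed_addr/size, op_count/op_ptr, DRM_NOUVEAU_VM_BIND_OP_MAP) should be treated as assumptions:

#include <stdint.h>
#include <xf86drm.h>
#include "nouveau_drm.h"	/* UAPI header from this series */

/* Hedged sketch: initialize the VA space, synchronously bind a BO, then
 * submit a push buffer that lives inside the fresh mapping. */
static int example_init_bind_exec(int fd, uint32_t channel, uint32_t bo_handle)
{
	struct drm_nouveau_vm_init init = {
		.kernel_managed_addr = 0,		/* kernel-reserved VA range */
		.kernel_managed_size = 1ULL << 20,
	};
	struct drm_nouveau_vm_bind_op op = {
		.op	= DRM_NOUVEAU_VM_BIND_OP_MAP,	/* assumed op encoding */
		.handle	= bo_handle,
		.addr	= 1ULL << 20,			/* VA in the UMD-managed range */
		.range	= 1ULL << 16,
	};
	struct drm_nouveau_vm_bind bind = {
		.op_count = 1,
		.op_ptr	  = (uintptr_t)&op,
		/* no syncobjs: a synchronous bind blocks until mapped */
	};
	struct drm_nouveau_exec_push push = {
		.va	= 1ULL << 20,
		.va_len	= 0x1000,
	};
	struct drm_nouveau_exec exec = {
		.channel    = channel,
		.push_count = 1,
		.push_ptr   = (uintptr_t)&push,
	};
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);
	if (ret)
		return ret;
	ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
	if (ret)
		return ret;
	return drmIoctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
}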
+static int
+nouveau_exec_job_submit(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ struct nouveau_cli *cli = job->cli;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+ struct drm_exec *exec = &job->exec;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ ret = nouveau_fence_new(&exec_job->fence);
+ if (ret)
+ return ret;
+
+ nouveau_uvmm_lock(uvmm);
+ drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(exec) {
+ struct drm_gpuva *va;
+
+ drm_gpuva_for_each_va(va, &uvmm->umgr) {
+ if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err_uvmm_unlock;
+ }
+ }
+ nouveau_uvmm_unlock(uvmm);
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+ ret = nouveau_bo_validate(nvbo, true, false);
+ if (ret)
+ goto err_exec_fini;
+ }
+
+ return 0;
+
+err_uvmm_unlock:
+ nouveau_uvmm_unlock(uvmm);
+err_exec_fini:
+ drm_exec_fini(exec);
+ return ret;
+
+}
+
+static void
+nouveau_exec_job_armed_submit(struct nouveau_job *job)
+{
+ struct drm_exec *exec = &job->exec;
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj)
+ dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
+
+ drm_exec_fini(exec);
+}
+
+static struct dma_fence *
+nouveau_exec_job_run(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ struct nouveau_channel *chan = exec_job->chan;
+ struct nouveau_fence *fence = exec_job->fence;
+ int i, ret;
+
+ ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+ if (ret) {
+ NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < exec_job->push.count; i++) {
+ nv50_dma_push(chan, exec_job->push.s[i].va,
+ exec_job->push.s[i].va_len);
+ }
+
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret) {
+ NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
+ WIND_RING(chan);
+ return ERR_PTR(ret);
+ }
+
+ exec_job->fence = NULL;
+
+ return &fence->base;
+}
+
+static void
+nouveau_exec_job_free(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+
+ nouveau_job_free(job);
+
+ nouveau_fence_unref(&exec_job->fence);
+ kfree(exec_job->push.s);
+ kfree(exec_job);
+}
+
+static enum drm_gpu_sched_stat
+nouveau_exec_job_timeout(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ struct nouveau_channel *chan = exec_job->chan;
+
+ if (unlikely(!atomic_read(&chan->killed)))
+ nouveau_channel_kill(chan);
+
+ NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
+ chan->chid);
+
+ nouveau_sched_entity_fini(job->entity);
+
+ return DRM_GPU_SCHED_STAT_ENODEV;
+}
+
+static struct nouveau_job_ops nouveau_exec_job_ops = {
+ .submit = nouveau_exec_job_submit,
+ .armed_submit = nouveau_exec_job_armed_submit,
+ .run = nouveau_exec_job_run,
+ .free = nouveau_exec_job_free,
+ .timeout = nouveau_exec_job_timeout,
+};
+
+int
+nouveau_exec_job_init(struct nouveau_exec_job **pjob,
+ struct nouveau_exec_job_args *__args)
+{
+ struct nouveau_exec_job *job;
+ struct nouveau_job_args args = {};
+ int ret;
+
+ job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ job->push.count = __args->push.count;
+ if (__args->push.count) {
+ job->push.s = kmemdup(__args->push.s,
+ sizeof(*__args->push.s) *
+ __args->push.count,
+ GFP_KERNEL);
+ if (!job->push.s) {
+ ret = -ENOMEM;
+ goto err_free_job;
+ }
+ }
+
+ job->chan = __args->chan;
+
+ args.sched_entity = __args->sched_entity;
+ args.file_priv = __args->file_priv;
+
+ args.in_sync.count = __args->in_sync.count;
+ args.in_sync.s = __args->in_sync.s;
+
+ args.out_sync.count = __args->out_sync.count;
+ args.out_sync.s = __args->out_sync.s;
+
+ args.ops = &nouveau_exec_job_ops;
+ args.resv_usage = DMA_RESV_USAGE_WRITE;
+
+ ret = nouveau_job_init(&job->base, &args);
+ if (ret)
+ goto err_free_pushs;
+
+ return 0;
+
+err_free_pushs:
+ kfree(job->push.s);
+err_free_job:
+ kfree(job);
+ *pjob = NULL;
+
+ return ret;
+}
+
+static int
+nouveau_exec(struct nouveau_exec_job_args *args)
+{
+ struct nouveau_exec_job *job;
+ int ret;
+
+ ret = nouveau_exec_job_init(&job, args);
+ if (ret)
+ return ret;
+
+ ret = nouveau_job_submit(&job->base);
+ if (ret)
+ goto err_job_fini;
+
+ return 0;
+
+err_job_fini:
+ nouveau_job_fini(&job->base);
+ return ret;
+}
+
+static int
+nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
+ struct drm_nouveau_exec *req)
+{
+ struct drm_nouveau_sync **s;
+ u32 inc = req->wait_count;
+ u64 ins = req->wait_ptr;
+ u32 outc = req->sig_count;
+ u64 outs = req->sig_ptr;
+ u32 pushc = req->push_count;
+ u64 pushs = req->push_ptr;
+ int ret;
+
+ if (pushc) {
+ args->push.count = pushc;
+ args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
+ if (IS_ERR(args->push.s))
+ return PTR_ERR(args->push.s);
+ }
+
+ if (inc) {
+ s = &args->in_sync.s;
+
+ args->in_sync.count = inc;
+ *s = u_memcpya(ins, inc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_pushs;
+ }
+ }
+
+ if (outc) {
+ s = &args->out_sync.s;
+
+ args->out_sync.count = outc;
+ *s = u_memcpya(outs, outc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_ins;
+ }
+ }
+
+ return 0;
+
+err_free_pushs:
+ u_free(args->push.s);
+err_free_ins:
+ u_free(args->in_sync.s);
+ return ret;
+}
+
+static void
+nouveau_exec_ufree(struct nouveau_exec_job_args *args)
+{
+ u_free(args->push.s);
+ u_free(args->in_sync.s);
+ u_free(args->out_sync.s);
+}
+
+int
+nouveau_exec_ioctl_exec(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_abi16_chan *chan16;
+ struct nouveau_channel *chan = NULL;
+ struct nouveau_exec_job_args args = {};
+ struct drm_nouveau_exec *req = data;
+ int ret = 0;
+
+ if (unlikely(!abi16))
+ return -ENOMEM;
+
+ /* abi16 locks already */
+ if (unlikely(!nouveau_cli_uvmm(cli)))
+ return nouveau_abi16_put(abi16, -ENOSYS);
+
+ list_for_each_entry(chan16, &abi16->channels, head) {
+ if (chan16->chan->chid == req->channel) {
+ chan = chan16->chan;
+ break;
+ }
+ }
+
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+
+ if (unlikely(atomic_read(&chan->killed)))
+ return nouveau_abi16_put(abi16, -ENODEV);
+
+ if (!chan->dma.ib_max)
+ return nouveau_abi16_put(abi16, -ENOSYS);
+
+ if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
+ NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
+ req->push_count, NOUVEAU_GEM_MAX_PUSH);
+ return nouveau_abi16_put(abi16, -EINVAL);
+ }
+
+ ret = nouveau_exec_ucopy(&args, req);
+ if (ret)
+ goto out;
+
+ args.sched_entity = &chan16->sched_entity;
+ args.file_priv = file_priv;
+ args.chan = chan;
+
+ ret = nouveau_exec(&args);
+ if (ret)
+ goto out_free_args;
+
+out_free_args:
+ nouveau_exec_ufree(&args);
+out:
+ return nouveau_abi16_put(abi16, ret);
+}
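To round off the EXEC path above, a hedged userspace sketch of an asynchronous submission that signals a timeline syncobj on completion. The struct drm_nouveau_sync member names follow the usage in nouveau_sched.c further down; the exact UAPI layout is assumed:

#include <stdint.h>
#include <xf86drm.h>
#include "nouveau_drm.h"

static int example_exec_signal(int fd, uint32_t channel, uint32_t syncobj,
			       uint64_t point, uint64_t va, uint32_t va_len)
{
	struct drm_nouveau_sync sig = {
		.flags		= DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ,
		.handle		= syncobj,
		.timeline_value	= point,	/* attached via dma_fence_chain */
	};
	struct drm_nouveau_exec_push push = { .va = va, .va_len = va_len };
	struct drm_nouveau_exec exec = {
		.channel    = channel,
		.push_count = 1,
		.push_ptr   = (uintptr_t)&push,
		.sig_count  = 1,
		.sig_ptr    = (uintptr_t)&sig,
	};

	return drmIoctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
}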
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
new file mode 100644
index 000000000000..778cacd90f65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOUVEAU_EXEC_H__
+#define __NOUVEAU_EXEC_H__
+
+#include <drm/drm_exec.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_sched.h"
+
+struct nouveau_exec_job_args {
+ struct drm_file *file_priv;
+ struct nouveau_sched_entity *sched_entity;
+
+ struct drm_exec exec;
+ struct nouveau_channel *chan;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } out_sync;
+
+ struct {
+ struct drm_nouveau_exec_push *s;
+ u32 count;
+ } push;
+};
+
+struct nouveau_exec_job {
+ struct nouveau_job base;
+ struct nouveau_fence *fence;
+ struct nouveau_channel *chan;
+
+ struct {
+ struct drm_nouveau_exec_push *s;
+ u32 count;
+ } push;
+};
+
+#define to_nouveau_exec_job(job) \
+ container_of((job), struct nouveau_exec_job, base)
+
+int nouveau_exec_job_init(struct nouveau_exec_job **job,
+ struct nouveau_exec_job_args *args);
+
+int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ee5e9d40c166..77c739a55b19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -96,6 +96,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
if (nouveau_fence_signal(fence))
nvif_event_block(&fctx->event);
}
+ fctx->killed = 1;
spin_unlock_irqrestore(&fctx->lock, flags);
}
@@ -210,6 +211,9 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
+ if (unlikely(!chan->fence))
+ return -ENODEV;
+
fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
@@ -226,6 +230,12 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
+ if (unlikely(fctx->killed)) {
+ spin_unlock_irq(&fctx->lock);
+ dma_fence_put(&fence->base);
+ return -ENODEV;
+ }
+
if (nouveau_fence_update(chan, fctx))
nvif_event_block(&fctx->event);
@@ -396,25 +406,16 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
}
int
-nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
- struct nouveau_fence **pfence)
+nouveau_fence_new(struct nouveau_fence **pfence)
{
struct nouveau_fence *fence;
- int ret = 0;
-
- if (unlikely(!chan->fence))
- return -ENODEV;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
- ret = nouveau_fence_emit(fence, chan);
- if (ret)
- nouveau_fence_unref(&fence);
-
*pfence = fence;
- return ret;
+ return 0;
}
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 0ca2bc85adf6..2c72d96ef17d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,8 +17,7 @@ struct nouveau_fence {
unsigned long timeout;
};
-int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
- struct nouveau_fence **);
+int nouveau_fence_new(struct nouveau_fence **);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
@@ -45,7 +44,7 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_event event;
- int notify_ref, dead;
+ int notify_ref, dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ab9062e50977..f39360870c70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -103,13 +103,17 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
+ if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+ return -EPERM;
+
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
@@ -120,7 +124,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
- ret = nouveau_vma_new(nvbo, vmm, &vma);
+ /* only create a VMA on binding */
+ if (!nouveau_cli_uvmm(cli))
+ ret = nouveau_vma_new(nvbo, vmm, &vma);
+ else
+ ret = 0;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
@@ -180,13 +188,16 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : & cli->vmm;
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
+ if (nouveau_cli_uvmm(cli))
+ return;
+
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
@@ -209,6 +220,7 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
.free = nouveau_gem_object_del,
.open = nouveau_gem_object_open,
.close = nouveau_gem_object_close,
+ .export = nouveau_gem_prime_export,
.pin = nouveau_gem_prime_pin,
.unpin = nouveau_gem_prime_unpin,
.get_sg_table = nouveau_gem_prime_get_sg_table,
@@ -224,18 +236,28 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
struct nouveau_bo **pnvbo)
{
struct nouveau_drm *drm = cli->drm;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+ struct dma_resv *resv = NULL;
struct nouveau_bo *nvbo;
int ret;
+ if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
+ if (unlikely(!uvmm))
+ return -EINVAL;
+
+ resv = &uvmm->resv;
+ }
+
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
domain |= NOUVEAU_GEM_DOMAIN_CPU;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
- tile_flags);
+ tile_flags, false);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+ nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
@@ -246,7 +268,14 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
return ret;
}
- ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
+ if (resv)
+ dma_resv_lock(resv, NULL);
+
+ ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
+
+ if (resv)
+ dma_resv_unlock(resv);
+
if (ret)
return ret;
@@ -269,7 +298,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
@@ -279,13 +308,15 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->offset;
- if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
+ !nouveau_cli_uvmm(cli)) {
vma = nouveau_vma_find(nvbo, vmm);
if (!vma)
return -EINVAL;
rep->offset = vma->addr;
- }
+ } else
+ rep->offset = 0;
rep->size = nvbo->bo.base.size;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
@@ -310,6 +341,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
+ /* If uvmm wasn't initialized until now, disable it completely to prevent
+ * userspace from mixing up UAPIs.
+ */
+ nouveau_cli_disable_uvmm_noinit(cli);
+
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
@@ -613,32 +649,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
}
-static inline void
-u_free(void *addr)
-{
- kvfree(addr);
-}
-
-static inline void *
-u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
-{
- void *mem;
- void __user *userptr = (void __force __user *)(uintptr_t)user;
-
- size *= nmemb;
-
- mem = kvmalloc(size, GFP_KERNEL);
- if (!mem)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(mem, userptr, size)) {
- u_free(mem);
- return ERR_PTR(-EFAULT);
- }
-
- return mem;
-}
-
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
@@ -747,6 +757,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(!abi16))
return -ENOMEM;
+ if (unlikely(nouveau_cli_uvmm(cli)))
+ return -ENOSYS;
+
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
chan = temp->chan;
@@ -899,8 +912,11 @@ revalidate:
}
}
- ret = nouveau_fence_new(chan, false, &fence);
+ ret = nouveau_fence_new(&fence);
+ if (!ret)
+ ret = nouveau_fence_emit(fence, chan);
if (ret) {
+ nouveau_fence_unref(&fence);
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 3b919c7c931c..10814d446435 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -37,5 +37,6 @@ extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
-
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+ int flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 76c86d8bb01e..5365a3d3a17f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -35,4 +35,9 @@ int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+int
+nouveau_mem_map_fixed(struct nouveau_mem *mem,
+ struct nvif_vmm *vmm,
+ u8 kind, u64 addr,
+ u64 offset, u64 range);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index f42c2b1b0363..1b2ff0c40fc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -50,7 +50,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
dma_resv_lock(robj, NULL);
nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
- NOUVEAU_GEM_DOMAIN_GART, 0, 0);
+ NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
@@ -102,3 +102,14 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
nouveau_bo_unpin(nvbo);
}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+ int flags)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+
+ if (nvbo->no_share)
+ return ERR_PTR(-EPERM);
+
+ return drm_gem_prime_export(gobj, flags);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
new file mode 100644
index 000000000000..3424a1bf6af3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/slab.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_syncobj.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_dma.h"
+#include "nouveau_exec.h"
+#include "nouveau_abi16.h"
+#include "nouveau_sched.h"
+
+/* FIXME
+ *
+ * We want to make sure that jobs currently executing can't be deferred by
+ * other jobs competing for the hardware. Otherwise we might end up with job
+ * timeouts just because of too many clients submitting too many jobs. We don't
+ * want jobs to time out because of system load, but only because the job
+ * itself is too bulky.
+ *
+ * For now allow for up to 16 concurrent jobs in flight until we know how many
+ * rings the hardware can process in parallel.
+ */
+#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
+#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
+
+int
+nouveau_job_init(struct nouveau_job *job,
+ struct nouveau_job_args *args)
+{
+ struct nouveau_sched_entity *entity = args->sched_entity;
+ int ret;
+
+ job->file_priv = args->file_priv;
+ job->cli = nouveau_cli(args->file_priv);
+ job->entity = entity;
+
+ job->sync = args->sync;
+ job->resv_usage = args->resv_usage;
+
+ job->ops = args->ops;
+
+ job->in_sync.count = args->in_sync.count;
+ if (job->in_sync.count) {
+ if (job->sync)
+ return -EINVAL;
+
+ job->in_sync.data = kmemdup(args->in_sync.s,
+ sizeof(*args->in_sync.s) *
+ args->in_sync.count,
+ GFP_KERNEL);
+ if (!job->in_sync.data)
+ return -ENOMEM;
+ }
+
+ job->out_sync.count = args->out_sync.count;
+ if (job->out_sync.count) {
+ if (job->sync) {
+ ret = -EINVAL;
+ goto err_free_in_sync;
+ }
+
+ job->out_sync.data = kmemdup(args->out_sync.s,
+ sizeof(*args->out_sync.s) *
+ args->out_sync.count,
+ GFP_KERNEL);
+ if (!job->out_sync.data) {
+ ret = -ENOMEM;
+ goto err_free_in_sync;
+ }
+
+ job->out_sync.objs = kcalloc(job->out_sync.count,
+ sizeof(*job->out_sync.objs),
+ GFP_KERNEL);
+ if (!job->out_sync.objs) {
+ ret = -ENOMEM;
+ goto err_free_out_sync;
+ }
+
+ job->out_sync.chains = kcalloc(job->out_sync.count,
+ sizeof(*job->out_sync.chains),
+ GFP_KERNEL);
+ if (!job->out_sync.chains) {
+ ret = -ENOMEM;
+ goto err_free_objs;
+ }
+
+ }
+
+ ret = drm_sched_job_init(&job->base, &entity->base, NULL);
+ if (ret)
+ goto err_free_chains;
+
+ job->state = NOUVEAU_JOB_INITIALIZED;
+
+ return 0;
+
+err_free_chains:
+ kfree(job->out_sync.chains);
+err_free_objs:
+ kfree(job->out_sync.objs);
+err_free_out_sync:
+ kfree(job->out_sync.data);
+err_free_in_sync:
+ kfree(job->in_sync.data);
+ return ret;
+}
+
+void
+nouveau_job_free(struct nouveau_job *job)
+{
+ kfree(job->in_sync.data);
+ kfree(job->out_sync.data);
+ kfree(job->out_sync.objs);
+ kfree(job->out_sync.chains);
+}
+
+void nouveau_job_fini(struct nouveau_job *job)
+{
+ dma_fence_put(job->done_fence);
+ drm_sched_job_cleanup(&job->base);
+ job->ops->free(job);
+}
+
+static int
+sync_find_fence(struct nouveau_job *job,
+ struct drm_nouveau_sync *sync,
+ struct dma_fence **fence)
+{
+ u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+ u64 point = 0;
+ int ret;
+
+ if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
+ stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
+ return -EOPNOTSUPP;
+
+ if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
+ point = sync->timeline_value;
+
+ ret = drm_syncobj_find_fence(job->file_priv,
+ sync->handle, point,
+ 0 /* flags */, fence);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nouveau_job_add_deps(struct nouveau_job *job)
+{
+ struct dma_fence *in_fence = NULL;
+ int ret, i;
+
+ for (i = 0; i < job->in_sync.count; i++) {
+ struct drm_nouveau_sync *sync = &job->in_sync.data[i];
+
+ ret = sync_find_fence(job, sync, &in_fence);
+ if (ret) {
+ NV_PRINTK(warn, job->cli,
+ "Failed to find syncobj (-> in): handle=%d\n",
+ sync->handle);
+ return ret;
+ }
+
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->out_sync.count; i++) {
+ struct drm_syncobj *obj = job->out_sync.objs[i];
+ struct dma_fence_chain *chain = job->out_sync.chains[i];
+
+ if (obj)
+ drm_syncobj_put(obj);
+
+ if (chain)
+ dma_fence_chain_free(chain);
+ }
+}
+
+static int
+nouveau_job_fence_attach_prepare(struct nouveau_job *job)
+{
+ int i, ret;
+
+ for (i = 0; i < job->out_sync.count; i++) {
+ struct drm_nouveau_sync *sync = &job->out_sync.data[i];
+ struct drm_syncobj **pobj = &job->out_sync.objs[i];
+ struct dma_fence_chain **pchain = &job->out_sync.chains[i];
+ u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+
+ if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
+ stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+ ret = -EINVAL;
+ goto err_sync_cleanup;
+ }
+
+ *pobj = drm_syncobj_find(job->file_priv, sync->handle);
+ if (!*pobj) {
+ NV_PRINTK(warn, job->cli,
+ "Failed to find syncobj (-> out): handle=%d\n",
+ sync->handle);
+ ret = -ENOENT;
+ goto err_sync_cleanup;
+ }
+
+ if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+ *pchain = dma_fence_chain_alloc();
+ if (!*pchain) {
+ ret = -ENOMEM;
+ goto err_sync_cleanup;
+ }
+ }
+ }
+
+ return 0;
+
+err_sync_cleanup:
+ nouveau_job_fence_attach_cleanup(job);
+ return ret;
+}
+
+static void
+nouveau_job_fence_attach(struct nouveau_job *job)
+{
+ struct dma_fence *fence = job->done_fence;
+ int i;
+
+ for (i = 0; i < job->out_sync.count; i++) {
+ struct drm_nouveau_sync *sync = &job->out_sync.data[i];
+ struct drm_syncobj **pobj = &job->out_sync.objs[i];
+ struct dma_fence_chain **pchain = &job->out_sync.chains[i];
+ u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+
+ if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+ drm_syncobj_add_point(*pobj, *pchain, fence,
+ sync->timeline_value);
+ } else {
+ drm_syncobj_replace_fence(*pobj, fence);
+ }
+
+ drm_syncobj_put(*pobj);
+ *pobj = NULL;
+ *pchain = NULL;
+ }
+}
+
+int
+nouveau_job_submit(struct nouveau_job *job)
+{
+ struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+ struct dma_fence *done_fence = NULL;
+ int ret;
+
+ ret = nouveau_job_add_deps(job);
+ if (ret)
+ goto err;
+
+ ret = nouveau_job_fence_attach_prepare(job);
+ if (ret)
+ goto err;
+
+ /* Make sure the job appears on the sched_entity's queue in the same
+ * order as it was submitted.
+ */
+ mutex_lock(&entity->mutex);
+
+ /* Guarantee we won't fail after the submit() callback returned
+ * successfully.
+ */
+ if (job->ops->submit) {
+ ret = job->ops->submit(job);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ drm_sched_job_arm(&job->base);
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+ if (job->sync)
+ done_fence = dma_fence_get(job->done_fence);
+
+ if (job->ops->armed_submit)
+ job->ops->armed_submit(job);
+
+ nouveau_job_fence_attach(job);
+
+ /* Set job state before pushing the job to the scheduler,
+ * such that we do not overwrite the job state set in run().
+ */
+ job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;
+
+ drm_sched_entity_push_job(&job->base);
+
+ mutex_unlock(&entity->mutex);
+
+ if (done_fence) {
+ dma_fence_wait(done_fence, true);
+ dma_fence_put(done_fence);
+ }
+
+ return 0;
+
+err_cleanup:
+ mutex_unlock(&entity->mutex);
+ nouveau_job_fence_attach_cleanup(job);
+err:
+ job->state = NOUVEAU_JOB_SUBMIT_FAILED;
+ return ret;
+}
+
+bool
+nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
+ struct work_struct *work)
+{
+ return queue_work(entity->sched_wq, work);
+}
+
+static struct dma_fence *
+nouveau_job_run(struct nouveau_job *job)
+{
+ struct dma_fence *fence;
+
+ fence = job->ops->run(job);
+ if (IS_ERR(fence))
+ job->state = NOUVEAU_JOB_RUN_FAILED;
+ else
+ job->state = NOUVEAU_JOB_RUN_SUCCESS;
+
+ return fence;
+}
+
+static struct dma_fence *
+nouveau_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_job *job = to_nouveau_job(sched_job);
+
+ return nouveau_job_run(job);
+}
+
+static enum drm_gpu_sched_stat
+nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_job *job = to_nouveau_job(sched_job);
+
+ NV_PRINTK(warn, job->cli, "Job timed out.\n");
+
+ if (job->ops->timeout)
+ return job->ops->timeout(job);
+
+ return DRM_GPU_SCHED_STAT_ENODEV;
+}
+
+static void
+nouveau_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_job *job = to_nouveau_job(sched_job);
+
+ nouveau_job_fini(job);
+}
+
+int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
+ struct drm_gpu_scheduler *sched,
+ struct workqueue_struct *sched_wq)
+{
+ mutex_init(&entity->mutex);
+ spin_lock_init(&entity->job.list.lock);
+ INIT_LIST_HEAD(&entity->job.list.head);
+ init_waitqueue_head(&entity->job.wq);
+
+ entity->sched_wq = sched_wq;
+ return drm_sched_entity_init(&entity->base,
+ DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+}
+
+void
+nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static const struct drm_sched_backend_ops nouveau_sched_ops = {
+ .run_job = nouveau_sched_run_job,
+ .timedout_job = nouveau_sched_timedout_job,
+ .free_job = nouveau_sched_free_job,
+};
+
+int nouveau_sched_init(struct nouveau_drm *drm)
+{
+ struct drm_gpu_scheduler *sched = &drm->sched;
+ long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+
+ drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
+ if (!drm->sched_wq)
+ return -ENOMEM;
+
+ return drm_sched_init(sched, &nouveau_sched_ops,
+ NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
+ NULL, NULL, "nouveau_sched", drm->dev->dev);
+}
+
+void nouveau_sched_fini(struct nouveau_drm *drm)
+{
+ destroy_workqueue(drm->sched_wq);
+ drm_sched_fini(&drm->sched);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
new file mode 100644
index 000000000000..27ac19792597
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef NOUVEAU_SCHED_H
+#define NOUVEAU_SCHED_H
+
+#include <linux/types.h>
+
+#include <drm/drm_exec.h>
+#include <drm/gpu_scheduler.h>
+
+#include "nouveau_drv.h"
+
+#define to_nouveau_job(sched_job) \
+ container_of((sched_job), struct nouveau_job, base)
+
+struct nouveau_job_ops;
+
+enum nouveau_job_state {
+ NOUVEAU_JOB_UNINITIALIZED = 0,
+ NOUVEAU_JOB_INITIALIZED,
+ NOUVEAU_JOB_SUBMIT_SUCCESS,
+ NOUVEAU_JOB_SUBMIT_FAILED,
+ NOUVEAU_JOB_RUN_SUCCESS,
+ NOUVEAU_JOB_RUN_FAILED,
+};
+
+struct nouveau_job_args {
+ struct drm_file *file_priv;
+ struct nouveau_sched_entity *sched_entity;
+
+ enum dma_resv_usage resv_usage;
+ bool sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } out_sync;
+
+ struct nouveau_job_ops *ops;
+};
+
+struct nouveau_job {
+ struct drm_sched_job base;
+
+ enum nouveau_job_state state;
+
+ struct nouveau_sched_entity *entity;
+
+ struct drm_file *file_priv;
+ struct nouveau_cli *cli;
+
+ struct drm_exec exec;
+ enum dma_resv_usage resv_usage;
+ struct dma_fence *done_fence;
+
+ bool sync;
+
+ struct {
+ struct drm_nouveau_sync *data;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *data;
+ struct drm_syncobj **objs;
+ struct dma_fence_chain **chains;
+ u32 count;
+ } out_sync;
+
+ struct nouveau_job_ops {
+		/* If .submit() returns without any error, it is guaranteed
+		 * that .armed_submit() is called.
+ */
+ int (*submit)(struct nouveau_job *);
+ void (*armed_submit)(struct nouveau_job *);
+ struct dma_fence *(*run)(struct nouveau_job *);
+ void (*free)(struct nouveau_job *);
+ enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
+ } *ops;
+};
+
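+/* Illustrative only (the callback names below are made up): a backend
+ * providing these hooks wires them up roughly as
+ *
+ *	static struct nouveau_job_ops backend_job_ops = {
+ *		.submit = backend_job_submit,
+ *		.armed_submit = backend_job_armed_submit,
+ *		.run = backend_job_run,
+ *		.free = backend_job_free,
+ *	};
+ *
+ * See nouveau_bind_job_ops in nouveau_uvmm.c for a real instance.
+ */
+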
+int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
+ u32 inc, u64 ins,
+ u32 outc, u64 outs);
+
+int nouveau_job_init(struct nouveau_job *job,
+ struct nouveau_job_args *args);
+void nouveau_job_free(struct nouveau_job *job);
+
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_fini(struct nouveau_job *job);
+
+#define to_nouveau_sched_entity(entity) \
+ container_of((entity), struct nouveau_sched_entity, base)
+
+struct nouveau_sched_entity {
+ struct drm_sched_entity base;
+ struct mutex mutex;
+
+ struct workqueue_struct *sched_wq;
+
+ struct {
+ struct {
+ struct list_head head;
+ spinlock_t lock;
+ } list;
+ struct wait_queue_head wq;
+ } job;
+};
+
+int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
+ struct drm_gpu_scheduler *sched,
+ struct workqueue_struct *sched_wq);
+void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
+
+bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
+ struct work_struct *work);
+
+int nouveau_sched_init(struct nouveau_drm *drm);
+void nouveau_sched_fini(struct nouveau_drm *drm);
+
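+/* A minimal usage sketch, assuming a driver-side job type embedding
+ * struct nouveau_job (error handling omitted):
+ *
+ *	nouveau_sched_init(drm);
+ *	nouveau_sched_entity_init(&entity, &drm->sched, drm->sched_wq);
+ *
+ *	nouveau_job_init(&job->base, &args);
+ *	nouveau_job_submit(&job->base);
+ *
+ * Teardown mirrors this via nouveau_sched_entity_fini() and
+ * nouveau_sched_fini().
+ */
+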
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a74ba8d84ba7..186351ecf72f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
* VMM instead of the standard one.
*/
ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
- cli->vmm.vmm.object.oclass, true,
+ cli->vmm.vmm.object.oclass, MANAGED,
args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) {
.fault_replay = true,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
new file mode 100644
index 000000000000..3a1e8538f205
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -0,0 +1,1916 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Locking:
+ *
+ * The uvmm mutex protects any operations on the GPU VA space provided by the
+ * DRM GPU VA manager.
+ *
+ * The GEM's dma_resv lock protects the GEM's GPUVA list, hence link/unlink of
+ * a mapping to its backing GEM must be performed under this lock.
+ *
+ * Actual map/unmap operations within the fence signalling critical path are
+ * protected by installing DMA fences to the corresponding GEMs' DMA
+ * reservations, such that concurrent BO moves, which themselves walk the GEM's
+ * GPUVA list in order to map/unmap its entries, can't occur concurrently.
+ *
+ * Accessing the DRM_GPUVA_INVALIDATED flag doesn't need any separate
+ * protection, since there are no accesses other than from BO move callbacks
+ * and from the fence signalling critical path, which are already protected by
+ * the corresponding GEM's DMA reservation fences.
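+ *
+ * As a minimal illustration of the linking rule above (not a verbatim call
+ * site), attaching a mapping to its backing GEM follows the pattern:
+ *
+ *	dma_resv_lock(obj->resv, NULL);
+ *	drm_gpuva_link(&uvma->va);
+ *	dma_resv_unlock(obj->resv);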
+ */
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_uvmm.h"
+
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/class.h>
+#include <nvif/if000c.h>
+#include <nvif/if900d.h>
+
+#define NOUVEAU_VA_SPACE_BITS 47 /* FIXME */
+#define NOUVEAU_VA_SPACE_START 0x0
+#define NOUVEAU_VA_SPACE_END (1ULL << NOUVEAU_VA_SPACE_BITS)
+
+#define list_last_op(_ops) list_last_entry(_ops, struct bind_job_op, entry)
+#define list_prev_op(_op) list_prev_entry(_op, entry)
+#define list_for_each_op(_op, _ops) list_for_each_entry(_op, _ops, entry)
+#define list_for_each_op_from_reverse(_op, _ops) \
+ list_for_each_entry_from_reverse(_op, _ops, entry)
+#define list_for_each_op_safe(_op, _n, _ops) list_for_each_entry_safe(_op, _n, _ops, entry)
+
+enum vm_bind_op {
+ OP_MAP = DRM_NOUVEAU_VM_BIND_OP_MAP,
+ OP_UNMAP = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
+ OP_MAP_SPARSE,
+ OP_UNMAP_SPARSE,
+};
+
+struct nouveau_uvma_prealloc {
+ struct nouveau_uvma *map;
+ struct nouveau_uvma *prev;
+ struct nouveau_uvma *next;
+};
+
+struct bind_job_op {
+ struct list_head entry;
+
+ enum vm_bind_op op;
+ u32 flags;
+
+ struct {
+ u64 addr;
+ u64 range;
+ } va;
+
+ struct {
+ u32 handle;
+ u64 offset;
+ struct drm_gem_object *obj;
+ } gem;
+
+ struct nouveau_uvma_region *reg;
+ struct nouveau_uvma_prealloc new;
+ struct drm_gpuva_ops *ops;
+};
+
+struct uvmm_map_args {
+ struct nouveau_uvma_region *region;
+ u64 addr;
+ u64 range;
+ u8 kind;
+};
+
+static int
+nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_sparse(vmm, addr, range, true);
+}
+
+static int
+nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_sparse(vmm, addr, range, false);
+}
+
+static int
+nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
+}
+
+static int
+nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
+}
+
+static int
+nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range, bool sparse)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
+}
+
+static int
+nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range,
+ u64 bo_offset, u8 kind,
+ struct nouveau_mem *mem)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+ union {
+ struct gf100_vmm_map_v0 gf100;
+ } args;
+ u32 argc = 0;
+
+ switch (vmm->object.oclass) {
+ case NVIF_CLASS_VMM_GF100:
+ case NVIF_CLASS_VMM_GM200:
+ case NVIF_CLASS_VMM_GP100:
+ args.gf100.version = 0;
+ if (mem->mem.type & NVIF_MEM_VRAM)
+ args.gf100.vol = 0;
+ else
+ args.gf100.vol = 1;
+ args.gf100.ro = 0;
+ args.gf100.priv = 0;
+ args.gf100.kind = kind;
+ argc = sizeof(args.gf100);
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
+ &args, argc,
+ &mem->mem, bo_offset);
+}
+
+static int
+nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg)
+{
+ u64 addr = reg->va.addr;
+ u64 range = reg->va.range;
+
+ return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
+}
+
+static int
+nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
+{
+ u64 addr = uvma->va.va.addr;
+ u64 range = uvma->va.va.range;
+
+ return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
+}
+
+static int
+nouveau_uvma_map(struct nouveau_uvma *uvma,
+ struct nouveau_mem *mem)
+{
+ u64 addr = uvma->va.va.addr;
+ u64 offset = uvma->va.gem.offset;
+ u64 range = uvma->va.va.range;
+
+ return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
+ offset, uvma->kind, mem);
+}
+
+static int
+nouveau_uvma_unmap(struct nouveau_uvma *uvma)
+{
+ u64 addr = uvma->va.va.addr;
+ u64 range = uvma->va.va.range;
+ bool sparse = !!uvma->region;
+
+ if (drm_gpuva_invalidated(&uvma->va))
+ return 0;
+
+ return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+}
+
+static int
+nouveau_uvma_alloc(struct nouveau_uvma **puvma)
+{
+ *puvma = kzalloc(sizeof(**puvma), GFP_KERNEL);
+ if (!*puvma)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+nouveau_uvma_free(struct nouveau_uvma *uvma)
+{
+ kfree(uvma);
+}
+
+static void
+nouveau_uvma_gem_get(struct nouveau_uvma *uvma)
+{
+ drm_gem_object_get(uvma->va.gem.obj);
+}
+
+static void
+nouveau_uvma_gem_put(struct nouveau_uvma *uvma)
+{
+ drm_gem_object_put(uvma->va.gem.obj);
+}
+
+static int
+nouveau_uvma_region_alloc(struct nouveau_uvma_region **preg)
+{
+ *preg = kzalloc(sizeof(**preg), GFP_KERNEL);
+ if (!*preg)
+ return -ENOMEM;
+
+ kref_init(&(*preg)->kref);
+
+ return 0;
+}
+
+static void
+nouveau_uvma_region_free(struct kref *kref)
+{
+ struct nouveau_uvma_region *reg =
+ container_of(kref, struct nouveau_uvma_region, kref);
+
+ kfree(reg);
+}
+
+static void
+nouveau_uvma_region_get(struct nouveau_uvma_region *reg)
+{
+ kref_get(&reg->kref);
+}
+
+static void
+nouveau_uvma_region_put(struct nouveau_uvma_region *reg)
+{
+ kref_put(&reg->kref, nouveau_uvma_region_free);
+}
+
+static int
+__nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_region *reg)
+{
+ u64 addr = reg->va.addr;
+ u64 range = reg->va.range;
+ u64 last = addr + range - 1;
+ MA_STATE(mas, &uvmm->region_mt, addr, addr);
+
+ if (unlikely(mas_walk(&mas)))
+ return -EEXIST;
+
+ if (unlikely(mas.last < last))
+ return -EEXIST;
+
+ mas.index = addr;
+ mas.last = last;
+
+ mas_store_gfp(&mas, reg, GFP_KERNEL);
+
+ reg->uvmm = uvmm;
+
+ return 0;
+}
+
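+/* The two checks in __nouveau_uvma_region_insert() reject overlaps from
+ * either side. Illustrative example: inserting [0x2000, 0x3000) fails with
+ * -EEXIST if an entry already covers 0x2000 (mas_walk() finds it), or if the
+ * gap found at 0x2000 ends before 0x2fff (mas.last < last), i.e. another
+ * entry starts inside the requested range.
+ */
+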
+static int
+nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_region *reg,
+ u64 addr, u64 range)
+{
+ int ret;
+
+ reg->uvmm = uvmm;
+ reg->va.addr = addr;
+ reg->va.range = range;
+
+ ret = __nouveau_uvma_region_insert(uvmm, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+nouveau_uvma_region_remove(struct nouveau_uvma_region *reg)
+{
+ struct nouveau_uvmm *uvmm = reg->uvmm;
+ MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0);
+
+ mas_erase(&mas);
+}
+
+static int
+nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma_region *reg;
+ int ret;
+
+ if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
+ return -ENOSPC;
+
+ ret = nouveau_uvma_region_alloc(&reg);
+ if (ret)
+ return ret;
+
+ ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
+ if (ret)
+ goto err_free_region;
+
+ ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
+ if (ret)
+ goto err_region_remove;
+
+ return 0;
+
+err_region_remove:
+ nouveau_uvma_region_remove(reg);
+err_free_region:
+ nouveau_uvma_region_put(reg);
+ return ret;
+}
+
+static struct nouveau_uvma_region *
+nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ MA_STATE(mas, &uvmm->region_mt, addr, 0);
+
+ return mas_find(&mas, addr + range - 1);
+}
+
+static struct nouveau_uvma_region *
+nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma_region *reg;
+
+ reg = nouveau_uvma_region_find_first(uvmm, addr, range);
+ if (!reg)
+ return NULL;
+
+ if (reg->va.addr != addr ||
+ reg->va.range != range)
+ return NULL;
+
+ return reg;
+}
+
+static bool
+nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
+{
+ struct nouveau_uvmm *uvmm = reg->uvmm;
+
+ return drm_gpuva_interval_empty(&uvmm->umgr,
+ reg->va.addr,
+ reg->va.range);
+}
+
+static int
+__nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg)
+{
+ struct nouveau_uvmm *uvmm = reg->uvmm;
+ u64 addr = reg->va.addr;
+ u64 range = reg->va.range;
+
+ if (!nouveau_uvma_region_empty(reg))
+ return -EBUSY;
+
+ nouveau_uvma_region_remove(reg);
+ nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
+ nouveau_uvma_region_put(reg);
+
+ return 0;
+}
+
+static int
+nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma_region *reg;
+
+ reg = nouveau_uvma_region_find(uvmm, addr, range);
+ if (!reg)
+ return -ENOENT;
+
+ return __nouveau_uvma_region_destroy(reg);
+}
+
+static void
+nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg)
+{
+ init_completion(&reg->complete);
+ reg->dirty = true;
+}
+
+static void
+nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
+{
+ complete_all(&reg->complete);
+}
+
+static void
+op_map_prepare_unwind(struct nouveau_uvma *uvma)
+{
+ nouveau_uvma_gem_put(uvma);
+ drm_gpuva_remove(&uvma->va);
+ nouveau_uvma_free(uvma);
+}
+
+static void
+op_unmap_prepare_unwind(struct drm_gpuva *va)
+{
+ drm_gpuva_insert(va->mgr, va);
+}
+
+static void
+nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops,
+ struct drm_gpuva_op *last,
+ struct uvmm_map_args *args)
+{
+ struct drm_gpuva_op *op = last;
+ u64 vmm_get_start = args ? args->addr : 0;
+ u64 vmm_get_end = args ? args->addr + args->range : 0;
+
+ /* Unwind GPUVA space. */
+ drm_gpuva_for_each_op_from_reverse(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ op_map_prepare_unwind(new->map);
+ break;
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+
+ if (r->next)
+ op_map_prepare_unwind(new->next);
+
+ if (r->prev)
+ op_map_prepare_unwind(new->prev);
+
+ op_unmap_prepare_unwind(r->unmap->va);
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ op_unmap_prepare_unwind(op->unmap.va);
+ break;
+ default:
+ break;
+ }
+ }
+
+	/* Unmap operations don't allocate page tables, hence skip the
+	 * following page table unwind.
+	 */
+ if (!args)
+ return;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP: {
+ u64 vmm_get_range = vmm_get_end - vmm_get_start;
+
+ if (vmm_get_range)
+ nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
+ vmm_get_range);
+ break;
+ }
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ if (r->prev)
+ vmm_get_start = uend;
+
+ if (r->next)
+ vmm_get_end = ustart;
+
+ if (r->prev && r->next)
+ vmm_get_start = vmm_get_end = 0;
+
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP: {
+ struct drm_gpuva_op_unmap *u = &op->unmap;
+ struct drm_gpuva *va = u->va;
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ /* Nothing to do for mappings we merge with. */
+ if (uend == vmm_get_start ||
+ ustart == vmm_get_end)
+ break;
+
+ if (ustart > vmm_get_start) {
+ u64 vmm_get_range = ustart - vmm_get_start;
+
+ nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
+ vmm_get_range);
+ }
+ vmm_get_start = uend;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (op == last)
+ break;
+ }
+}
+
+static void
+nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops,
+ u64 addr, u64 range)
+{
+ struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
+ struct uvmm_map_args args = {
+ .addr = addr,
+ .range = range,
+ };
+
+ nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
+}
+
+static void
+nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
+
+ nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL);
+}
+
+static int
+op_map_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma **puvma,
+ struct drm_gpuva_op_map *op,
+ struct uvmm_map_args *args)
+{
+ struct nouveau_uvma *uvma;
+ int ret;
+
+ ret = nouveau_uvma_alloc(&uvma);
+ if (ret)
+ return ret;
+
+ uvma->region = args->region;
+ uvma->kind = args->kind;
+
+ drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+
+ /* Keep a reference until this uvma is destroyed. */
+ nouveau_uvma_gem_get(uvma);
+
+ *puvma = uvma;
+ return 0;
+}
+
+static void
+op_unmap_prepare(struct drm_gpuva_op_unmap *u)
+{
+ drm_gpuva_unmap(u);
+}
+
+static int
+nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops,
+ struct uvmm_map_args *args)
+{
+ struct drm_gpuva_op *op;
+ u64 vmm_get_start = args ? args->addr : 0;
+ u64 vmm_get_end = args ? args->addr + args->range : 0;
+ int ret;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP: {
+ u64 vmm_get_range = vmm_get_end - vmm_get_start;
+
+ ret = op_map_prepare(uvmm, &new->map, &op->map, args);
+ if (ret)
+ goto unwind;
+
+ if (args && vmm_get_range) {
+ ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
+ vmm_get_range);
+ if (ret) {
+ op_map_prepare_unwind(new->map);
+ goto unwind;
+ }
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
+ struct uvmm_map_args remap_args = {
+ .kind = uvma_from_va(va)->kind,
+ };
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ op_unmap_prepare(r->unmap);
+
+ if (r->prev) {
+ ret = op_map_prepare(uvmm, &new->prev, r->prev,
+ &remap_args);
+ if (ret)
+ goto unwind;
+
+ if (args)
+ vmm_get_start = uend;
+ }
+
+ if (r->next) {
+ ret = op_map_prepare(uvmm, &new->next, r->next,
+ &remap_args);
+ if (ret) {
+ if (r->prev)
+ op_map_prepare_unwind(new->prev);
+ goto unwind;
+ }
+
+ if (args)
+ vmm_get_end = ustart;
+ }
+
+ if (args && (r->prev && r->next))
+ vmm_get_start = vmm_get_end = 0;
+
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP: {
+ struct drm_gpuva_op_unmap *u = &op->unmap;
+ struct drm_gpuva *va = u->va;
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ op_unmap_prepare(u);
+
+ if (!args)
+ break;
+
+ /* Nothing to do for mappings we merge with. */
+ if (uend == vmm_get_start ||
+ ustart == vmm_get_end)
+ break;
+
+ if (ustart > vmm_get_start) {
+ u64 vmm_get_range = ustart - vmm_get_start;
+
+ ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
+ vmm_get_range);
+ if (ret) {
+ op_unmap_prepare_unwind(va);
+ goto unwind;
+ }
+ }
+ vmm_get_start = uend;
+
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ goto unwind;
+ }
+ }
+
+ return 0;
+
+unwind:
+ if (op != drm_gpuva_first_op(ops))
+ nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops,
+ drm_gpuva_prev_op(op),
+ args);
+ return ret;
+}
+
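+/* Worked example for the page table GET bookkeeping above (addresses made up
+ * for illustration): mapping [0x1000, 0x4000) over an existing mapping at
+ * [0x2000, 0x3000) yields an UNMAP op for the overlap followed by the final
+ * MAP op. The UNMAP op triggers a GET for [0x1000, 0x2000) and advances
+ * vmm_get_start to 0x3000, so the MAP op GETs only the remainder
+ * [0x3000, 0x4000), while [0x2000, 0x3000) keeps its existing page tables.
+ */
+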
+static int
+nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct nouveau_uvma_region *region,
+ struct drm_gpuva_ops *ops,
+ u64 addr, u64 range, u8 kind)
+{
+ struct uvmm_map_args args = {
+ .region = region,
+ .addr = addr,
+ .range = range,
+ .kind = kind,
+ };
+
+ return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
+}
+
+static int
+nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
+}
+
+static struct drm_gem_object *
+op_gem_obj(struct drm_gpuva_op *op)
+{
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ return op->map.gem.obj;
+ case DRM_GPUVA_OP_REMAP:
+ /* Actually, we're looking for the GEMs backing remap.prev and
+		 * remap.next, but since this is a remap, they're identical to
+ * the GEM backing the unmapped GPUVA.
+ */
+ return op->remap.unmap->va->gem.obj;
+ case DRM_GPUVA_OP_UNMAP:
+ return op->unmap.va->gem.obj;
+ default:
+ WARN(1, "Unknown operation.\n");
+ return NULL;
+ }
+}
+
+static void
+op_map(struct nouveau_uvma *uvma)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj);
+
+ nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource));
+}
+
+static void
+op_unmap(struct drm_gpuva_op_unmap *u)
+{
+ struct drm_gpuva *va = u->va;
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+	/* nouveau_uvma_unmap() does not unmap if the backing BO is evicted. */
+ if (!u->keep)
+ nouveau_uvma_unmap(uvma);
+}
+
+static void
+op_unmap_range(struct drm_gpuva_op_unmap *u,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ bool sparse = !!uvma->region;
+
+ if (!drm_gpuva_invalidated(u->va))
+ nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+}
+
+static void
+op_remap(struct drm_gpuva_op_remap *r,
+ struct nouveau_uvma_prealloc *new)
+{
+ struct drm_gpuva_op_unmap *u = r->unmap;
+ struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ u64 addr = uvma->va.va.addr;
+ u64 range = uvma->va.va.range;
+
+ if (r->prev)
+ addr = r->prev->va.addr + r->prev->va.range;
+
+ if (r->next)
+ range = r->next->va.addr - addr;
+
+ op_unmap_range(u, addr, range);
+}
+
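+/* For illustration: remapping a mapping at [0x0000, 0x8000) with a remaining
+ * prev part [0x0000, 0x1000) and next part [0x7000, 0x8000) makes op_remap()
+ * unmap exactly the hole in between, i.e. op_unmap_range() is called for
+ * [0x1000, 0x7000).
+ */
+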
+static int
+nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *op;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ op_map(new->map);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ op_remap(&op->remap, new);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ op_unmap(&op->unmap);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ return nouveau_uvmm_sm(uvmm, new, ops);
+}
+
+static int
+nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ return nouveau_uvmm_sm(uvmm, new, ops);
+}
+
+static void
+nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops, bool unmap)
+{
+ struct drm_gpuva_op *op;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ break;
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva_op_map *p = r->prev;
+ struct drm_gpuva_op_map *n = r->next;
+ struct drm_gpuva *va = r->unmap->va;
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ if (unmap) {
+ u64 addr = va->va.addr;
+ u64 end = addr + va->va.range;
+
+ if (p)
+ addr = p->va.addr + p->va.range;
+
+ if (n)
+ end = n->va.addr;
+
+ nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
+ }
+
+ nouveau_uvma_gem_put(uvma);
+ nouveau_uvma_free(uvma);
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP: {
+ struct drm_gpuva_op_unmap *u = &op->unmap;
+ struct drm_gpuva *va = u->va;
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ if (unmap)
+ nouveau_uvma_vmm_put(uvma);
+
+ nouveau_uvma_gem_put(uvma);
+ nouveau_uvma_free(uvma);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+static void
+nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
+}
+
+static void
+nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
+}
+
+static int
+nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
+{
+ u64 end = addr + range;
+ u64 kernel_managed_end = uvmm->kernel_managed_addr +
+ uvmm->kernel_managed_size;
+
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (range & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (end <= addr)
+ return -EINVAL;
+
+ if (addr < NOUVEAU_VA_SPACE_START ||
+ end > NOUVEAU_VA_SPACE_END)
+ return -EINVAL;
+
+ if (addr < kernel_managed_end &&
+ end > uvmm->kernel_managed_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
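+/* Example of the kernel-managed carve-out check above (illustrative values):
+ * with kernel_managed_addr = 0x1000000 and a size of 0x100000, any request
+ * intersecting [0x1000000, 0x1100000) fails with -EINVAL, while page-aligned
+ * ranges entirely below or above that window pass.
+ */
+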
+static int
+nouveau_uvmm_bind_job_alloc(struct nouveau_uvmm_bind_job **pjob)
+{
+ *pjob = kzalloc(sizeof(**pjob), GFP_KERNEL);
+ if (!*pjob)
+ return -ENOMEM;
+
+ kref_init(&(*pjob)->kref);
+
+ return 0;
+}
+
+static void
+nouveau_uvmm_bind_job_free(struct kref *kref)
+{
+ struct nouveau_uvmm_bind_job *job =
+ container_of(kref, struct nouveau_uvmm_bind_job, kref);
+
+ nouveau_job_free(&job->base);
+ kfree(job);
+}
+
+static void
+nouveau_uvmm_bind_job_get(struct nouveau_uvmm_bind_job *job)
+{
+ kref_get(&job->kref);
+}
+
+static void
+nouveau_uvmm_bind_job_put(struct nouveau_uvmm_bind_job *job)
+{
+ kref_put(&job->kref, nouveau_uvmm_bind_job_free);
+}
+
+static int
+bind_validate_op(struct nouveau_job *job,
+ struct bind_job_op *op)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct drm_gem_object *obj = op->gem.obj;
+
+ if (op->op == OP_MAP) {
+ if (op->gem.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (obj->size <= op->gem.offset)
+ return -EINVAL;
+
+ if (op->va.range > (obj->size - op->gem.offset))
+ return -EINVAL;
+ }
+
+ return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
+}
+
+static void
+bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
+{
+ struct nouveau_uvmm_bind_job *bind_job;
+ struct nouveau_sched_entity *entity = job->entity;
+ struct bind_job_op *op;
+ u64 end = addr + range;
+
+again:
+ spin_lock(&entity->job.list.lock);
+ list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ list_for_each_op(op, &bind_job->ops) {
+ if (op->op == OP_UNMAP) {
+ u64 op_addr = op->va.addr;
+ u64 op_end = op_addr + op->va.range;
+
+ if (!(end <= op_addr || addr >= op_end)) {
+ nouveau_uvmm_bind_job_get(bind_job);
+ spin_unlock(&entity->job.list.lock);
+ wait_for_completion(&bind_job->complete);
+ nouveau_uvmm_bind_job_put(bind_job);
+ goto again;
+ }
+ }
+ }
+ }
+ spin_unlock(&entity->job.list.lock);
+}
+
+static int
+bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range,
+ bool sparse)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct nouveau_uvma_region *reg;
+ u64 reg_addr, reg_end;
+ u64 end = addr + range;
+
+again:
+ nouveau_uvmm_lock(uvmm);
+ reg = nouveau_uvma_region_find_first(uvmm, addr, range);
+ if (!reg) {
+ nouveau_uvmm_unlock(uvmm);
+ return 0;
+ }
+
+ /* Generally, job submits are serialized, hence only
+ * dirty regions can be modified concurrently.
+ */
+ if (reg->dirty) {
+ nouveau_uvma_region_get(reg);
+ nouveau_uvmm_unlock(uvmm);
+ wait_for_completion(&reg->complete);
+ nouveau_uvma_region_put(reg);
+ goto again;
+ }
+ nouveau_uvmm_unlock(uvmm);
+
+ if (sparse)
+ return -ENOSPC;
+
+ reg_addr = reg->va.addr;
+ reg_end = reg_addr + reg->va.range;
+
+ /* Make sure the mapping is either outside of a
+ * region or fully enclosed by a region.
+ */
+ if (reg_addr > addr || reg_end < end)
+ return -ENOSPC;
+
+ return 0;
+}
+
+static int
+bind_validate_region(struct nouveau_job *job)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ u64 op_addr = op->va.addr;
+ u64 op_range = op->va.range;
+ bool sparse = false;
+
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ sparse = true;
+ bind_validate_map_sparse(job, op_addr, op_range);
+ fallthrough;
+ case OP_MAP:
+ ret = bind_validate_map_common(job, op_addr, op_range,
+ sparse);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void
+bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+{
+ struct drm_gpuva_op *op;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ drm_gpuva_link(&new->map->va);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ drm_gpuva_link(&new->prev->va);
+ if (op->remap.next)
+ drm_gpuva_link(&new->next->va);
+ drm_gpuva_unlink(op->remap.unmap->va);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ drm_gpuva_unlink(op->unmap.va);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct nouveau_sched_entity *entity = job->entity;
+ struct drm_exec *exec = &job->exec;
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ if (op->op == OP_MAP) {
+ op->gem.obj = drm_gem_object_lookup(job->file_priv,
+ op->gem.handle);
+ if (!op->gem.obj)
+ return -ENOENT;
+ }
+
+ ret = bind_validate_op(job, op);
+ if (ret)
+ return ret;
+ }
+
+ /* If a sparse region or mapping overlaps a dirty region, we need to
+ * wait for the region to complete the unbind process. This is due to
+ * how page table management is currently implemented. A future
+ * implementation might change this.
+ */
+ ret = bind_validate_region(job);
+ if (ret)
+ return ret;
+
+	/* Once we start modifying the GPU VA space we need to keep holding the
+	 * uvmm lock until we can't fail anymore. This is because the set of GPU
+	 * VA space changes must appear atomic and we need to be able to unwind
+	 * all GPU VA space changes on failure.
+ */
+ nouveau_uvmm_lock(uvmm);
+ list_for_each_op(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ ret = nouveau_uvma_region_create(uvmm,
+ op->va.addr,
+ op->va.range);
+ if (ret)
+ goto unwind_continue;
+
+ break;
+ case OP_UNMAP_SPARSE:
+ op->reg = nouveau_uvma_region_find(uvmm, op->va.addr,
+ op->va.range);
+ if (!op->reg || op->reg->dirty) {
+ ret = -ENOENT;
+ goto unwind_continue;
+ }
+
+ op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->va.addr,
+ op->va.range);
+ if (IS_ERR(op->ops)) {
+ ret = PTR_ERR(op->ops);
+ goto unwind_continue;
+ }
+
+ ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
+ op->ops);
+ if (ret) {
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ op->reg = NULL;
+ goto unwind_continue;
+ }
+
+ nouveau_uvma_region_dirty(op->reg);
+
+ break;
+ case OP_MAP: {
+ struct nouveau_uvma_region *reg;
+
+ reg = nouveau_uvma_region_find_first(uvmm,
+ op->va.addr,
+ op->va.range);
+ if (reg) {
+ u64 reg_addr = reg->va.addr;
+ u64 reg_end = reg_addr + reg->va.range;
+ u64 op_addr = op->va.addr;
+ u64 op_end = op_addr + op->va.range;
+
+ if (unlikely(reg->dirty)) {
+ ret = -EINVAL;
+ goto unwind_continue;
+ }
+
+ /* Make sure the mapping is either outside of a
+ * region or fully enclosed by a region.
+ */
+ if (reg_addr > op_addr || reg_end < op_end) {
+ ret = -ENOSPC;
+ goto unwind_continue;
+ }
+ }
+
+ op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
+ op->va.addr,
+ op->va.range,
+ op->gem.obj,
+ op->gem.offset);
+ if (IS_ERR(op->ops)) {
+ ret = PTR_ERR(op->ops);
+ goto unwind_continue;
+ }
+
+ ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new,
+ reg, op->ops,
+ op->va.addr,
+ op->va.range,
+ op->flags & 0xff);
+ if (ret) {
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ goto unwind_continue;
+ }
+
+ break;
+ }
+ case OP_UNMAP:
+ op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->va.addr,
+ op->va.range);
+ if (IS_ERR(op->ops)) {
+ ret = PTR_ERR(op->ops);
+ goto unwind_continue;
+ }
+
+ ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
+ op->ops);
+ if (ret) {
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ goto unwind_continue;
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ goto unwind_continue;
+ }
+ }
+
+ drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(exec) {
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (IS_ERR_OR_NULL(op->ops))
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, obj, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
+ }
+ }
+ }
+ }
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (IS_ERR_OR_NULL(op->ops))
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+			/* Don't validate GEMs backing mappings we're about to
+			 * unmap; it's not worth the effort.
+ */
+ if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
+ continue;
+
+ ret = nouveau_bo_validate(nouveau_gem_object(obj),
+ true, false);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
+ }
+ }
+ }
+
+ /* Link and unlink GPUVAs while holding the dma_resv lock.
+ *
+	 * As long as we validate() all GEMs and add fences to all GEMs' DMA
+	 * reservations backing map and remap operations, we can be sure there
+ * won't be any concurrent (in)validations during job execution, hence
+ * we're safe to check drm_gpuva_invalidated() within the fence
+ * signalling critical path without holding a separate lock.
+ *
+ * GPUVAs about to be unmapped are safe as well, since they're unlinked
+ * already.
+ *
+ * GEMs from map and remap operations must be validated before linking
+	 * GEMs from map and remap operations must be validated before linking
+	 * their corresponding mappings to prevent the actual PT update from
+	 * happening right away in validate() rather than asynchronously as
+	 * intended.
+ *
+ * Note that after linking and unlinking the GPUVAs in this loop this
+ * function cannot fail anymore, hence there is no need for an unwind
+ * path.
+ */
+ list_for_each_op(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_UNMAP_SPARSE:
+ case OP_MAP:
+ case OP_UNMAP:
+ bind_link_gpuvas(op->ops, &op->new);
+ break;
+ default:
+ break;
+ }
+ }
+ nouveau_uvmm_unlock(uvmm);
+
+ spin_lock(&entity->job.list.lock);
+ list_add(&bind_job->entry, &entity->job.list.head);
+ spin_unlock(&entity->job.list.lock);
+
+ return 0;
+
+unwind_continue:
+ op = list_prev_op(op);
+unwind:
+ list_for_each_op_from_reverse(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ nouveau_uvma_region_destroy(uvmm, op->va.addr,
+ op->va.range);
+ break;
+ case OP_UNMAP_SPARSE:
+ __nouveau_uvma_region_insert(uvmm, op->reg);
+ nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
+ op->ops);
+ break;
+ case OP_MAP:
+ nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new,
+ op->ops,
+ op->va.addr,
+ op->va.range);
+ break;
+ case OP_UNMAP:
+ nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
+ op->ops);
+ break;
+ }
+
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ op->reg = NULL;
+ }
+
+ nouveau_uvmm_unlock(uvmm);
+ drm_exec_fini(exec);
+ return ret;
+}
+
+static void
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+{
+ struct drm_exec *exec = &job->exec;
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj)
+ dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
+
+ drm_exec_fini(exec);
+}
+
+static struct dma_fence *
+nouveau_uvmm_bind_job_run(struct nouveau_job *job)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct bind_job_op *op;
+ int ret = 0;
+
+ list_for_each_op(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ /* noop */
+ break;
+ case OP_MAP:
+ ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
+ if (ret)
+ goto out;
+ break;
+ case OP_UNMAP_SPARSE:
+ fallthrough;
+ case OP_UNMAP:
+ ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
+ if (ret)
+ goto out;
+ break;
+ }
+ }
+
+out:
+ if (ret)
+ NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static void
+nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+{
+ struct nouveau_uvmm_bind_job *bind_job =
+ container_of(work, struct nouveau_uvmm_bind_job, work);
+ struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct nouveau_sched_entity *entity = job->entity;
+ struct bind_job_op *op, *next;
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gem_object *obj = op->gem.obj;
+
+		/* When nouveau_uvmm_bind_job_submit() fails, op->ops and
+		 * op->reg will be NULL, hence skip the cleanup.
+ */
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ /* noop */
+ break;
+ case OP_UNMAP_SPARSE:
+ if (!IS_ERR_OR_NULL(op->ops))
+ nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
+ op->ops);
+
+ if (op->reg) {
+ nouveau_uvma_region_sparse_unref(op->reg);
+ nouveau_uvmm_lock(uvmm);
+ nouveau_uvma_region_remove(op->reg);
+ nouveau_uvmm_unlock(uvmm);
+ nouveau_uvma_region_complete(op->reg);
+ nouveau_uvma_region_put(op->reg);
+ }
+
+ break;
+ case OP_MAP:
+ if (!IS_ERR_OR_NULL(op->ops))
+ nouveau_uvmm_sm_map_cleanup(uvmm, &op->new,
+ op->ops);
+ break;
+ case OP_UNMAP:
+ if (!IS_ERR_OR_NULL(op->ops))
+ nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
+ op->ops);
+ break;
+ }
+
+ if (!IS_ERR_OR_NULL(op->ops))
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+
+ if (obj)
+ drm_gem_object_put(obj);
+ }
+
+ spin_lock(&entity->job.list.lock);
+ list_del(&bind_job->entry);
+ spin_unlock(&entity->job.list.lock);
+
+ complete_all(&bind_job->complete);
+ wake_up(&entity->job.wq);
+
+ /* Remove and free ops after removing the bind job from the job list to
+ * avoid races against bind_validate_map_sparse().
+ */
+ list_for_each_op_safe(op, next, &bind_job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
+
+ nouveau_uvmm_bind_job_put(bind_job);
+}
+
+static void
+nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct nouveau_sched_entity *entity = job->entity;
+
+ nouveau_sched_entity_qwork(entity, &bind_job->work);
+}
+
+static struct nouveau_job_ops nouveau_bind_job_ops = {
+ .submit = nouveau_uvmm_bind_job_submit,
+ .armed_submit = nouveau_uvmm_bind_job_armed_submit,
+ .run = nouveau_uvmm_bind_job_run,
+ .free = nouveau_uvmm_bind_job_free_qwork,
+};
+
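+/* Sketch of a bind job's path through the callbacks above (the scheduler
+ * glue lives in nouveau_sched.c):
+ *
+ *	nouveau_job_submit()
+ *	  -> .submit()        stage VA space changes, lock and validate GEMs
+ *	  -> .armed_submit()  attach done_fence, drop the GEM locks
+ *	scheduler runs the job
+ *	  -> .run()           apply the (un)map operations to the page tables
+ *	after the job fence signals
+ *	  -> .free()          queue nouveau_uvmm_bind_job_free_work_fn()
+ */
+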
+static int
+bind_job_op_from_uop(struct bind_job_op **pop,
+ struct drm_nouveau_vm_bind_op *uop)
+{
+ struct bind_job_op *op;
+
+ op = *pop = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ switch (uop->op) {
+ case OP_MAP:
+ op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
+ OP_MAP_SPARSE : OP_MAP;
+ break;
+ case OP_UNMAP:
+ op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
+ OP_UNMAP_SPARSE : OP_UNMAP;
+ break;
+ default:
+ op->op = uop->op;
+ break;
+ }
+
+ op->flags = uop->flags;
+ op->va.addr = uop->addr;
+ op->va.range = uop->range;
+ op->gem.handle = uop->handle;
+ op->gem.offset = uop->bo_offset;
+
+ return 0;
+}
+
+static void
+bind_job_ops_free(struct list_head *ops)
+{
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
+}
+
+static int
+nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
+ struct nouveau_uvmm_bind_job_args *__args)
+{
+ struct nouveau_uvmm_bind_job *job;
+ struct nouveau_job_args args = {};
+ struct bind_job_op *op;
+ int i, ret;
+
+ ret = nouveau_uvmm_bind_job_alloc(&job);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&job->ops);
+ INIT_LIST_HEAD(&job->entry);
+
+ for (i = 0; i < __args->op.count; i++) {
+ ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
+ if (ret)
+ goto err_free;
+
+ list_add_tail(&op->entry, &job->ops);
+ }
+
+ init_completion(&job->complete);
+ INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
+
+ args.sched_entity = __args->sched_entity;
+ args.file_priv = __args->file_priv;
+
+ args.in_sync.count = __args->in_sync.count;
+ args.in_sync.s = __args->in_sync.s;
+
+ args.out_sync.count = __args->out_sync.count;
+ args.out_sync.s = __args->out_sync.s;
+
+ args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC);
+ args.ops = &nouveau_bind_job_ops;
+ args.resv_usage = DMA_RESV_USAGE_BOOKKEEP;
+
+ ret = nouveau_job_init(&job->base, &args);
+ if (ret)
+ goto err_free;
+
+ *pjob = job;
+ return 0;
+
+err_free:
+ bind_job_ops_free(&job->ops);
+ kfree(job);
+ *pjob = NULL;
+
+ return ret;
+}
+
+int
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_nouveau_vm_init *init = data;
+
+ return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
+ init->kernel_managed_size);
+}
+
+static int
+nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
+{
+ struct nouveau_uvmm_bind_job *job;
+ int ret;
+
+ ret = nouveau_uvmm_bind_job_init(&job, args);
+ if (ret)
+ return ret;
+
+ ret = nouveau_job_submit(&job->base);
+ if (ret)
+ goto err_job_fini;
+
+ return 0;
+
+err_job_fini:
+ nouveau_job_fini(&job->base);
+ return ret;
+}
+
+static int
+nouveau_uvmm_vm_bind_ucopy(struct nouveau_uvmm_bind_job_args *args,
+ struct drm_nouveau_vm_bind *req)
+{
+ struct drm_nouveau_sync **s;
+ u32 inc = req->wait_count;
+ u64 ins = req->wait_ptr;
+ u32 outc = req->sig_count;
+ u64 outs = req->sig_ptr;
+ u32 opc = req->op_count;
+ u64 ops = req->op_ptr;
+ int ret;
+
+ args->flags = req->flags;
+
+ if (opc) {
+ args->op.count = opc;
+ args->op.s = u_memcpya(ops, opc,
+ sizeof(*args->op.s));
+ if (IS_ERR(args->op.s))
+ return PTR_ERR(args->op.s);
+ }
+
+ if (inc) {
+ s = &args->in_sync.s;
+
+ args->in_sync.count = inc;
+ *s = u_memcpya(ins, inc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_ops;
+ }
+ }
+
+ if (outc) {
+ s = &args->out_sync.s;
+
+ args->out_sync.count = outc;
+ *s = u_memcpya(outs, outc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_ins;
+ }
+ }
+
+ return 0;
+
+err_free_ops:
+ u_free(args->op.s);
+err_free_ins:
+ u_free(args->in_sync.s);
+ return ret;
+}
+
+static void
+nouveau_uvmm_vm_bind_ufree(struct nouveau_uvmm_bind_job_args *args)
+{
+ u_free(args->op.s);
+ u_free(args->in_sync.s);
+ u_free(args->out_sync.s);
+}
+
+int
+nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_uvmm_bind_job_args args = {};
+ struct drm_nouveau_vm_bind *req = data;
+ int ret = 0;
+
+ if (unlikely(!nouveau_cli_uvmm_locked(cli)))
+ return -ENOSYS;
+
+ ret = nouveau_uvmm_vm_bind_ucopy(&args, req);
+ if (ret)
+ return ret;
+
+ args.sched_entity = &cli->sched_entity;
+ args.file_priv = file_priv;
+
+ ret = nouveau_uvmm_vm_bind(&args);
+
+ nouveau_uvmm_vm_bind_ufree(&args);
+ return ret;
+}
+
+void
+nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
+{
+ struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuva *va;
+
+ dma_resv_assert_held(obj->resv);
+
+ drm_gem_for_each_gpuva(va, obj) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ nouveau_uvma_map(uvma, mem);
+ drm_gpuva_invalidate(va, false);
+ }
+}
+
+void
+nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
+{
+ struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuva *va;
+
+ dma_resv_assert_held(obj->resv);
+
+ drm_gem_for_each_gpuva(va, obj) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ nouveau_uvma_unmap(uvma);
+ drm_gpuva_invalidate(va, true);
+ }
+}
+
+int
+nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
+ u64 kernel_managed_addr, u64 kernel_managed_size)
+{
+ int ret;
+ u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
+
+ mutex_init(&uvmm->mutex);
+ dma_resv_init(&uvmm->resv);
+ mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+
+ mutex_lock(&cli->mutex);
+
+ if (unlikely(cli->uvmm.disabled)) {
+ ret = -ENOSYS;
+ goto out_unlock;
+ }
+
+ if (kernel_managed_end <= kernel_managed_addr) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ uvmm->kernel_managed_addr = kernel_managed_addr;
+ uvmm->kernel_managed_size = kernel_managed_size;
+
+ drm_gpuva_manager_init(&uvmm->umgr, cli->name,
+ NOUVEAU_VA_SPACE_START,
+ NOUVEAU_VA_SPACE_END,
+ kernel_managed_addr, kernel_managed_size,
+ NULL);
+
+ ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
+ cli->vmm.vmm.object.oclass, RAW,
+ kernel_managed_addr, kernel_managed_size,
+ NULL, 0, &cli->uvmm.vmm.vmm);
+ if (ret)
+ goto out_free_gpuva_mgr;
+
+ cli->uvmm.vmm.cli = cli;
+ mutex_unlock(&cli->mutex);
+
+ return 0;
+
+out_free_gpuva_mgr:
+ drm_gpuva_manager_destroy(&uvmm->umgr);
+out_unlock:
+ mutex_unlock(&cli->mutex);
+ return ret;
+}
+
+void
+nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
+{
+ MA_STATE(mas, &uvmm->region_mt, 0, 0);
+ struct nouveau_uvma_region *reg;
+ struct nouveau_cli *cli = uvmm->vmm.cli;
+ struct nouveau_sched_entity *entity = &cli->sched_entity;
+ struct drm_gpuva *va, *next;
+
+ if (!cli)
+ return;
+
+ rmb(); /* for list_empty to work without lock */
+ wait_event(entity->job.wq, list_empty(&entity->job.list.head));
+
+ nouveau_uvmm_lock(uvmm);
+ drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+ struct drm_gem_object *obj = va->gem.obj;
+
+ if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ continue;
+
+ drm_gpuva_remove(va);
+
+ dma_resv_lock(obj->resv, NULL);
+ drm_gpuva_unlink(va);
+ dma_resv_unlock(obj->resv);
+
+ nouveau_uvma_unmap(uvma);
+ nouveau_uvma_vmm_put(uvma);
+
+ nouveau_uvma_gem_put(uvma);
+ nouveau_uvma_free(uvma);
+ }
+
+ mas_for_each(&mas, reg, ULONG_MAX) {
+ mas_erase(&mas);
+ nouveau_uvma_region_sparse_unref(reg);
+ nouveau_uvma_region_put(reg);
+ }
+
+ WARN(!mtree_empty(&uvmm->region_mt),
+ "nouveau_uvma_region tree not empty, potentially leaking memory.");
+ __mt_destroy(&uvmm->region_mt);
+ nouveau_uvmm_unlock(uvmm);
+
+ mutex_lock(&cli->mutex);
+ nouveau_vmm_fini(&uvmm->vmm);
+ drm_gpuva_manager_destroy(&uvmm->umgr);
+ mutex_unlock(&cli->mutex);
+
+ dma_resv_fini(&uvmm->resv);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
new file mode 100644
index 000000000000..fc7f6fd2a4e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOUVEAU_UVMM_H__
+#define __NOUVEAU_UVMM_H__
+
+#include <drm/drm_gpuva_mgr.h>
+
+#include "nouveau_drv.h"
+
+struct nouveau_uvmm {
+ struct nouveau_vmm vmm;
+ struct drm_gpuva_manager umgr;
+ struct maple_tree region_mt;
+ struct mutex mutex;
+ struct dma_resv resv;
+
+ u64 kernel_managed_addr;
+ u64 kernel_managed_size;
+
+ bool disabled;
+};
+
+struct nouveau_uvma_region {
+ struct nouveau_uvmm *uvmm;
+
+ struct {
+ u64 addr;
+ u64 range;
+ } va;
+
+ struct kref kref;
+
+ struct completion complete;
+ bool dirty;
+};
+
+struct nouveau_uvma {
+ struct drm_gpuva va;
+
+ struct nouveau_uvma_region *region;
+ u8 kind;
+};
+
+#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
+#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
+
+#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
+
+struct nouveau_uvmm_bind_job {
+ struct nouveau_job base;
+
+ struct kref kref;
+ struct list_head entry;
+ struct work_struct work;
+ struct completion complete;
+
+ /* struct bind_job_op */
+ struct list_head ops;
+};
+
+struct nouveau_uvmm_bind_job_args {
+ struct drm_file *file_priv;
+ struct nouveau_sched_entity *sched_entity;
+
+ unsigned int flags;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } out_sync;
+
+ struct {
+ struct drm_nouveau_vm_bind_op *s;
+ u32 count;
+ } op;
+};
+
+#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
+
+int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
+ u64 kernel_managed_addr, u64 kernel_managed_size);
+void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
+
+void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem);
+void nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo);
+
+int nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+static inline void nouveau_uvmm_lock(struct nouveau_uvmm *uvmm)
+{
+ mutex_lock(&uvmm->mutex);
+}
+
+static inline void nouveau_uvmm_unlock(struct nouveau_uvmm *uvmm)
+{
+ mutex_unlock(&uvmm->mutex);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 67d6619fcd5e..a6602c012671 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
- int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
- 0, NULL, 0, &vmm->vmm);
+ int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
+ PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 6053d6dc2184..99296f03371a 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -104,6 +104,90 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
return ret;
}
+int
+nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_GET,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_PUT,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+ void *argv, u32 argc, struct nvif_mem *mem, u64 offset)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_MAP,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ .memory = nvif_handle(&mem->object),
+ .offset = offset,
+ .argv = (u64)(uintptr_t)argv,
+ .argc = argc,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift, bool sparse)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_UNMAP,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ .sparse = sparse,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_SPARSE,
+ .addr = addr,
+ .size = size,
+ .ref = ref,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
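+/* A typical (illustrative) sequence for a sparse region on a RAW VMM:
+ *
+ *	nvif_vmm_raw_sparse(vmm, addr, size, true);
+ *	nvif_vmm_raw_get(vmm, addr, size, PAGE_SHIFT);
+ *	nvif_vmm_raw_map(vmm, addr, size, PAGE_SHIFT, argv, argc, mem, 0);
+ *	...
+ *	nvif_vmm_raw_unmap(vmm, addr, size, PAGE_SHIFT, true);
+ *	nvif_vmm_raw_put(vmm, addr, size, PAGE_SHIFT);
+ *	nvif_vmm_raw_sparse(vmm, addr, size, false);
+ *
+ * nouveau_uvmm.c wraps these calls in its nouveau_uvmm_vmm_*() helpers.
+ */
+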
void
nvif_vmm_dtor(struct nvif_vmm *vmm)
{
@@ -112,8 +196,9 @@ nvif_vmm_dtor(struct nvif_vmm *vmm)
}
int
-nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
- u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
+nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ enum nvif_vmm_type type, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
@@ -125,9 +210,18 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->version = 0;
- args->managed = managed;
args->addr = addr;
args->size = size;
+
+ switch (type) {
+ case UNMANAGED: args->type = NVIF_VMM_V0_TYPE_UNMANAGED; break;
+ case MANAGED: args->type = NVIF_VMM_V0_TYPE_MANAGED; break;
+ case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
memcpy(args->data, argv, argc);
ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 524cd3c0e3fe..8e459d88ff8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -58,10 +58,13 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
if (size) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
return ret;
@@ -88,10 +91,13 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
if (size) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
return ret;
@@ -113,7 +119,10 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- mutex_lock(&vmm->mutex);
+ if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
+ return -EINVAL;
+
+ mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, addr);
if (ret = -ENOENT, !vma || vma->addr != addr) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx",
@@ -134,7 +143,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_unmap_locked(vmm, vma, false);
ret = 0;
done:
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@@ -159,13 +168,16 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
memory = nvkm_umem_search(client, handle);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
VMM_DEBUG(vmm, "lookup %016llx", addr);
goto fail;
@@ -198,7 +210,7 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
}
}
vma->busy = true;
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
if (ret == 0) {
@@ -207,11 +219,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
vma->busy = false;
nvkm_vmm_unmap_region(vmm, vma);
fail:
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
nvkm_memory_unref(&memory);
return ret;
}
@@ -232,7 +244,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, args->v0.addr);
if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
@@ -248,7 +260,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_put_locked(vmm, vma);
ret = 0;
done:
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@@ -275,10 +287,10 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
page, align, size, &vma);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
@@ -314,6 +326,168 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}
+static inline int
+nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ const struct nvkm_vmm_page *page;
+
+ if (likely(shift)) {
+ for (page = vmm->func->page; page->shift; page++) {
+ if (shift == page->shift)
+ break;
+ }
+
+ if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+ VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+ *refd = page - vmm->func->page;
+
+ return 0;
+}
+
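+/* For example (assuming the backend exposes 4KiB and 64KiB pages): shift = 12
+ * selects the 4KiB page entry and requires size to be 4KiB-aligned, shift = 16
+ * selects 64KiB accordingly, and shift = 0 is rejected, since RAW ops must
+ * always name their page size explicitly.
+ */
+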
+static int
+nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
+}
+
+static int
+nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma vma = {
+ .addr = args->addr,
+ .size = args->size,
+ .used = true,
+ .mapref = false,
+ .no_comp = true,
+ };
+ struct nvkm_memory *memory;
+ void *argv = (void *)(uintptr_t)args->argv;
+ unsigned int argc = args->argc;
+ u64 handle = args->memory;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ vma.page = vma.refd = refd;
+
+ memory = nvkm_umem_search(client, handle);
+ if (IS_ERR(memory)) {
+ VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
+ return PTR_ERR(memory);
+ }
+
+ ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
+
+ nvkm_memory_unref(&vma.memory);
+ nvkm_memory_unref(&memory);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
+ args->sparse, refd);
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
+}
+
+static int
+nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_raw_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!uvmm->vmm->managed.raw)
+ return -EINVAL;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
+ return ret;
+
+ switch (args->v0.op) {
+ case NVIF_VMM_RAW_V0_GET:
+ return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_PUT:
+ return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_MAP:
+ return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_UNMAP:
+ return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_SPARSE:
+ return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
+ default:
+ return -EINVAL;
+ }
+}
+
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
@@ -326,6 +500,7 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
+ case NVIF_VMM_V0_RAW : return nvkm_uvmm_mthd_raw (uvmm, argv, argc);
case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
if (uvmm->vmm->func->mthd) {
return uvmm->vmm->func->mthd(uvmm->vmm,
@@ -366,10 +541,11 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_uvmm *uvmm;
int ret = -ENOSYS;
u64 addr, size;
- bool managed;
+ bool managed, raw;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
- managed = args->v0.managed != 0;
+ managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
+ raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
addr = args->v0.addr;
size = args->v0.size;
} else
@@ -377,12 +553,13 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
return -ENOMEM;
+
nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
*pobject = &uvmm->object;
if (!mmu->vmm) {
- ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
- NULL, "user", &uvmm->vmm);
+ ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
+ argv, argc, NULL, "user", &uvmm->vmm);
if (ret)
return ret;
@@ -393,6 +570,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
}
+ uvmm->vmm->managed.raw = raw;
page = uvmm->vmm->func->page;
args->v0.page_nr = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index ae793f400ba1..eb5fcadcb39a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -677,40 +677,17 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
}
static void
-nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, bool sparse, bool pfn)
-{
- const struct nvkm_vmm_desc_func *func = page->desc->func;
- nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
- false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
- sparse ? func->sparse : func->invalid ? func->invalid :
- func->unmap);
-}
-
-static int
-nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, struct nvkm_vmm_map *map,
- nvkm_vmm_pte_func func)
-{
- u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
- false, nvkm_vmm_ref_ptes, func, map, NULL);
- if (fail != ~0ULL) {
- if ((size = fail - addr))
- nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+ mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
+ mutex_unlock(&vmm->mutex.map);
}
static void
@@ -718,33 +695,108 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
+ mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL);
+ mutex_unlock(&vmm->mutex.map);
}
static void
-nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size)
+nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
{
nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL);
}
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ mutex_lock(&vmm->mutex.ref);
+ nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
+ mutex_unlock(&vmm->mutex.ref);
+}
+
static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
- u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
- nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+ u64 fail;
+
+ mutex_lock(&vmm->mutex.ref);
+ fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
+ nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) {
if (fail != addr)
- nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+ nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
+ mutex_unlock(&vmm->mutex.ref);
+ return -ENOMEM;
+ }
+ mutex_unlock(&vmm->mutex.ref);
+ return 0;
+}
+
+static void
+__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+ false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
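+/* For raw VMMs, unmap and unref run as two separately locked walks
+ * (mutex.map, then mutex.ref); legacy VMMs keep the single combined
+ * "unmap + unref" walk.
+ */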
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ if (vmm->managed.raw) {
+ nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
+ nvkm_vmm_ptes_put(vmm, page, addr, size);
+ } else {
+ __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
+ }
+}
+
+static int
+__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+ false, nvkm_vmm_ref_ptes, func, map, NULL);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
return -ENOMEM;
}
return 0;
}
-static inline struct nvkm_vma *
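+/* Same split for ref + map: raw VMMs take the ref and map locks in
+ * turn, while legacy VMMs use the combined walk that rolls back on
+ * failure.
+ */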
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ int ret;
+
+ if (vmm->managed.raw) {
+ ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
+
+ return 0;
+ } else {
+ return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
+ }
+}
+
+struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
{
struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
@@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
vmm->debug = mmu->subdev.debug;
kref_init(&vmm->kref);
- __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+ __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
+ mutex_init(&vmm->mutex.ref);
+ mutex_init(&vmm->mutex.map);
/* Locate the smallest page size supported by the backend, it will
* have the deepest nesting of page tables.
@@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
return ret;
+ vmm->managed.p.addr = 0;
+ vmm->managed.p.size = addr;
+
/* NVKM-managed area. */
if (size) {
if (!(vma = nvkm_vma_new(addr, size)))
@@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
size = vmm->limit - addr;
if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
return ret;
+
+ vmm->managed.n.addr = addr;
+ vmm->managed.n.size = size;
} else {
/* Address-space fully managed by NVKM, requiring calls to
* nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
@@ -1362,9 +1422,9 @@ void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
if (vma->memory) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_unmap_locked(vmm, vma, false);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
}
@@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
nvkm_vmm_pte_func func;
int ret;
+ map->no_comp = vma->no_comp;
+
/* Make sure we won't overrun the end of the memory object. */
if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
@@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
int ret;
- mutex_lock(&vmm->mutex);
+
+ if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
+ vmm->managed.raw)
+ return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
vma->busy = false;
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
{
struct nvkm_vma *vma = *pvma;
if (vma) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_put_locked(vmm, vma);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
*pvma = NULL;
}
}
@@ -1769,9 +1836,49 @@ int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
{
int ret;
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
+ return ret;
+}
+
+void
+nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+ bool sparse, u8 refd)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[refd];
+
+ nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
+}
+
+void
+nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+
+ nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+
+ if (unlikely(!size))
+ return -EINVAL;
+
+ return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ int ret;
+
+ mutex_lock(&vmm->mutex.ref);
+ ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
+ mutex_unlock(&vmm->mutex.ref);
+
return ret;
}
@@ -1779,9 +1886,9 @@ void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
if (inst && vmm && vmm->func->part) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
vmm->func->part(vmm, inst);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
}
@@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
int ret = 0;
if (vmm->func->join) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = vmm->func->join(vmm, inst);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index f6188aa9171c..f9bc30cdb2b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -163,6 +163,7 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *, const char *name,
struct nvkm_vmm **);
+struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
u64 addr, u64 size);
@@ -173,6 +174,30 @@ void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
+int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+ bool sparse, u8 refd);
+int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);
+
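+/* Check that [start, start + size) lies entirely inside one of the two
+ * client-managed regions surrounding the NVKM-managed area.
+ */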
+static inline bool
+nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
+{
+ u64 p_start = vmm->managed.p.addr;
+ u64 p_end = p_start + vmm->managed.p.size;
+ u64 n_start = vmm->managed.n.addr;
+ u64 n_end = n_start + vmm->managed.n.size;
+ u64 end = start + size;
+
+ if (start >= p_start && end <= p_end)
+ return true;
+
+ if (start >= n_start && end <= n_end)
+ return true;
+
+ return false;
+}
+
#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index 5438384d9a67..5e857c02e9aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -287,15 +287,17 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- ret = nvkm_memory_tags_get(memory, device, tags,
- nvkm_ltc_tags_clear,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
}
- if (map->tags->mn) {
+ if (!map->no_comp && map->tags->mn) {
u64 tags = map->tags->mn->offset + (map->offset >> 17);
if (page->shift == 17 || !gm20x) {
map->type |= tags << 44;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 17899fc95b2d..f3630d0e0d55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -453,15 +453,17 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- ret = nvkm_memory_tags_get(memory, device, tags,
- nvkm_ltc_tags_clear,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
}
- if (map->tags->mn) {
+ if (!map->no_comp && map->tags->mn) {
tags = map->tags->mn->offset + (map->offset >> 16);
map->ctag |= ((1ULL << page->shift) >> 16) << 36;
map->type |= tags << 36;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index b7548dcd72c7..ff08ad5005a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -296,19 +296,22 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- ret = nvkm_memory_tags_get(memory, device, tags, NULL,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
- }
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
- if (map->tags->mn) {
- u32 tags = map->tags->mn->offset + (map->offset >> 16);
- map->ctag |= (u64)comp << 49;
- map->type |= (u64)comp << 47;
- map->type |= (u64)tags << 49;
- map->next |= map->ctag;
+ if (map->tags->mn) {
+ u32 tags = map->tags->mn->offset +
+ (map->offset >> 16);
+ map->ctag |= (u64)comp << 49;
+ map->type |= (u64)comp << 47;
+ map->type |= (u64)tags << 49;
+ map->next |= map->ctag;
+ }
}
}
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index dc010d87a9ef..88e80fe98112 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -118,6 +118,9 @@ struct st7789_panel_info {
u32 bus_format;
u32 bus_flags;
bool invert_mode;
+ bool partial_mode;
+ u16 partial_start;
+ u16 partial_end;
};
struct st7789v {
@@ -126,6 +129,7 @@ struct st7789v {
struct spi_device *spi;
struct gpio_desc *reset;
struct regulator *power;
+ enum drm_panel_orientation orientation;
};
enum st7789v_prefix {
@@ -275,6 +279,21 @@ static const struct drm_display_mode et028013dma_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
+static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
+ .clock = 6000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 28,
+ .hsync_end = 240 + 28 + 10,
+ .htotal = 240 + 28 + 10 + 10,
+ .vdisplay = 280,
+ .vsync_start = 280 + 8,
+ .vsync_end = 280 + 8 + 4,
+ .vtotal = 280 + 8 + 4 + 4,
+ .width_mm = 43,
+ .height_mm = 37,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
static const struct st7789_panel_info default_panel = {
.mode = &default_mode,
.invert_mode = true,
@@ -299,6 +318,17 @@ static const struct st7789_panel_info et028013dma_panel = {
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};
+static const struct st7789_panel_info jt240mhqs_hwt_ek_e3_panel = {
+ .mode = &jt240mhqs_hwt_ek_e3_mode,
+ .invert_mode = true,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
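+ /* The 280 visible lines sit at rows 38..317 of the 320-line GRAM. */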
+ .partial_mode = true,
+ .partial_start = 38,
+ .partial_end = 318,
+};
+
static int st7789v_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -325,15 +355,33 @@ static int st7789v_get_modes(struct drm_panel *panel,
drm_display_info_set_bus_formats(&connector->display_info,
&ctx->info->bus_format, 1);
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
+
return 1;
}
+static enum drm_panel_orientation st7789v_get_orientation(struct drm_panel *p)
+{
+ struct st7789v *ctx = panel_to_st7789v(p);
+
+ return ctx->orientation;
+}
+
static int st7789v_prepare(struct drm_panel *panel)
{
struct st7789v *ctx = panel_to_st7789v(panel);
- u8 pixel_fmt, polarity;
+ u8 mode, pixel_fmt, polarity;
int ret;
+ if (!ctx->info->partial_mode)
+ mode = ST7789V_RGBCTRL_WO;
+ else
+ mode = 0;
+
switch (ctx->info->bus_format) {
case MEDIA_BUS_FMT_RGB666_1X18:
pixel_fmt = MIPI_DCS_PIXEL_FMT_18BIT;
@@ -473,6 +521,37 @@ static int st7789v_prepare(struct drm_panel *panel)
MIPI_DCS_EXIT_INVERT_MODE));
}
+ if (ctx->info->partial_mode) {
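+ /* DCS page/row addresses are inclusive, hence partial_end - 1. */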
+ u8 area_data[4] = {
+ (ctx->info->partial_start >> 8) & 0xff,
+ (ctx->info->partial_start >> 0) & 0xff,
+ ((ctx->info->partial_end - 1) >> 8) & 0xff,
+ ((ctx->info->partial_end - 1) >> 0) & 0xff,
+ };
+
+ /*
+ * Caution: if userspace ever pushes a mode different from the
+ * expected one (i.e., the one advertised by get_modes), we'll
+ * add margins.
+ */
+
+ ST7789V_TEST(ret, st7789v_write_command(
+ ctx, MIPI_DCS_ENTER_PARTIAL_MODE));
+
+ ST7789V_TEST(ret, st7789v_write_command(
+ ctx, MIPI_DCS_SET_PAGE_ADDRESS));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
+
+ ST7789V_TEST(ret, st7789v_write_command(
+ ctx, MIPI_DCS_SET_PARTIAL_ROWS));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
+ }
+
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
ST7789V_RAMCTRL_RM_RGB));
@@ -480,7 +559,7 @@ static int st7789v_prepare(struct drm_panel *panel)
ST7789V_RAMCTRL_MAGIC));
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
- ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
+ ST7789V_TEST(ret, st7789v_write_data(ctx, mode |
ST7789V_RGBCTRL_RCM(2) |
polarity));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
@@ -519,11 +598,12 @@ static int st7789v_unprepare(struct drm_panel *panel)
}
static const struct drm_panel_funcs st7789v_drm_funcs = {
- .disable = st7789v_disable,
- .enable = st7789v_enable,
- .get_modes = st7789v_get_modes,
- .prepare = st7789v_prepare,
- .unprepare = st7789v_unprepare,
+ .disable = st7789v_disable,
+ .enable = st7789v_enable,
+ .get_modes = st7789v_get_modes,
+ .get_orientation = st7789v_get_orientation,
+ .prepare = st7789v_prepare,
+ .unprepare = st7789v_unprepare,
};
static int st7789v_probe(struct spi_device *spi)
@@ -563,6 +643,8 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+
drm_panel_add(&ctx->panel);
return 0;
@@ -579,6 +661,7 @@ static const struct spi_device_id st7789v_spi_id[] = {
{ "st7789v", (unsigned long) &default_panel },
{ "t28cp45tn89-v17", (unsigned long) &t28cp45tn89_panel },
{ "et028013dma", (unsigned long) &et028013dma_panel },
+ { "jt240mhqs-hwt-ek-e3", (unsigned long) &jt240mhqs_hwt_ek_e3_panel },
{ }
};
MODULE_DEVICE_TABLE(spi, st7789v_spi_id);
@@ -587,6 +670,8 @@ static const struct of_device_id st7789v_of_match[] = {
{ .compatible = "sitronix,st7789v", .data = &default_panel },
{ .compatible = "inanbo,t28cp45tn89-v17", .data = &t28cp45tn89_panel },
{ .compatible = "edt,et028013dma", .data = &et028013dma_panel },
+ { .compatible = "jasonic,jt240mhqs-hwt-ek-e3",
+ .data = &jt240mhqs_hwt_ek_e3_panel },
{ }
};
MODULE_DEVICE_TABLE(of, st7789v_of_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index dbc597ab46fb..a8b4827dc425 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -720,6 +720,22 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
if (dma_fence_is_signaled(job->done_fence))
return DRM_GPU_SCHED_STAT_NOMINAL;
+ /*
+ * The Panfrost IRQ handler may take a long time to process an
+ * interrupt if another IRQ handler is hogging the CPU. For example,
+ * the HDMI encoder driver might be stuck in its IRQ handler for a
+ * significant time in the case of a bad cable connection. To catch
+ * such cases without reporting spurious Panfrost job timeouts,
+ * synchronize against the IRQ handler and re-check the fence status.
+ */
+ synchronize_irq(pfdev->js->irq);
+
+ if (dma_fence_is_signaled(job->done_fence)) {
+ dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
job_read(pfdev, JS_CONFIG(js)),
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
index 1dd722fe8631..dee530e4c8b2 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
@@ -696,6 +696,10 @@ static int rcar_du_probe(struct platform_device *pdev)
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
+ /*
+ * Don't use dev_err_probe(), as it would overwrite the probe
+ * deferral reason recorded in rcar_du_modeset_init().
+ */
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to initialize DRM/KMS (%d)\n", ret);
@@ -710,7 +714,7 @@ static int rcar_du_probe(struct platform_device *pdev)
if (ret)
goto error;
- DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+ drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
drm_fbdev_generic_setup(&rcdu->ddev, 32);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 9ff4537c26c8..70d8ad065bfa 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -935,7 +935,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
/* Initialize the Color Management Modules. */
ret = rcar_du_cmm_init(rcdu);
if (ret)
- return ret;
+ return dev_err_probe(rcdu->dev, ret,
+ "failed to initialize CMM\n");
/* Create the CRTCs. */
for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
@@ -955,7 +956,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
/* Initialize the encoders. */
ret = rcar_du_encoders_init(rcdu);
if (ret < 0)
- return ret;
+ return dev_err_probe(rcdu->dev, ret,
+ "failed to initialize encoders\n");
if (ret == 0) {
dev_err(rcdu->dev, "error: no encoder could be initialized\n");
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
index 9cbb5e6e2cba..7aa0373563a4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
@@ -123,6 +123,8 @@ static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_UYVY,
@@ -155,6 +157,8 @@ static const u32 rcar_du_vsp_formats_gen4[] = {
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGBX1010102,
@@ -177,6 +181,41 @@ static const u32 rcar_du_vsp_formats_gen4[] = {
DRM_FORMAT_Y212,
};
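+/*
+ * With per-pixel alpha blending disabled, hand the VSP the X variant of
+ * the format so the alpha channel is ignored.
+ */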
+static u32 rcar_du_vsp_state_get_format(struct rcar_du_vsp_plane_state *state)
+{
+ u32 fourcc = state->format->fourcc;
+
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+
+ case DRM_FORMAT_ABGR8888:
+ fourcc = DRM_FORMAT_XBGR8888;
+ break;
+
+ case DRM_FORMAT_BGRA8888:
+ fourcc = DRM_FORMAT_BGRX8888;
+ break;
+
+ case DRM_FORMAT_RGBA1010102:
+ fourcc = DRM_FORMAT_RGBX1010102;
+ break;
+ }
+ }
+
+ return fourcc;
+}
+
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
@@ -190,7 +229,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
.alpha = state->state.alpha >> 8,
.zpos = state->state.zpos,
};
- u32 fourcc = state->format->fourcc;
+ u32 fourcc = rcar_du_vsp_state_get_format(state);
unsigned int i;
cfg.src.left = state->state.src.x1 >> 16;
@@ -207,22 +246,6 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- fourcc = DRM_FORMAT_XRGB1555;
- break;
-
- case DRM_FORMAT_ARGB4444:
- fourcc = DRM_FORMAT_XRGB4444;
- break;
-
- case DRM_FORMAT_ARGB8888:
- fourcc = DRM_FORMAT_XRGB8888;
- break;
- }
- }
-
format = rcar_du_format_info(fourcc);
cfg.pixelformat = format->v4l2;
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 586c5c4ebb14..2dba7c5ffd2c 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -1001,7 +1001,6 @@ static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi)
static int rcar_mipi_dsi_probe(struct platform_device *pdev)
{
struct rcar_mipi_dsi *dsi;
- struct resource *mem;
int ret;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1018,8 +1017,7 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
return ret;
/* Acquire resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->mmio = devm_ioremap_resource(dsi->dev, mem);
+ dsi->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->mmio))
return PTR_ERR(dsi->mmio);
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
index f79c9f0359aa..563949d777dd 100644
--- a/drivers/gpu/drm/tests/drm_exec_test.c
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -125,8 +125,6 @@ static void test_duplicates(struct kunit *test)
drm_exec_fini(&exec);
}
-
-
static void test_prepare(struct kunit *test)
{
struct drm_exec_priv *priv = test->priv;
@@ -145,6 +143,8 @@ static void test_prepare(struct kunit *test)
break;
}
drm_exec_fini(&exec);
+
+ drm_gem_private_object_fini(&gobj);
}
static void test_prepare_array(struct kunit *test)
@@ -165,6 +165,29 @@ static void test_prepare_array(struct kunit *test)
1);
KUNIT_EXPECT_EQ(test, ret, 0);
drm_exec_fini(&exec);
+
+ drm_gem_private_object_fini(&gobj1);
+ drm_gem_private_object_fini(&gobj2);
+}
+
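+/*
+ * Verify that a drm_exec context can be re-initialized and re-used
+ * after drm_exec_fini().
+ */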
+static void test_multiple_loops(struct kunit *test)
+{
+ struct drm_exec exec;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec)
+ {
+ break;
+ }
+ drm_exec_fini(&exec);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec)
+ {
+ break;
+ }
+ drm_exec_fini(&exec);
+ KUNIT_SUCCEED(test);
}
static struct kunit_case drm_exec_tests[] = {
@@ -174,6 +197,7 @@ static struct kunit_case drm_exec_tests[] = {
KUNIT_CASE(test_duplicates),
KUNIT_CASE(test_prepare),
KUNIT_CASE(test_prepare_array),
+ KUNIT_CASE(test_multiple_loops),
{}
};
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f906b22959cf..dad298127226 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
new file mode 100644
index 000000000000..75fdce0cd98e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_KUNIT_TEST_HELPERS=y
+CONFIG_DRM_TTM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
new file mode 100644
index 000000000000..ec87c4fc1ad5
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 AND MIT
+
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
+ ttm_device_test.o \
+ ttm_pool_test.o \
+ ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
new file mode 100644
index 000000000000..b1b423b68cdf
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_test_case {
+ const char *description;
+ bool use_dma_alloc;
+ bool use_dma32;
+ bool pools_init_expected;
+};
+
+static void ttm_device_init_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *ttm_sys_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+
+ ttm_sys_man = &ttm_dev->sysman;
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+
+ ttm_device_fini(ttm_dev);
+}
+
+static void ttm_device_init_multiple(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_devs;
+ unsigned int i, num_dev = 3;
+ int err;
+
+ ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
+
+ for (i = 0; i < num_dev; i++) {
+ err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
+ }
+
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
+
+ for (i = 0; i < num_dev; i++)
+ ttm_device_fini(&ttm_devs[i]);
+}
+
+static void ttm_device_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_device_fini(ttm_dev);
+
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
+ KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+}
+
+static void ttm_device_init_no_vma_man(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct ttm_device *ttm_dev;
+ struct drm_vma_offset_manager *vma_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ /* Let's pretend there's no VMA manager allocated */
+ vma_man = drm->vma_offset_manager;
+ drm->vma_offset_manager = NULL;
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ /* Bring the manager back for a graceful cleanup */
+ drm->vma_offset_manager = vma_man;
+}
+
+static const struct ttm_device_test_case ttm_device_cases[] = {
+ {
+ .description = "No DMA allocations, no DMA32 required",
+ .use_dma_alloc = false,
+ .use_dma32 = false,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, DMA32 required",
+ .use_dma_alloc = true,
+ .use_dma32 = true,
+ .pools_init_expected = true,
+ },
+ {
+ .description = "No DMA allocations, DMA32 required",
+ .use_dma_alloc = false,
+ .use_dma32 = true,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, no DMA32 required",
+ .use_dma_alloc = true,
+ .use_dma32 = false,
+ .pools_init_expected = true,
+ },
+};
+
+static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
+
+static void ttm_device_init_pools(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ const struct ttm_device_test_case *params = test->param_value;
+ struct ttm_device *ttm_dev;
+ struct ttm_pool *pool;
+ struct ttm_pool_type pt;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev,
+ params->use_dma_alloc,
+ params->use_dma32);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = &ttm_dev->pool;
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+ KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
+ KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+ KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+
+ if (params->pools_init_expected) {
+ for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (int j = 0; j <= MAX_ORDER; ++j) {
+ pt = pool->caching[i].orders[j];
+ KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
+ KUNIT_EXPECT_EQ(test, pt.caching, i);
+ KUNIT_EXPECT_EQ(test, pt.order, j);
+
+ if (params->use_dma_alloc)
+ KUNIT_ASSERT_FALSE(test,
+ list_empty(&pt.pages));
+ }
+ }
+ }
+
+ ttm_device_fini(ttm_dev);
+}
+
+static struct kunit_case ttm_device_test_cases[] = {
+ KUNIT_CASE(ttm_device_init_basic),
+ KUNIT_CASE(ttm_device_init_multiple),
+ KUNIT_CASE(ttm_device_fini_basic),
+ KUNIT_CASE(ttm_device_init_no_vma_man),
+ KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
+ {}
+};
+
+static struct kunit_suite ttm_device_test_suite = {
+ .name = "ttm_device",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_device_test_cases,
+};
+
+kunit_test_suites(&ttm_device_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
new file mode 100644
index 000000000000..81661d8827aa
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_funcs ttm_dev_funcs = {
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs);
+
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32)
+{
+ struct drm_device *drm = priv->drm;
+ int err;
+
+ err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev,
+ drm->anon_inode->i_mapping,
+ drm->vma_offset_manager,
+ use_dma_alloc, use_dma32);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size)
+{
+ struct drm_gem_object gem_obj = { .size = size };
+ struct ttm_buffer_object *bo;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ bo->base = gem_obj;
+ bo->bdev = devs->ttm_dev;
+
+ return bo;
+}
+EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+
+ devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, devs);
+
+ devs->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+
+ devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
+ sizeof(*devs->drm), 0,
+ DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
+
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ devs = ttm_test_devices_basic(test);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(devs, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ devs->ttm_dev = ttm_dev;
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
+{
+ if (devs->ttm_dev)
+ ttm_device_fini(devs->ttm_dev);
+
+ drm_kunit_helper_free_device(test, devs->dev);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_put);
+
+int ttm_test_devices_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+
+void ttm_test_devices_fini(struct kunit *test)
+{
+ ttm_test_devices_put(test, test->priv);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
new file mode 100644
index 000000000000..e261e3660d0b
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_KUNIT_HELPERS_H
+#define TTM_KUNIT_HELPERS_H
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_bo.h>
+
+#include <drm/drm_kunit_helpers.h>
+#include <kunit/test.h>
+
+extern struct ttm_device_funcs ttm_dev_funcs;
+
+struct ttm_test_devices {
+ struct drm_device *drm;
+ struct device *dev;
+ struct ttm_device *ttm_dev;
+};
+
+/* Building blocks for test-specific init functions */
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32);
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
+
+/* Generic init/fini for tests that only need DRM/TTM devices */
+int ttm_test_devices_init(struct kunit *test);
+void ttm_test_devices_fini(struct kunit *test);
+
+#endif /* TTM_KUNIT_HELPERS_H */
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
new file mode 100644
index 000000000000..8d90870fb199
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_pool.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_pool_test_case {
+ const char *description;
+ unsigned int order;
+ bool use_dma_alloc;
+};
+
+struct ttm_pool_test_priv {
+ struct ttm_test_devices *devs;
+
+ /* Used to create mock ttm_tts */
+ struct ttm_buffer_object *mock_bo;
+};
+
+static struct ttm_operation_ctx simple_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+};
+
+static int ttm_pool_test_init(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_pool_test_fini(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
+ uint32_t page_flags,
+ enum ttm_caching caching,
+ size_t size)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, priv->devs, size);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+ priv->mock_bo = bo;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ return tt;
+}
+
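+/* Allocate pages from a fresh pool and free them again right away, so
+ * the pool's per-order free lists are populated for the test to probe.
+ */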
+static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
+ size_t size,
+ enum ttm_caching caching)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_pool *pool;
+ struct ttm_tt *tt;
+ unsigned long order = __fls(size / PAGE_SIZE);
+ int err;
+
+ tt = ttm_tt_kunit_init(test, order, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ return pool;
+}
+
+static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
+ {
+ .description = "One page",
+ .order = 0,
+ },
+ {
+ .description = "More than one page",
+ .order = 2,
+ },
+ {
+ .description = "Above the allocation limit",
+ .order = MAX_ORDER + 1,
+ },
+ {
+ .description = "One page, with coherent DMA mappings enabled",
+ .order = 0,
+ .use_dma_alloc = true,
+ },
+ {
+ .description = "Above the allocation limit, with coherent DMA mappings enabled",
+ .order = MAX_ORDER + 1,
+ .use_dma_alloc = true,
+ },
+};
+
+static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
+ ttm_pool_alloc_case_desc);
+
+static void ttm_pool_alloc_basic(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct page *fst_page, *last_page;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
+ false);
+
+ KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
+ KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
+ KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ fst_page = tt->pages[0];
+ last_page = tt->pages[tt->num_pages - 1];
+
+ if (params->order <= MAX_ORDER) {
+ if (params->use_dma_alloc) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
+ } else {
+ KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
+ }
+ } else {
+ if (params->use_dma_alloc) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NULL(test, (void *)last_page->private);
+ } else {
+ /*
+ * We expect to allocate one big block, followed by
+ * order-0 blocks.
+ */
+ KUNIT_ASSERT_EQ(test, fst_page->private,
+ min_t(unsigned int, MAX_ORDER,
+ params->order));
+ KUNIT_ASSERT_EQ(test, last_page->private, 0);
+ }
+ }
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_buffer_object *bo;
+ dma_addr_t dma1, dma2;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, devs, size);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ dma1 = tt->dma_address[0];
+ dma2 = tt->dma_address[tt->num_pages - 1];
+
+ KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_caching_match(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching tt_caching = ttm_uncached;
+ enum ttm_caching pool_caching = ttm_cached;
+ size_t size = PAGE_SIZE;
+ unsigned int order = 0;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, pool_caching);
+
+ pt_pool = &pool->caching[pool_caching].orders[order];
+ pt_tt = &pool->caching[tt_caching].orders[order];
+
+ tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t fst_size = (1 << order) * PAGE_SIZE;
+ size_t snd_size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, fst_size, caching);
+
+ pt_pool = &pool->caching[caching].orders[order];
+ pt_tt = &pool->caching[caching].orders[0];
+
+ tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_no_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_fini_basic(struct kunit *test)
+{
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+ pt = &pool->caching[caching].orders[order];
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+}
+
+static struct kunit_case ttm_pool_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
+ ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE(ttm_pool_alloc_order_caching_match),
+ KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
+ KUNIT_CASE(ttm_pool_alloc_order_mismatch),
+ KUNIT_CASE(ttm_pool_free_dma_alloc),
+ KUNIT_CASE(ttm_pool_free_no_dma_alloc),
+ KUNIT_CASE(ttm_pool_fini_basic),
+ {}
+};
+
+static struct kunit_suite ttm_pool_test_suite = {
+ .name = "ttm_pool",
+ .init = ttm_pool_test_init,
+ .exit = ttm_pool_test_fini,
+ .test_cases = ttm_pool_test_cases,
+};
+
+kunit_test_suites(&ttm_pool_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 984aa8f0a542..0bb56d063536 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -259,7 +259,7 @@ static const struct of_device_id tve200_of_match[] = {
static struct platform_driver tve200_driver = {
.driver = {
.name = "tve200",
- .of_match_table = of_match_ptr(tve200_of_match),
+ .of_match_table = tve200_of_match,
},
.probe = tve200_probe,
.remove_new = tve200_remove,
diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index 318799d317ba..5b08a5ad291e 100644
--- a/drivers/media/test-drivers/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -3,11 +3,9 @@ config VIDEO_VIVID
tristate "Virtual Video Test Driver"
depends on VIDEO_DEV && !SPARC32 && !SPARC64 && FB
depends on HAS_DMA
+ select FB_IOMEM_HELPERS
select FONT_SUPPORT
select FONT_8x16
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
diff --git a/drivers/media/test-drivers/vivid/vivid-osd.c b/drivers/media/test-drivers/vivid/vivid-osd.c
index 051f1805a16d..5c931b94a7b5 100644
--- a/drivers/media/test-drivers/vivid/vivid-osd.c
+++ b/drivers/media/test-drivers/vivid/vivid-osd.c
@@ -246,12 +246,10 @@ static int vivid_fb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops vivid_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = vivid_fb_check_var,
.fb_set_par = vivid_fb_set_par,
.fb_setcolreg = vivid_fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_cursor = NULL,
.fb_ioctl = vivid_fb_ioctl,
.fb_pan_display = vivid_fb_pan_display,
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5f8392b4f2a1..5d93ecc01f6a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -64,9 +64,7 @@ config FB_MACMODES
config FB_GRVGA
tristate "Aeroflex Gaisler framebuffer support"
depends on FB && SPARC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
@@ -139,9 +137,7 @@ config FB_ARMCLCD
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This is the frame buffer device driver for the Acorn VIDC graphics
hardware found in Acorn RISC PCs and other ARM-based machines. If
@@ -179,9 +175,7 @@ config FB_IMX
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_MXC || COMPILE_TEST
select LCD_CLASS_DEVICE
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
@@ -228,9 +222,7 @@ config FB_Q40
bool
depends on (FB = y) && Q40
default y
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
config FB_AMIGA
tristate "Amiga native chipset support"
@@ -271,9 +263,7 @@ config FB_AMIGA_AGA
config FB_FM2
bool "Amiga FrameMaster II/Rainbow II support"
depends on (FB = y) && ZORRO
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This is the frame buffer device driver for the Amiga FrameMaster
card from BSC (exhibited 1992 but not shipped as a CBM product).
@@ -309,9 +299,7 @@ config FB_OF
depends on FB && PPC && (!PPC_PSERIES || PCI)
depends on !DRM_OFDRM
select APERTURE_HELPERS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MACMODES
help
Say Y if you want support with Open Firmware for your graphics
@@ -331,9 +319,7 @@ config FB_CONTROL
config FB_PLATINUM
bool "Apple \"platinum\" display support"
depends on (FB = y) && PPC_PMAC && PPC32
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MACMODES
help
This driver supports a frame buffer for the "platinum" graphics
@@ -342,9 +328,7 @@ config FB_PLATINUM
config FB_VALKYRIE
bool "Apple \"valkyrie\" display support"
depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MACMODES
help
This driver supports a frame buffer for the "valkyrie" graphics
@@ -353,9 +337,7 @@ config FB_VALKYRIE
config FB_CT65550
bool "Chips 65550 display support"
depends on (FB = y) && PPC32 && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Chips & Technologies
@@ -364,9 +346,7 @@ config FB_CT65550
config FB_ASILIANT
bool "Asiliant (Chips) 69000 display support"
depends on (FB = y) && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Asiliant 69030 chipset
@@ -419,9 +399,7 @@ config FB_STI
config FB_MAC
bool "Generic Macintosh display support"
depends on (FB = y) && MAC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MACMODES
config FB_HP300
@@ -458,9 +436,7 @@ config FB_UVESA
tristate "Userspace VESA VGA graphics support"
depends on FB && CONNECTOR
depends on !UML
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
help
This is the frame buffer driver for generic VBE 2.0 compliant
@@ -477,9 +453,7 @@ config FB_VESA
bool "VESA VGA graphics support"
depends on (FB = y) && X86
select APERTURE_HELPERS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select SYSFB
help
This is the frame buffer device driver for generic VESA 2.0
@@ -492,9 +466,7 @@ config FB_EFI
depends on (FB = y) && !IA64 && EFI
select APERTURE_HELPERS
select DRM_PANEL_ORIENTATION_QUIRKS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select SYSFB
help
This is the EFI frame buffer device driver. If the firmware on
@@ -639,9 +611,7 @@ config FB_XVR500
config FB_XVR2500
bool "Sun XVR-2500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the framebuffer device for the Sun XVR-2500 and similar
@@ -653,9 +623,7 @@ config FB_XVR2500
config FB_XVR1000
bool "Sun XVR-1000 support"
depends on (FB = y) && SPARC64
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This is the framebuffer device for the Sun XVR-1000 and similar
graphics cards. The driver only works on sparc64 systems where
@@ -688,9 +656,7 @@ config FB_PVR2
config FB_OPENCORES
tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
depends on FB && HAS_DMA
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This enables support for the OpenCores VGA/LCD core.
@@ -717,9 +683,7 @@ config FB_ATMEL
depends on FB && OF && HAVE_CLK && HAS_IOMEM
depends on HAVE_FB_ATMEL || COMPILE_TEST
select FB_BACKLIGHT
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -823,10 +787,8 @@ config FB_RIVA_BACKLIGHT
config FB_I740
tristate "Intel740 support"
depends on FB && PCI
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VGASTATE
select VIDEO_NOMODESET
select FB_DDC
@@ -1104,10 +1066,8 @@ config FB_RADEON_DEBUG
config FB_ATY128
tristate "ATI Rage128 display support"
depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
+ select FB_IOMEM_HELPERS
select FB_MACMODES if PPC_PMAC
select VIDEO_NOMODESET
help
@@ -1326,9 +1286,7 @@ config FB_NEOMAGIC
config FB_KYRO
tristate "IMG Kyro support"
depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Say Y here if you have a STG4000 / Kyro / PowerVR 3 based
@@ -1372,9 +1330,7 @@ config FB_VOODOO1
tristate "3Dfx Voodoo Graphics (sst1) support"
depends on FB && PCI
depends on FB_DEVICE
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
@@ -1457,9 +1413,7 @@ config FB_PM3
config FB_CARMINE
tristate "Fujitsu carmine frame buffer support"
depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Fujitsu Carmine chip.
@@ -1551,9 +1505,7 @@ config FB_HIT
config FB_PMAG_AA
tristate "PMAG-AA TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
used mainly in the MIPS-based DECstation series.
@@ -1561,9 +1513,7 @@ config FB_PMAG_AA
config FB_PMAG_BA
tristate "PMAG-BA TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
used mainly in the MIPS-based DECstation series.
@@ -1571,9 +1521,7 @@ config FB_PMAG_BA
config FB_PMAGB_B
tristate "PMAGB-B TURBOchannel framebuffer support"
depends on FB && TC
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Support for the PMAGB-B TURBOchannel framebuffer card used mainly
in the MIPS-based DECstation series. The card is currently only
@@ -1582,9 +1530,7 @@ config FB_PMAGB_B
config FB_MAXINE
bool "Maxine (Personal DECstation) onboard framebuffer support"
depends on (FB = y) && MACH_DECSTATION
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Support for the onboard framebuffer (1024x768x8) in the Personal
DECstation series (Personal DECstation 5000/20, /25, /33, /50,
@@ -1593,9 +1539,7 @@ config FB_MAXINE
config FB_G364
bool "G364 frame buffer support"
depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
The G364 driver is the framebuffer used in MIPS Magnum 4000 and
Olivetti M700-10 systems.
@@ -1614,9 +1558,7 @@ config FB_PXA168
tristate "PXA168/910 LCD framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on CPU_PXA168 || CPU_PXA910 || COMPILE_TEST
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Frame buffer driver for the built-in LCD controller in the Marvell
MMP processor.
@@ -1624,9 +1566,7 @@ config FB_PXA168
config FB_PXA
tristate "PXA LCD framebuffer support"
depends on FB && ARCH_PXA
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEOMODE_HELPERS if OF
select FB_MODE_HELPERS if OF
help
@@ -1677,10 +1617,8 @@ config PXA3XX_GCU
config FB_FSL_DIU
tristate "Freescale DIU framebuffer support"
depends on FB && FSL_SOC
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select PPC_LIB_RHEAP
help
Framebuffer driver for the Freescale SoC DIU
@@ -1703,9 +1641,7 @@ config FB_S3C
tristate "Samsung S3C framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_S3C64XX || COMPILE_TEST
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Frame buffer driver for the built-in FB controller in the Samsung
SoC line such as the S3C6400 and S3C6410.
@@ -1774,9 +1710,7 @@ config FB_UDL
config FB_IBM_GXT4500
tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
depends on FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Say Y here to enable support for the IBM GXT4000P/6000P and
@@ -1808,9 +1742,7 @@ config FB_PS3_DEFAULT_SIZE_M
config FB_XILINX
tristate "Xilinx frame buffer support"
depends on FB && (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Include support for the Xilinx ML300/ML403 reference design
framebuffer. ML300 carries a 640*480 LCD display on the board,
@@ -1820,9 +1752,7 @@ config FB_GOLDFISH
tristate "Goldfish Framebuffer"
depends on FB
depends on GOLDFISH || COMPILE_TEST
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Framebuffer driver for Goldfish Virtual Platform
@@ -1834,9 +1764,7 @@ config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Support for the SH7760/SH7763/SH7720/SH7721 integrated
(D)STN/TFT LCD Controller.
@@ -1849,10 +1777,8 @@ config FB_DA8XX
tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_DAVINCI_DA8XX || SOC_AM33XX || COMPILE_TEST
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select FB_CFB_REV_PIXELS_IN_BYTE
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1907,9 +1833,7 @@ config FB_MB862XX
tristate "Fujitsu MB862xx GDC support"
depends on FB
depends on PCI || (OF && PPC)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
@@ -1967,9 +1891,7 @@ config FB_MX3
tristate "MX3 Framebuffer support"
depends on FB && MX3_IPU
select BACKLIGHT_CLASS_DEVICE
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
default y
help
This is a framebuffer device for the i.MX31 LCD Controller. So
@@ -2003,9 +1925,7 @@ config FB_SIMPLE
depends on FB
depends on !DRM_SIMPLEDRM
select APERTURE_HELPERS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Say Y if you want support for a simple frame-buffer.
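
Every Kconfig hunk above performs the same substitution. The new FB_IOMEM_HELPERS symbol is expected to select the same cfb_* helpers the three dropped lines did; a sketch of the core-side option as it plausibly reads (an assumption, since its defining hunk is not part of this diff):

	config FB_IOMEM_HELPERS
		bool
		depends on FB_CORE
		select FB_CFB_COPYAREA
		select FB_CFB_FILLRECT
		select FB_CFB_IMAGEBLIT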
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 8fec21dfca09..163d2c9f951c 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -605,13 +605,11 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
static const struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
+	FB_DEFAULT_IOMEM_OPS,
.fb_check_var = acornfb_check_var,
.fb_set_par = acornfb_set_par,
.fb_setcolreg = acornfb_setcolreg,
.fb_pan_display = acornfb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/*
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index 29c232809d5e..bf3c116684dc 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -98,12 +98,10 @@ static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static const struct fb_ops asiliantfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = asiliantfb_check_var,
.fb_set_par = asiliantfb_set_par,
.fb_setcolreg = asiliantfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/* Calculate the ratios for the dot clocks without using a single long long
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 3021660b3e87..c75a62287ec4 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -806,14 +806,12 @@ static int atmel_lcdfb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops atmel_lcdfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = atmel_lcdfb_check_var,
.fb_set_par = atmel_lcdfb_set_par,
.fb_setcolreg = atmel_lcdfb_setcolreg,
.fb_blank = atmel_lcdfb_blank,
.fb_pan_display = atmel_lcdfb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static irqreturn_t atmel_lcdfb_interrupt(int irq, void *dev_id)
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index b44fc78ccd4f..f4de11f19235 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -504,6 +504,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power);
static const struct fb_ops aty128fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = aty128fb_check_var,
.fb_set_par = aty128fb_set_par,
.fb_setcolreg = aty128fb_setcolreg,
@@ -511,9 +512,6 @@ static const struct fb_ops aty128fb_ops = {
.fb_blank = aty128fb_blank,
.fb_ioctl = aty128fb_ioctl,
.fb_sync = aty128fb_sync,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/*
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index 33a03f4ae025..e56065cdba97 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -530,10 +530,7 @@ static int init_hardware(struct carmine_hw *hw)
static const struct fb_ops carminefb_ops = {
.owner = THIS_MODULE,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = carmine_check_var,
.fb_set_par = carmine_set_par,
.fb_setcolreg = carmine_setcolreg,
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index d5f43454ccd7..b80711f13df8 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -82,13 +82,11 @@ static int chipsfb_blank(int blank, struct fb_info *info);
static const struct fb_ops chipsfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = chipsfb_check_var,
.fb_set_par = chipsfb_set_par,
.fb_setcolreg = chipsfb_setcolreg,
.fb_blank = chipsfb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int chipsfb_check_var(struct fb_var_screeninfo *var,
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 988dedcf6be8..4ca70a1bdd3b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1295,14 +1295,12 @@ static int da8xxfb_set_par(struct fb_info *info)
static const struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = fb_check_var,
.fb_set_par = da8xxfb_set_par,
.fb_setcolreg = fb_setcolreg,
.fb_pan_display = da8xx_pan_display,
.fb_ioctl = fb_ioctl,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_blank = cfb_blank,
};
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3391c8e84210..f9b4ddd592ce 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -277,11 +277,9 @@ static void efifb_destroy(struct fb_info *info)
static const struct fb_ops efifb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_destroy = efifb_destroy,
.fb_setcolreg = efifb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int efifb_setup(char *options)
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index 4dcb9dd79bf8..25d2e716edf2 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -167,11 +167,9 @@ static int fm2fb_blank(int blank, struct fb_info *info);
static const struct fb_ops fm2fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = fm2fb_setcolreg,
.fb_blank = fm2fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/*
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index c62b48f27ba9..7fbd9f069ac2 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1451,13 +1451,11 @@ static int fsl_diu_release(struct fb_info *info, int user)
static const struct fb_ops fsl_diu_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = fsl_diu_check_var,
.fb_set_par = fsl_diu_set_par,
.fb_setcolreg = fsl_diu_setcolreg,
.fb_pan_display = fsl_diu_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_ioctl = fsl_diu_ioctl,
.fb_open = fsl_diu_open,
.fb_release = fsl_diu_release,
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 0825cbde116e..7a1013b22fa7 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -112,12 +112,10 @@ static int g364fb_blank(int blank, struct fb_info *info);
static const struct fb_ops g364fb_ops = {
.owner = THIS_MODULE,
+	FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = g364fb_setcolreg,
.fb_pan_display = g364fb_pan_display,
.fb_blank = g364fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/*
diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig
index b184085a78c2..9a49916e0492 100644
--- a/drivers/video/fbdev/geode/Kconfig
+++ b/drivers/video/fbdev/geode/Kconfig
@@ -13,9 +13,7 @@ config FB_GEODE
config FB_GEODE_LX
tristate "AMD Geode LX framebuffer support"
depends on FB && FB_GEODE
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
@@ -29,9 +27,7 @@ config FB_GEODE_LX
config FB_GEODE_GX
tristate "AMD Geode GX framebuffer support"
depends on FB && FB_GEODE
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
@@ -45,9 +41,7 @@ config FB_GEODE_GX
config FB_GEODE_GX1
tristate "AMD Geode GX1 framebuffer support"
depends on FB && FB_GEODE
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index ddec35e3bbeb..a1919c1934ac 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -255,14 +255,11 @@ static int parse_panel_option(struct fb_info *info)
static const struct fb_ops gx1fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = gx1fb_check_var,
.fb_set_par = gx1fb_set_par,
.fb_setcolreg = gx1fb_setcolreg,
.fb_blank = gx1fb_blank,
- /* No HW acceleration for now. */
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static struct fb_info *gx1fb_init_fbinfo(struct device *dev)
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 4fb13790c528..af996634c1a9 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -268,14 +268,11 @@ static int gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
static const struct fb_ops gxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = gxfb_check_var,
.fb_set_par = gxfb_set_par,
.fb_setcolreg = gxfb_setcolreg,
.fb_blank = gxfb_blank,
- /* No HW acceleration for now. */
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static struct fb_info *gxfb_init_fbinfo(struct device *dev)
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index b70b286f43e4..cad99f5b7fe8 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -392,14 +392,11 @@ static int lxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
static const struct fb_ops lxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = lxfb_check_var,
.fb_set_par = lxfb_set_par,
.fb_setcolreg = lxfb_setcolreg,
.fb_blank = lxfb_blank,
- /* No HW acceleration for now. */
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static struct fb_info *lxfb_init_fbinfo(struct device *dev)
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index ef2528c3faa9..60c8a20d6fcd 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -162,14 +162,12 @@ static int goldfish_fb_blank(int blank, struct fb_info *info)
static const struct fb_ops goldfish_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = goldfish_fb_check_var,
.fb_set_par = goldfish_fb_set_par,
.fb_setcolreg = goldfish_fb_setcolreg,
.fb_pan_display = goldfish_fb_pan_display,
.fb_blank = goldfish_fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c
index 894ecfa45124..2fa9a274659c 100644
--- a/drivers/video/fbdev/grvga.c
+++ b/drivers/video/fbdev/grvga.c
@@ -253,13 +253,11 @@ static int grvga_pan_display(struct fb_var_screeninfo *var,
static const struct fb_ops grvga_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = grvga_check_var,
.fb_set_par = grvga_set_par,
.fb_setcolreg = grvga_setcolreg,
.fb_pan_display = grvga_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit
};
static int grvga_parse_custom(char *options,
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 8d0976578ddf..15a82c6b609e 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -602,14 +602,12 @@ static const struct fb_fix_screeninfo gxt4500_fix = {
static const struct fb_ops gxt4500_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = gxt4500_check_var,
.fb_set_par = gxt4500_set_par,
.fb_setcolreg = gxt4500_setcolreg,
.fb_pan_display = gxt4500_pan_display,
.fb_blank = gxt4500_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/* PCI functions */
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 3f5becfe9fd5..1897e65ab703 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -994,14 +994,12 @@ static const struct fb_ops i740fb_ops = {
.owner = THIS_MODULE,
.fb_open = i740fb_open,
.fb_release = i740fb_release,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = i740fb_check_var,
.fb_set_par = i740fb_set_par,
.fb_setcolreg = i740fb_setcolreg,
.fb_blank = i740fb_blank,
.fb_pan_display = i740fb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/* ------------------------------------------------------------------------- */
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 5b1ddce1f9d3..ee2b93bd231f 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -580,12 +580,10 @@ static int imxfb_blank(int blank, struct fb_info *info)
static const struct fb_ops imxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = imxfb_check_var,
.fb_set_par = imxfb_set_par,
.fb_setcolreg = imxfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_blank = imxfb_blank,
};
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 1109326ca3c4..af6c0581d3e2 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -661,13 +661,11 @@ static struct pci_driver kyrofb_pci_driver = {
static const struct fb_ops kyrofb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = kyrofb_check_var,
.fb_set_par = kyrofb_set_par,
.fb_setcolreg = kyrofb_setcolreg,
.fb_ioctl = kyrofb_ioctl,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index 5ca208d992cc..887fffdccd24 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -498,10 +498,8 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static const struct fb_ops macfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = macfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static void __init macfb_setup(char *options)
diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c
index 0ac1873b2acb..52528eb4dfb4 100644
--- a/drivers/video/fbdev/maxinefb.c
+++ b/drivers/video/fbdev/maxinefb.c
@@ -107,10 +107,8 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
static const struct fb_ops maxinefb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = maxinefb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
int __init maxinefb_init(void)
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index daaa5880601f..05bc51305d94 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -407,14 +407,12 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
/* framebuffer ops */
static struct fb_ops mb862xxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = mb862xxfb_check_var,
.fb_set_par = mb862xxfb_set_par,
.fb_setcolreg = mb862xxfb_setcolreg,
.fb_blank = mb862xxfb_blank,
.fb_pan_display = mb862xxfb_pan,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_ioctl = mb862xxfb_ioctl,
};
diff --git a/drivers/video/fbdev/mmp/fb/Kconfig b/drivers/video/fbdev/mmp/fb/Kconfig
index 0ec2e3fb9e17..b13882b34e79 100644
--- a/drivers/video/fbdev/mmp/fb/Kconfig
+++ b/drivers/video/fbdev/mmp/fb/Kconfig
@@ -2,9 +2,7 @@
config MMP_FB
tristate "fb driver for Marvell MMP Display Subsystem"
depends on FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
default y
help
fb driver for Marvell MMP Display Subsystem
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index ac9db01f29f2..42a87474bcea 100644
--- a/drivers/video/fbdev/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
@@ -454,14 +454,12 @@ static int mmpfb_blank(int blank, struct fb_info *info)
static const struct fb_ops mmpfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_blank = mmpfb_blank,
.fb_check_var = mmpfb_check_var,
.fb_set_par = mmpfb_set_par,
.fb_setcolreg = mmpfb_setcolreg,
.fb_pan_display = mmpfb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int modes_setup(struct mmpfb_info *fbi)
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 3a053005d2b9..e6bafaba3460 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -1247,13 +1247,11 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
*/
static const struct fb_ops mx3fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_set_par = mx3fb_set_par,
.fb_check_var = mx3fb_check_var,
.fb_setcolreg = mx3fb_setcolreg,
.fb_pan_display = mx3fb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_blank = mx3fb_blank,
};
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index 7ebe794583e1..7dc305c67af8 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -287,10 +287,8 @@ static int ocfb_init_var(struct ocfb_dev *fbdev)
static const struct fb_ops ocfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = ocfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int ocfb_probe(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 17f8238262be..dcb1b81d35db 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -293,13 +293,11 @@ static void offb_destroy(struct fb_info *info)
static const struct fb_ops offb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_destroy = offb_destroy,
.fb_setcolreg = offb_setcolreg,
.fb_set_par = offb_set_par,
.fb_blank = offb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static void __iomem *offb_map_reg(struct device_node *np, int index,
diff --git a/drivers/video/fbdev/omap/Kconfig b/drivers/video/fbdev/omap/Kconfig
index a6548283451f..f01278238d50 100644
--- a/drivers/video/fbdev/omap/Kconfig
+++ b/drivers/video/fbdev/omap/Kconfig
@@ -3,9 +3,7 @@ config FB_OMAP
tristate "OMAP frame buffer support"
depends on FB
depends on ARCH_OMAP1 || (ARM && COMPILE_TEST)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Frame buffer driver for OMAP based boards.
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 783bbe026207..f28cb90947a3 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1216,13 +1216,11 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
*/
static struct fb_ops omapfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_open = omapfb_open,
.fb_release = omapfb_release,
.fb_setcolreg = omapfb_setcolreg,
.fb_setcmap = omapfb_setcmap,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_blank = omapfb_blank,
.fb_ioctl = omapfb_ioctl,
.fb_check_var = omapfb_check_var,
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index b5af45e80fe1..4f9d8742ea0c 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -98,13 +98,11 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
static const struct fb_ops platinumfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = platinumfb_check_var,
.fb_set_par = platinumfb_set_par,
.fb_setcolreg = platinumfb_setcolreg,
.fb_blank = platinumfb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/*
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index 7ee20db9ceb8..acfc8c70c410 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -149,10 +149,8 @@ static int aafb_blank(int blank, struct fb_info *info)
static const struct fb_ops aafb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_blank = aafb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_cursor = aafb_cursor,
};
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 20a1815279f7..1e010520b335 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -119,10 +119,8 @@ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
static const struct fb_ops pmagbafb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = pmagbafb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index 4ab4d6c7a975..6432492467d1 100644
--- a/drivers/video/fbdev/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
@@ -123,10 +123,8 @@ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
static const struct fb_ops pmagbbfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = pmagbbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 790aa231a593..adee34386580 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -543,14 +543,12 @@ static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
static const struct fb_ops pxa168fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = pxa168fb_check_var,
.fb_set_par = pxa168fb_set_par,
.fb_setcolreg = pxa168fb_setcolreg,
.fb_blank = pxa168fb_blank,
.fb_pan_display = pxa168fb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static void pxa168fb_init_mode(struct fb_info *info,
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index beffb0602a2c..fa943612c4e2 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -599,13 +599,11 @@ static int pxafb_blank(int blank, struct fb_info *info)
static const struct fb_ops pxafb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = pxafb_check_var,
.fb_set_par = pxafb_set_par,
.fb_pan_display = pxafb_pan_display,
.fb_setcolreg = pxafb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_blank = pxafb_blank,
};
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index b180f0f6940e..1ff8fa176124 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -76,10 +76,8 @@ static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green,
static const struct fb_ops q40fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = q40fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int q40fb_probe(struct platform_device *dev)
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index c50b92c06c5d..2b85aad6a304 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1038,13 +1038,11 @@ static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
static const struct fb_ops s3c_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = s3c_fb_check_var,
.fb_set_par = s3c_fb_set_par,
.fb_blank = s3c_fb_blank,
.fb_setcolreg = s3c_fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_pan_display = s3c_fb_pan_display,
.fb_ioctl = s3c_fb_ioctl,
};
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index db898112f9a5..08a4943dc541 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -345,13 +345,11 @@ static int sh7760fb_set_par(struct fb_info *info)
static const struct fb_ops sh7760fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_blank = sh7760fb_blank,
.fb_check_var = sh7760fb_check_var,
.fb_setcolreg = sh7760_setcolreg,
.fb_set_par = sh7760fb_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static void sh7760fb_free_mem(struct fb_info *info)
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 461e50c8dd1b..62f99f6fccd3 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -109,11 +109,9 @@ static void simplefb_destroy(struct fb_info *info)
static const struct fb_ops simplefb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_destroy = simplefb_destroy,
.fb_setcolreg = simplefb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 8d2b1d60dd2e..f8ae54ca0cc3 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1310,12 +1310,10 @@ static int sstfb_setup(char *options)
static const struct fb_ops sstfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = sstfb_check_var,
.fb_set_par = sstfb_set_par,
.fb_setcolreg = sstfb_setcolreg,
- .fb_fillrect = cfb_fillrect, /* sstfb_fillrect */
- .fb_copyarea = cfb_copyarea, /* sstfb_copyarea */
- .fb_imageblit = cfb_imageblit,
.fb_ioctl = sstfb_ioctl,
};
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 76ff6375a659..3350f56058c4 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -61,10 +61,8 @@ static int gfb_setcolreg(unsigned regno,
static const struct fb_ops gfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = gfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int gfb_set_fbinfo(struct gfb_info *gp)
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 8765add3a5be..47b85e4959dd 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -66,10 +66,8 @@ static int s3d_setcolreg(unsigned regno,
static const struct fb_ops s3d_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = s3d_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int s3d_set_fbinfo(struct s3d_info *sp)
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index df2574d4ff30..a1a67830fbbc 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1416,13 +1416,11 @@ static struct fb_ops uvesafb_ops = {
.owner = THIS_MODULE,
.fb_open = uvesafb_open,
.fb_release = uvesafb_release,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = uvesafb_setcolreg,
.fb_setcmap = uvesafb_setcmap,
.fb_pan_display = uvesafb_pan_display,
.fb_blank = uvesafb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
.fb_check_var = uvesafb_check_var,
.fb_set_par = uvesafb_set_par,
};
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index fd4488777032..91d070ef6989 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -110,13 +110,11 @@ static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valk
static const struct fb_ops valkyriefb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = valkyriefb_check_var,
.fb_set_par = valkyriefb_set_par,
.fb_setcolreg = valkyriefb_setcolreg,
.fb_blank = valkyriefb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/* Sets the video mode according to info->var */
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index 422a1c53decd..c0edceea0a79 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -201,12 +201,10 @@ static void vesafb_destroy(struct fb_info *info)
static struct fb_ops vesafb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_destroy = vesafb_destroy,
.fb_setcolreg = vesafb_setcolreg,
.fb_pan_display = vesafb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int vesafb_setup(char *options)
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index 768a281a8d2c..bf049a1a6ab2 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -251,11 +251,9 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
static const struct fb_ops xilinxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = xilinx_fb_setcolreg,
.fb_blank = xilinx_fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
/* ---------------------------------------------------------------------
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
index 73205afec162..e0462361adf9 100644
--- a/include/drm/drm_exec.h
+++ b/include/drm/drm_exec.h
@@ -3,6 +3,7 @@
#ifndef __DRM_EXEC_H__
#define __DRM_EXEC_H__
+#include <linux/compiler.h>
#include <linux/ww_mutex.h>
#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
@@ -74,13 +75,12 @@ struct drm_exec {
* Since labels can't be defined local to the loops body we use a jump pointer
* to make sure that the retry is only used from within the loops body.
*/
-#define drm_exec_until_all_locked(exec) \
- for (void *__drm_exec_retry_ptr; ({ \
- __label__ __drm_exec_retry; \
-__drm_exec_retry: \
- __drm_exec_retry_ptr = &&__drm_exec_retry; \
- (void)__drm_exec_retry_ptr; \
- drm_exec_cleanup(exec); \
+#define drm_exec_until_all_locked(exec) \
+__PASTE(__drm_exec_, __LINE__): \
+ for (void *__drm_exec_retry_ptr; ({ \
+ __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+ (void)__drm_exec_retry_ptr; \
+ drm_exec_cleanup(exec); \
});)
/**
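
With the label now pasted from __LINE__ outside the loop, drm_exec_retry_on_contention() still jumps back to re-run drm_exec_cleanup(), but each expansion of the macro gets a uniquely named top-level label instead of a local label inside the statement expression. A minimal caller sketch (hand-written here; drm_exec_init(), drm_exec_lock_obj() and drm_exec_retry_on_contention() are the existing drm_exec entry points):

	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, obj);
		/* on contention, goto *__drm_exec_retry_ptr re-enters the loop */
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	/* ... all objects locked, do the actual work ... */
	drm_exec_fini(&exec);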
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index c0b13c43b459..bc9f6aa2f3fe 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -551,15 +551,17 @@ int drm_gem_evict(struct drm_gem_object *obj);
* @lock: the lock used to protect the gpuva list. The locking primitive
* must contain a dep_map field.
*
- * Call this if you're not proctecting access to the gpuva list
- * with the dma-resv lock, otherwise, drm_gem_gpuva_init() takes care
- * of initializing lock_dep_map for you.
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock, but with a custom lock.
*/
#define drm_gem_gpuva_set_lock(obj, lock) \
- if (!(obj)->gpuva.lock_dep_map) \
+ if (!WARN((obj)->gpuva.lock_dep_map, \
+ "GEM GPUVA lock should be set only once.")) \
(obj)->gpuva.lock_dep_map = &(lock)->dep_map
#define drm_gem_gpuva_assert_lock_held(obj) \
- lockdep_assert(lock_is_held((obj)->gpuva.lock_dep_map))
+ lockdep_assert((obj)->gpuva.lock_dep_map ? \
+ lock_is_held((obj)->gpuva.lock_dep_map) : \
+ dma_resv_held((obj)->resv))
#else
#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
@@ -573,11 +575,12 @@ int drm_gem_evict(struct drm_gem_object *obj);
*
* Calling this function is only necessary for drivers intending to support the
* &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
*/
static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
{
INIT_LIST_HEAD(&obj->gpuva.list);
- drm_gem_gpuva_set_lock(obj, &obj->resv->lock.base);
}
/**
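
After this change, drm_gem_gpuva_init() no longer implies a lock choice; drivers that protect the gpuva list with something other than the dma-resv lock opt in exactly once. A hypothetical driver-side sketch (my_gem_object and my_gem_object_init are made-up names):

	struct my_gem_object {
		struct drm_gem_object base;
		struct mutex gpuva_lock;	/* protects base.gpuva.list */
	};

	static void my_gem_object_init(struct my_gem_object *obj)
	{
		mutex_init(&obj->gpuva_lock);
		drm_gem_gpuva_init(&obj->base);
		/* only needed when not using the dma-resv lock */
		drm_gem_gpuva_set_lock(&obj->base, &obj->gpuva_lock);
	}

Drivers that skip the set_lock call get the dma_resv_held() fallback in drm_gem_gpuva_assert_lock_held(), matching the updated documentation.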
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 8b113c384236..0223a41a64b2 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -355,8 +355,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 839820aed87e..a58a14c9f222 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -60,6 +60,7 @@ extern "C" {
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
+#define DRM_IVPU_PARAM_CAPABILITIES 13
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
@@ -68,6 +69,9 @@ extern "C" {
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_CAP_METRIC_STREAMER 1
+#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
+
/**
* struct drm_ivpu_param - Get/Set VPU parameters
*/
@@ -129,8 +133,10 @@ struct drm_ivpu_param {
__u64 value;
};
-#define DRM_IVPU_BO_HIGH_MEM 0x00000001
+#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
+#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
+#define DRM_IVPU_BO_DMA_MEM 0x00000004
#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@@ -140,6 +146,7 @@ struct drm_ivpu_param {
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)
/**
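
User space can probe the new capability bits before relying on DRM_IVPU_BO_DMA_MEM. A hypothetical snippet (fd is an open accel device node; error handling is minimal):

	#include <sys/ioctl.h>
	#include <drm/ivpu_accel.h>

	static int ivpu_has_dma_range(int fd)
	{
		struct drm_ivpu_param param = {
			.param = DRM_IVPU_PARAM_CAPABILITIES,
			.index = DRM_IVPU_CAP_DMA_MEMORY_RANGE,
		};

		/* older kernels reject the capability query entirely */
		if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &param))
			return 0;
		return param.value != 0;
	}

Allocations would then pass DRM_IVPU_BO_DMA_MEM only when the capability is present, keeping user space compatible with kernels that predate this header.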
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 853a327433d3..b1ad9d5ffce8 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -33,11 +33,51 @@
extern "C" {
#endif
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_nouveau_channel_alloc {
+ __u32 fb_ctxdma_handle;
+ __u32 tt_ctxdma_handle;
+
+ __s32 channel;
+ __u32 pushbuf_domains;
+
+ /* Notifier memory */
+ __u32 notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ __u32 handle;
+ __u32 grclass;
+ } subchan[8];
+ __u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ __s32 channel;
+};
+
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@@ -126,16 +166,228 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
-#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as a synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+ /**
+ * @flags: the flags for a sync object
+ *
+ * The first 8 bits are used to determine the type of the sync object.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+ /**
+ * @handle: the handle of the sync object
+ */
+ __u32 handle;
+ /**
+ * @timeline_value:
+ *
+ * The timeline point of the sync object in case the syncobj is of
+ * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards, DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+ /**
+ * @kernel_managed_addr: start address of the kernel managed VA space
+ * region
+ */
+ __u64 kernel_managed_addr;
+ /**
+ * @kernel_managed_size: size of the kernel managed VA space region in
+ * bytes
+ */
+ __u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+ /**
+ * @op: the operation type
+ */
+ __u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the mapping lies within
+ * a sparse region, new sparse mappings are created where the unmapped (memory
+ * backed) mapping was previously mapped. To remove a sparse region, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind_op
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+ /**
+ * @handle: the handle of the DRM GEM object to map
+ */
+ __u32 handle;
+ /**
+ * @pad: 32 bit padding, should be 0
+ */
+ __u32 pad;
+ /**
+ * @addr:
+ *
+ * the address the VA space region or (memory backed) mapping should be mapped to
+ */
+ __u64 addr;
+ /**
+ * @bo_offset: the offset within the BO backing the mapping
+ */
+ __u64 bo_offset;
+ /**
+ * @range: the size of the requested mapping in bytes
+ */
+ __u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+ /**
+ * @op_count: the number of &drm_nouveau_vm_bind_op
+ */
+ __u32 op_count;
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+ */
+ __u64 op_ptr;
+};
+
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+ /**
+ * @va: the virtual address of the push buffer mapping
+ */
+ __u64 va;
+ /**
+ * @va_len: the length of the push buffer mapping
+ */
+ __u64 va_len;
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+ /**
+ * @channel: the channel to execute the push buffer in
+ */
+ __u32 channel;
+ /**
+ * @push_count: the number of &drm_nouveau_exec_push ops
+ */
+ __u32 push_count;
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @push_ptr: pointer to &drm_nouveau_exec_push ops
+ */
+ __u64 push_ptr;
+};
+
+#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
+#define DRM_NOUVEAU_VM_INIT 0x10
+#define DRM_NOUVEAU_VM_BIND 0x11
+#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -188,6 +440,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@@ -197,6 +453,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif
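
Taken together, the additions give nouveau an explicit VA-space submission path. A condensed user-space sketch, under stated assumptions (fd is an open render node; syncobj_handle, bo_handle and channel are valid; map_addr, map_size and push_len are placeholders; error handling elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	/* 1) split the VA space once, before any BO or channel is created */
	struct drm_nouveau_vm_init vm_init = {
		.kernel_managed_addr = 0,
		.kernel_managed_size = 1ULL << 32,	/* assumed split */
	};
	ioctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &vm_init);

	/* 2) asynchronously map a BO, signalling a syncobj when done */
	struct drm_nouveau_sync sig = {
		.flags  = DRM_NOUVEAU_SYNC_SYNCOBJ,
		.handle = syncobj_handle,
	};
	struct drm_nouveau_vm_bind_op op = {
		.op     = DRM_NOUVEAU_VM_BIND_OP_MAP,
		.handle = bo_handle,
		.addr   = map_addr,
		.range  = map_size,
	};
	struct drm_nouveau_vm_bind bind = {
		.op_count  = 1,
		.flags     = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
		.sig_count = 1,
		.sig_ptr   = (uintptr_t)&sig,
		.op_ptr    = (uintptr_t)&op,
	};
	ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);

	/* 3) execute a push buffer living in the range mapped above;
	 * real code would first wait on 'sig' (or pass it via wait_ptr
	 * here), since the async bind is otherwise unordered against
	 * the EXEC.
	 */
	struct drm_nouveau_exec_push push = {
		.va     = map_addr,
		.va_len = push_len,
	};
	struct drm_nouveau_exec exec = {
		.channel    = channel,
		.push_count = 1,
		.push_ptr   = (uintptr_t)&push,
	};
	ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);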
diff --git a/samples/Kconfig b/samples/Kconfig
index bf49ed0d7362..b0ddf5f36738 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -210,9 +210,7 @@ config SAMPLE_VFIO_MDEV_MDPY
config SAMPLE_VFIO_MDEV_MDPY_FB
tristate "Build VFIO mdpy example guest fbdev driver"
depends on FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
Guest fbdev driver for the virtual display sample driver.
diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
index cda477b28685..4598bc28acd9 100644
--- a/samples/vfio-mdev/mdpy-fb.c
+++ b/samples/vfio-mdev/mdpy-fb.c
@@ -88,11 +88,9 @@ static void mdpy_fb_destroy(struct fb_info *info)
static const struct fb_ops mdpy_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_destroy = mdpy_fb_destroy,
.fb_setcolreg = mdpy_fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
};
static int mdpy_fb_probe(struct pci_dev *pdev,