author    Rob Clark <robdclark@chromium.org>    2023-12-10 10:07:54 -0800
committer Rob Clark <robdclark@chromium.org>    2023-12-10 10:07:54 -0800
commit    cbaf84e73811ed0ff7ff6d7f52b73fd7ed082d65 (patch)
tree      307e1e7bd42f3b451182f3a548f2169aef3c6ddf /drivers
parent    a08935fc859b22884dcb6b5126d3a986467101ce (diff)
parent    fca9448ae2f5ddebd841c727ee86136e1b5cbd86 (diff)
Merge remote-tracking branch 'drm-misc/drm-misc-next' into msm-next
Backmerge drm-misc-next to pick up some dependencies for drm/msm patches, in particular:

  https://patchwork.freedesktop.org/patch/570219/?series=127251&rev=1
  https://patchwork.freedesktop.org/series/123411/

Signed-off-by: Rob Clark <robdclark@chromium.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/drm_accel.c1
-rw-r--r--drivers/accel/ivpu/Kconfig11
-rw-r--r--drivers/accel/ivpu/ivpu_debugfs.c57
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c49
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h18
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c79
-rw-r--r--drivers/accel/ivpu/ivpu_fw.h1
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c678
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h75
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h20
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c105
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx_reg.h2
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c69
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c251
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.h33
-rw-r--r--drivers/accel/ivpu/ivpu_job.c99
-rw-r--r--drivers/accel/ivpu/ivpu_job.h4
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c38
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.h1
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c44
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c153
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.h11
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c75
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h3
-rw-r--r--drivers/accel/ivpu/vpu_boot_api.h90
-rw-r--r--drivers/accel/ivpu/vpu_jsm_api.h309
-rw-r--r--drivers/accel/qaic/Makefile3
-rw-r--r--drivers/accel/qaic/mhi_controller.c44
-rw-r--r--drivers/accel/qaic/mhi_controller.h2
-rw-r--r--drivers/accel/qaic/qaic.h21
-rw-r--r--drivers/accel/qaic/qaic_control.c7
-rw-r--r--drivers/accel/qaic/qaic_data.c147
-rw-r--r--drivers/accel/qaic/qaic_drv.c98
-rw-r--r--drivers/accel/qaic/qaic_timesync.c395
-rw-r--r--drivers/accel/qaic/qaic_timesync.h11
-rw-r--r--drivers/acpi/acpi_video.c2
-rw-r--r--drivers/acpi/device_pm.c13
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/resource.c7
-rw-r--r--drivers/ata/pata_isapnp.c3
-rw-r--r--drivers/auxdisplay/Kconfig10
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c10
-rw-r--r--drivers/auxdisplay/ht16k33.c10
-rw-r--r--drivers/block/nbd.c117
-rw-r--r--drivers/block/null_blk/main.c25
-rw-r--r--drivers/char/agp/Makefile6
-rw-r--r--drivers/char/agp/agp.h9
-rw-r--r--drivers/char/agp/backend.c11
-rw-r--r--drivers/char/agp/compat_ioctl.c291
-rw-r--r--drivers/char/agp/compat_ioctl.h106
-rw-r--r--drivers/char/agp/frontend.c1068
-rw-r--r--drivers/dma-buf/dma-buf.c4
-rw-r--r--drivers/dma-buf/dma-fence.c3
-rw-r--r--drivers/dma-buf/sw_sync.c82
-rw-r--r--drivers/dma-buf/sync_debug.h2
-rw-r--r--drivers/dma-buf/sync_file.c19
-rw-r--r--drivers/dpll/dpll_netlink.c17
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/qemu_fw_cfg.c2
-rw-r--r--drivers/gpu/drm/Kconfig36
-rw-r--r--drivers/gpu/drm/Makefile14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h22
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c50
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c10
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c29
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c5
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c10
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c263
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h114
-rw-r--r--drivers/gpu/drm/ast/ast_main.c244
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c88
-rw-r--r--drivers/gpu/drm/ast/ast_post.c73
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h12
-rw-r--r--drivers/gpu/drm/bridge/Kconfig17
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c54
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.h4
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c140
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c163
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c22
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c58
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c6
-rw-r--r--drivers/gpu/drm/ci/xfails/requirements.txt6
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c161
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c234
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c451
-rw-r--r--drivers/gpu/drm/drm_atomic.c9
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c20
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c14
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c97
-rw-r--r--drivers/gpu/drm/drm_auth.c8
-rw-r--r--drivers/gpu/drm/drm_bridge.c44
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c1627
-rw-r--r--drivers/gpu/drm/drm_client.c12
-rw-r--r--drivers/gpu/drm/drm_connector.c6
-rw-r--r--drivers/gpu/drm/drm_context.c513
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c7
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h4
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c3
-rw-r--r--drivers/gpu/drm/drm_debugfs.c65
-rw-r--r--drivers/gpu/drm/drm_dma.c178
-rw-r--r--drivers/gpu/drm/drm_drv.c27
-rw-r--r--drivers/gpu/drm/drm_edid.c43
-rw-r--r--drivers/gpu/drm/drm_edid_load.c16
-rw-r--r--drivers/gpu/drm/drm_eld.c55
-rw-r--r--drivers/gpu/drm/drm_encoder.c4
-rw-r--r--drivers/gpu/drm/drm_file.c68
-rw-r--r--drivers/gpu/drm/drm_flip_work.c27
-rw-r--r--drivers/gpu/drm/drm_format_helper.c215
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c77
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c9
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c1168
-rw-r--r--drivers/gpu/drm/drm_hashtab.c203
-rw-r--r--drivers/gpu/drm/drm_internal.h23
-rw-r--r--drivers/gpu/drm/drm_ioc32.c613
-rw-r--r--drivers/gpu/drm/drm_ioctl.c96
-rw-r--r--drivers/gpu/drm/drm_irq.c204
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c32
-rw-r--r--drivers/gpu/drm/drm_legacy.h290
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c105
-rw-r--r--drivers/gpu/drm/drm_lock.c373
-rw-r--r--drivers/gpu/drm/drm_memory.c138
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c19
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c17
-rw-r--r--drivers/gpu/drm/drm_mode_object.c2
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/drm_pci.c204
-rw-r--r--drivers/gpu/drm/drm_plane.c150
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c32
-rw-r--r--drivers/gpu/drm/drm_scatter.c220
-rw-r--r--drivers/gpu/drm/drm_syncobj.c64
-rw-r--r--drivers/gpu/drm/drm_vblank.c101
-rw-r--r--drivers/gpu/drm/drm_vm.c665
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c9
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c30
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Makefile180
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c46
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c66
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c4
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c177
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c560
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c176
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c217
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h61
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c502
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c99
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_regs.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c662
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c171
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c187
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_buffer.c82
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_buffer.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c353
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c369
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c29
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c28
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c5
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c12
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h20
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c89
-rw-r--r--drivers/gpu/drm/i915/i915_params.h22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c27
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c29
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h9
-rw-r--r--drivers/gpu/drm/imagination/Kconfig18
-rw-r--r--drivers/gpu/drm/imagination/Makefile35
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.c645
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.h71
-rw-r--r--drivers/gpu/drm/imagination/pvr_cccb.c267
-rw-r--r--drivers/gpu/drm/imagination/pvr_cccb.h110
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.c464
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.h205
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c53
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.h29
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c658
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h721
-rw-r--r--drivers/gpu/drm/imagination/pvr_device_info.c255
-rw-r--r--drivers/gpu/drm/imagination/pvr_device_info.h186
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c1501
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.h129
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c625
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.h195
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c1489
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h509
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_info.h135
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c555
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.h14
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c252
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.h48
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c306
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.h13
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c471
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h78
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c414
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h170
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c549
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.h166
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c786
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.h161
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c2640
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.h108
-rw-r--r--drivers/gpu/drm/imagination/pvr_params.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_params.h72
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c433
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c1432
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h169
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h6193
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h159
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_defs.h179
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif.h2188
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h493
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h373
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h133
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h60
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h113
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h28
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h1648
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h258
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h108
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h78
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_heap_config.h113
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_meta.h356
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_mips.h335
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_mips_check.h58
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h136
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c285
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.h75
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream_defs.c351
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream_defs.h16
-rw-r--r--drivers/gpu/drm/imagination/pvr_sync.c289
-rw-r--r--drivers/gpu/drm/imagination/pvr_sync.h84
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c1092
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h66
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c237
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.h22
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c9
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c15
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c5
-rw-r--r--drivers/gpu/drm/lima/lima_device.c2
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c4
-rw-r--r--drivers/gpu/drm/loongson/Kconfig1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c5
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/event.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c207
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c380
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.h12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/event.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c14
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c9
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c138
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c37
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c180
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c225
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c57
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c75
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h23
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c119
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c32
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c1
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c45
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c18
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c492
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c38
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.h1
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c6
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c5
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c6
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c1
-rw-r--r--drivers/gpu/drm/tests/Makefile5
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c465
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c166
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c72
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c383
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c1904
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c138
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h3
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c16
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c54
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c11
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c6
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c10
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c17
-rw-r--r--drivers/gpu/drm/tiny/repaper.c10
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c44
-rw-r--r--drivers/gpu/drm/tiny/st7586.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c6
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c19
-rw-r--r--drivers/gpu/drm/v3d/Makefile4
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c51
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c178
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c50
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h165
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c779
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c93
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h94
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c397
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c1320
-rw-r--r--drivers/gpu/drm/v3d/v3d_sysfs.c69
-rw-r--r--drivers/gpu/drm/v3d/v3d_trace.h57
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c18
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c20
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c1
-rw-r--r--drivers/hid/hid-apple.c2
-rw-r--r--drivers/hid/hid-asus.c27
-rw-r--r--drivers/hid/hid-core.c12
-rw-r--r--drivers/hid/hid-debug.c3
-rw-r--r--drivers/hid/hid-glorious.c16
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-logitech-dj.c11
-rw-r--r--drivers/hid/hid-mcp2221.c4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-picolcd_fb.c1
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c16
-rw-r--r--drivers/i2c/busses/i2c-ocores.c4
-rw-r--r--drivers/i2c/busses/i2c-pxa.c76
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c16
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/btree.c11
-rw-r--r--drivers/md/bcache/super.c4
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/writeback.c24
-rw-r--r--drivers/md/dm-bufio.c87
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-delay.c112
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c23
-rw-r--r--drivers/md/dm-verity.h2
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/media/pci/ivtv/Kconfig4
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c6
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h2
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c8
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c14
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c42
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c45
-rw-r--r--drivers/net/ethernet/cortina/gemini.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c33
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c103
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c144
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c54
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c115
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c15
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c8
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c66
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v5.0.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c41
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/netkit.c22
-rw-r--r--drivers/net/ppp/ppp_synctty.c6
-rw-r--r--drivers/net/usb/aqc111.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/veth.c46
-rw-r--r--drivers/net/vrf.c38
-rw-r--r--drivers/net/wireguard/device.c4
-rw-r--r--drivers/net/wireguard/receive.c12
-rw-r--r--drivers/net/wireguard/send.c3
-rw-r--r--drivers/nfc/virtual_ncidev.c7
-rw-r--r--drivers/nvme/host/auth.c5
-rw-r--r--drivers/nvme/host/core.c21
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fc.c19
-rw-r--r--drivers/nvme/host/rdma.c1
-rw-r--r--drivers/nvme/host/tcp.c32
-rw-r--r--drivers/nvme/target/Kconfig4
-rw-r--r--drivers/nvme/target/configfs.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c4
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/parisc/power.c2
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/qualcomm/Kconfig2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c44
-rw-r--r--drivers/phy/realtek/Kconfig32
-rw-r--r--drivers/phy/realtek/Makefile3
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c1325
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c761
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c31
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c26
-rw-r--r--drivers/platform/x86/ideapad-laptop.c11
-rw-r--r--drivers/platform/x86/intel/telemetry/core.c4
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/ptp/ptp_private.h8
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/s390/block/dasd.c24
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/net/Kconfig3
-rw-r--r--drivers/s390/net/ism_drv.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/scsi_debug.c9
-rw-r--r--drivers/scsi/sd.c53
-rw-r--r--drivers/soc/qcom/Kconfig1
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c33
-rw-r--r--drivers/staging/sm750fb/sm750.c65
-rw-r--r--drivers/thunderbolt/switch.c6
-rw-r--r--drivers/thunderbolt/tb.c12
-rw-r--r--drivers/ufs/core/ufs-mcq.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c3
-rw-r--r--drivers/usb/core/config.c3
-rw-r--r--drivers/usb/core/hub.c23
-rw-r--r--drivers/usb/dwc2/hcd_intr.c15
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/drd.c2
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c69
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c8
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c13
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-plat.c50
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c2
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h7
-rw-r--r--drivers/usb/misc/usb-ljca.c17
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/typec/mux/Kconfig2
-rw-r--r--drivers/usb/typec/mux/nb7vpq904m.c44
-rw-r--r--drivers/usb/typec/tcpm/Kconfig1
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c41
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c12
-rw-r--r--drivers/usb/typec/tipd/core.c14
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c4
-rw-r--r--drivers/vhost/vdpa.c1
-rw-r--r--drivers/video/fbdev/Kconfig50
-rw-r--r--drivers/video/fbdev/acornfb.c2
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/arcfb.c114
-rw-r--r--drivers/video/fbdev/au1100fb.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c11
-rw-r--r--drivers/video/fbdev/clps711x-fb.c4
-rw-r--r--drivers/video/fbdev/core/Kconfig7
-rw-r--r--drivers/video/fbdev/core/Makefile2
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c3
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c3
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c3
-rw-r--r--drivers/video/fbdev/core/fb_chrdev.c68
-rw-r--r--drivers/video/fbdev/core/fb_defio.c2
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c36
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c6
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c3
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c3
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c3
-rw-r--r--drivers/video/fbdev/cyber2000fb.c9
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/ps3fb.c11
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/sbuslib.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c16
-rw-r--r--drivers/video/fbdev/simplefb.c132
-rw-r--r--drivers/video/fbdev/sm712fb.c6
-rw-r--r--drivers/video/fbdev/smscufx.c2
-rw-r--r--drivers/video/fbdev/udlfb.c2
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c2
-rw-r--r--drivers/video/fbdev/vfb.c10
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c4
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/virtio/virtio_pci_common.c6
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c7
-rw-r--r--drivers/xen/events/events_2l.c8
-rw-r--r--drivers/xen/events/events_base.c576
-rw-r--r--drivers/xen/events/events_internal.h1
-rw-r--r--drivers/xen/pcpu.c22
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/swiotlb-xen.c1
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c34
694 files changed, 49568 insertions, 19775 deletions
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 294b572a9c33..24cac4c0274b 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -11,6 +11,7 @@
#include <linux/idr.h>
#include <drm/drm_accel.h>
+#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 1a4c4ed9d113..682c53245286 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -1,16 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_ACCEL_IVPU
- tristate "Intel VPU for Meteor Lake and newer"
+ tristate "Intel NPU (Neural Processing Unit)"
depends on DRM_ACCEL
depends on X86_64 && !UML
depends on PCI && PCI_MSI
select FW_LOADER
- select SHMEM
+ select DRM_GEM_SHMEM_HELPER
select GENERIC_ALLOCATOR
help
- Choose this option if you have a system that has an 14th generation Intel CPU
- or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
- inference accelerator for Computer Vision and Deep Learning applications.
+ Choose this option if you have a system with a 14th generation
+ Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
+ is a CPU-integrated inference accelerator for Computer Vision
+ and Deep Learning applications.
If "M" is selected, the module will be called intel_vpu.
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index ea453b985b49..19035230563d 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -14,6 +14,7 @@
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
+#include "ivpu_hw.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
@@ -115,6 +116,31 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"reset_pending", reset_pending_show, 0},
};
+static ssize_t
+dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 dvfs_mode;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
+ if (ret < 0)
+ return ret;
+
+ fw->dvfs_mode = dvfs_mode;
+
+ ivpu_pm_schedule_recovery(vdev);
+
+ return size;
+}
+
+static const struct file_operations dvfs_mode_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = dvfs_mode_fops_write,
+};
+
static int fw_log_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
@@ -152,6 +178,30 @@ static const struct file_operations fw_log_fops = {
};
static ssize_t
+fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
+ size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, size, &enable);
+ if (ret < 0)
+ return ret;
+
+ ivpu_hw_profiling_freq_drive(vdev, enable);
+ ivpu_pm_schedule_recovery(vdev);
+
+ return size;
+}
+
+static const struct file_operations fw_profiling_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = fw_profiling_freq_fops_write,
+};
+
+static ssize_t
fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *pos)
{
@@ -280,6 +330,9 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
&ivpu_force_recovery_fops);
+ debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
+ &dvfs_mode_fops);
+
debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
&fw_log_fops);
debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
@@ -291,4 +344,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
+
+ if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX)
+ debugfs_create_file("fw_profiling_freq_drive", 0200,
+ debugfs_root, vdev, &fw_profiling_freq_fops);
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 790603017653..64927682161b 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -31,8 +31,6 @@
__stringify(DRM_IVPU_DRIVER_MINOR) "."
#endif
-static const struct drm_driver driver;
-
static struct lock_class_key submitted_jobs_xa_lock_class_key;
int ivpu_dbg_mask;
@@ -41,7 +39,7 @@ MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
-MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");
+MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@@ -93,8 +91,8 @@ static void file_priv_release(struct kref *ref)
ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
ivpu_cmdq_release_all(file_priv);
- ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock);
@@ -317,16 +315,14 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
unsigned long timeout;
int ret;
- if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
+ if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
return 0;
- ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
+ ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);
timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
while (1) {
- ret = ivpu_ipc_irq_handler(vdev);
- if (ret)
- break;
+ ivpu_ipc_irq_handler(vdev, NULL);
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
break;
@@ -362,7 +358,7 @@ int ivpu_boot(struct ivpu_device *vdev)
int ret;
/* Update boot params located at first 4KB of FW memory */
- ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
+ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
@@ -414,7 +410,9 @@ static const struct drm_driver driver = {
.open = ivpu_open,
.postclose = ivpu_postclose,
- .gem_prime_import = ivpu_gem_prime_import,
+
+ .gem_create_object = ivpu_gem_create_object,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -427,6 +425,13 @@ static const struct drm_driver driver = {
.minor = DRM_IVPU_DRIVER_MINOR,
};
+static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
+{
+ struct ivpu_device *vdev = arg;
+
+ return ivpu_ipc_irq_thread_handler(vdev);
+}
+
static int ivpu_irq_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -440,8 +445,8 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
- IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
+ ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -533,6 +538,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+ INIT_LIST_HEAD(&vdev->bo_list);
+
+ ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
+ if (ret)
+ goto err_xa_destroy;
ret = ivpu_pci_init(vdev);
if (ret)
@@ -550,7 +560,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
- goto err_xa_destroy;
+ goto err_power_down;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
@@ -574,20 +584,15 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
ivpu_pm_init(vdev);
- ret = ivpu_job_done_thread_init(vdev);
- if (ret)
- goto err_ipc_fini;
-
ret = ivpu_boot(vdev);
if (ret)
- goto err_job_done_thread_fini;
+ goto err_ipc_fini;
+ ivpu_job_done_consumer_init(vdev);
ivpu_pm_enable(vdev);
return 0;
-err_job_done_thread_fini:
- ivpu_job_done_thread_fini(vdev);
err_ipc_fini:
ivpu_ipc_fini(vdev);
err_fw_fini:
@@ -612,7 +617,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
- ivpu_job_done_thread_fini(vdev);
+ ivpu_job_done_consumer_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
ivpu_ipc_fini(vdev);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 417ddeca8517..ebc4b84f27b2 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -17,9 +17,10 @@
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_mmu_context.h"
+#include "ivpu_ipc.h"
#define DRIVER_NAME "intel_vpu"
-#define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)"
+#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
@@ -88,6 +89,7 @@ struct ivpu_wa_table {
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
+ bool disable_d0i3_msg;
};
struct ivpu_hw_info;
@@ -115,8 +117,11 @@ struct ivpu_device {
struct xarray context_xa;
struct xa_limit context_xa_limit;
+ struct mutex bo_list_lock; /* Protects bo_list */
+ struct list_head bo_list;
+
struct xarray submitted_jobs_xa;
- struct task_struct *job_done_thread;
+ struct ivpu_ipc_consumer job_done_consumer;
atomic64_t unique_id_counter;
@@ -126,6 +131,7 @@ struct ivpu_device {
int tdr;
int reschedule_suspend;
int autosuspend;
+ int d0i3_entry_msg;
} timeout;
};
@@ -148,9 +154,11 @@ extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
extern bool ivpu_disable_mmu_cont_pages;
-#define IVPU_TEST_MODE_DISABLED 0
-#define IVPU_TEST_MODE_FW_TEST 1
-#define IVPU_TEST_MODE_NULL_HW 2
+#define IVPU_TEST_MODE_FW_TEST BIT(0)
+#define IVPU_TEST_MODE_NULL_HW BIT(1)
+#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
+#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
+#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 691da521dde5..6576232f3e67 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -33,12 +33,17 @@
#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
-#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
+/* Check if FW API is compatible with the driver */
+#define IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, name, min_major) \
ivpu_fw_check_api(vdev, fw_hdr, #name, \
VPU_##name##_API_VER_INDEX, \
VPU_##name##_API_VER_MAJOR, \
VPU_##name##_API_VER_MINOR, min_major)
+/* Check if API version is lower than the given version */
+#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \
+ ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor)
+
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
@@ -105,6 +110,19 @@ ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw
return 0;
}
+static bool
+ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
+ const char *str, int index, u16 major, u16 minor)
+{
+ u16 fw_major = (u16)(fw_hdr->api_version[index] >> 16);
+ u16 fw_minor = (u16)(fw_hdr->api_version[index]);
+
+ if (fw_major < major || (fw_major == major && fw_minor < minor))
+ return true;
+
+ return false;
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -164,9 +182,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
(const char *)fw_hdr + VPU_FW_HEADER_SIZE);
- if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
+ if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
- if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
+ if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
return -EINVAL;
fw->runtime_addr = runtime_addr;
@@ -182,6 +200,8 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
+ fw->dvfs_mode = 0;
+
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
@@ -195,6 +215,24 @@ static void ivpu_fw_release(struct ivpu_device *vdev)
release_firmware(vdev->fw->file);
}
+/* Initialize workarounds that depend on FW version */
+static void
+ivpu_fw_init_wa(struct ivpu_device *vdev)
+{
+ const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
+
+ if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
+ (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
+ (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
+ vdev->wa.disable_d0i3_msg = true;
+
+ /* Force enable the feature for testing purposes */
+ if (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_ENABLE)
+ vdev->wa.disable_d0i3_msg = false;
+
+ IVPU_PRINT_WA(disable_d0i3_msg);
+}
+
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -248,7 +286,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (fw->shave_nn_size) {
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
- fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
+ fw->shave_nn_size, DRM_IVPU_BO_WC);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
ret = -ENOMEM;
@@ -297,6 +335,8 @@ int ivpu_fw_init(struct ivpu_device *vdev)
if (ret)
goto err_fw_release;
+ ivpu_fw_init_wa(vdev);
+
ret = ivpu_fw_mem_init(vdev);
if (ret)
goto err_fw_release;
@@ -422,14 +462,31 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->punit_telemetry_sram_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
boot_params->vpu_telemetry_enable);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n",
+ boot_params->dvfs_mode);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n",
+ boot_params->d0i3_delayed_entry);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
+ boot_params->d0i3_residency_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
+ boot_params->d0i3_entry_vpu_ts);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;
- /* In case of warm boot we only have to reset the entrypoint addr */
+ /* In case of warm boot only update variable params */
if (!ivpu_fw_is_cold_boot(vdev)) {
+ boot_params->d0i3_residency_time_us =
+ ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
+ boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
+ boot_params->d0i3_residency_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
+ boot_params->d0i3_entry_vpu_ts);
+
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
wmb(); /* Flush WC buffers after writing save_restore_ret_address */
@@ -443,6 +500,13 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
/*
+ * This param is a debug firmware feature. It switches default clock
+ * to higher resolution one for fine-grained and more accurate firmware
+ * task profiling.
+ */
+ boot_params->perf_clk_frequency = ivpu_hw_profiling_freq_get(vdev);
+
+ /*
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
@@ -493,6 +557,11 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+ boot_params->dvfs_mode = vdev->fw->dvfs_mode;
+ if (!IVPU_WA(disable_d0i3_msg))
+ boot_params->d0i3_delayed_entry = 1;
+ boot_params->d0i3_residency_time_us = 0;
+ boot_params->d0i3_entry_vpu_ts = 0;
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 10ae2847f0ef..66b60fa161b5 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -27,6 +27,7 @@ struct ivpu_fw_info {
u32 trace_level;
u32 trace_destination_mask;
u64 trace_hw_component_mask;
+ u32 dvfs_mode;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index c91852f2edc8..1dda4f38ea25 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -20,215 +20,18 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
-MODULE_IMPORT_NS(DMA_BUF);
-
static const struct drm_gem_object_funcs ivpu_gem_funcs;
-static struct lock_class_key prime_bo_lock_class_key;
-
-static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
-{
- /* Pages are managed by the underlying dma-buf */
- return 0;
-}
-
-static void prime_free_pages_locked(struct ivpu_bo *bo)
-{
- /* Pages are managed by the underlying dma-buf */
-}
-
-static int prime_map_pages_locked(struct ivpu_bo *bo)
-{
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- struct sg_table *sgt;
-
- sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
- return PTR_ERR(sgt);
- }
-
- bo->sgt = sgt;
- return 0;
-}
-
-static void prime_unmap_pages_locked(struct ivpu_bo *bo)
-{
- dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
- bo->sgt = NULL;
-}
-
-static const struct ivpu_bo_ops prime_ops = {
- .type = IVPU_BO_TYPE_PRIME,
- .name = "prime",
- .alloc_pages = prime_alloc_pages_locked,
- .free_pages = prime_free_pages_locked,
- .map_pages = prime_map_pages_locked,
- .unmap_pages = prime_unmap_pages_locked,
-};
-
-static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
-{
- int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
- struct page **pages;
-
- pages = drm_gem_get_pages(&bo->base);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- if (bo->flags & DRM_IVPU_BO_WC)
- set_pages_array_wc(pages, npages);
- else if (bo->flags & DRM_IVPU_BO_UNCACHED)
- set_pages_array_uc(pages, npages);
-
- bo->pages = pages;
- return 0;
-}
-
-static void shmem_free_pages_locked(struct ivpu_bo *bo)
-{
- if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
- set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- drm_gem_put_pages(&bo->base, bo->pages, true, false);
- bo->pages = NULL;
-}
-
-static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
-{
- int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- struct sg_table *sgt;
- int ret;
-
- sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
- if (IS_ERR(sgt)) {
- ivpu_err(vdev, "Failed to allocate sgtable\n");
- return PTR_ERR(sgt);
- }
-
- ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
- if (ret) {
- ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
- goto err_free_sgt;
- }
-
- bo->sgt = sgt;
- return 0;
-
-err_free_sgt:
- kfree(sgt);
- return ret;
-}
-
-static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
-{
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-
- dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->sgt);
- kfree(bo->sgt);
- bo->sgt = NULL;
-}
-
-static const struct ivpu_bo_ops shmem_ops = {
- .type = IVPU_BO_TYPE_SHMEM,
- .name = "shmem",
- .alloc_pages = shmem_alloc_pages_locked,
- .free_pages = shmem_free_pages_locked,
- .map_pages = ivpu_bo_map_pages_locked,
- .unmap_pages = ivpu_bo_unmap_pages_locked,
-};
-
-static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
-{
- unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
- struct page **pages;
- int ret;
-
- pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < npages; i++) {
- pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (!pages[i]) {
- ret = -ENOMEM;
- goto err_free_pages;
- }
- cond_resched();
- }
-
- bo->pages = pages;
- return 0;
-
-err_free_pages:
- while (i--)
- put_page(pages[i]);
- kvfree(pages);
- return ret;
-}
-
-static void internal_free_pages_locked(struct ivpu_bo *bo)
-{
- unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
-
- if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
- set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- for (i = 0; i < npages; i++)
- put_page(bo->pages[i]);
-
- kvfree(bo->pages);
- bo->pages = NULL;
-}
-
-static const struct ivpu_bo_ops internal_ops = {
- .type = IVPU_BO_TYPE_INTERNAL,
- .name = "internal",
- .alloc_pages = internal_alloc_pages_locked,
- .free_pages = internal_free_pages_locked,
- .map_pages = ivpu_bo_map_pages_locked,
- .unmap_pages = ivpu_bo_unmap_pages_locked,
-};
-
-static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
-{
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- int ret;
-
- lockdep_assert_held(&bo->lock);
- drm_WARN_ON(&vdev->drm, bo->sgt);
-
- ret = bo->ops->alloc_pages(bo);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
- return ret;
- }
-
- ret = bo->ops->map_pages(bo);
- if (ret) {
- ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
- goto err_free_pages;
- }
- return ret;
-
-err_free_pages:
- bo->ops->free_pages(bo);
- return ret;
-}
-
-static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
+static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
- mutex_lock(&bo->lock);
-
- WARN_ON(!bo->sgt);
- bo->ops->unmap_pages(bo);
- WARN_ON(bo->sgt);
- bo->ops->free_pages(bo);
- WARN_ON(bo->pages);
-
- mutex_unlock(&bo->lock);
+ if (bo->ctx)
+ ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
+ action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
+ bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
+ else
+ ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
+ action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
+ bo->handle);
}
/*
@@ -245,21 +48,24 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
mutex_lock(&bo->lock);
- if (!bo->vpu_addr) {
- ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
- bo->ctx->id, bo->handle);
+ ivpu_dbg_bo(vdev, bo, "pin");
+
+ if (!bo->ctx) {
+ ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
ret = -EINVAL;
goto unlock;
}
- if (!bo->sgt) {
- ret = ivpu_bo_alloc_and_map_pages_locked(bo);
- if (ret)
+ if (!bo->mmu_mapped) {
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
goto unlock;
- }
+ }
- if (!bo->mmu_mapped) {
- ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
+ ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
ivpu_bo_is_snooped(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
@@ -281,248 +87,213 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
int ret;
- if (!range) {
- if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
- range = &vdev->hw->ranges.shave;
- else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
- range = &vdev->hw->ranges.dma;
- else
- range = &vdev->hw->ranges.user;
- }
+ mutex_lock(&bo->lock);
- mutex_lock(&ctx->lock);
- ret = ivpu_mmu_context_insert_node_locked(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
+ ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
bo->vpu_addr = bo->mm_node.start;
- list_add_tail(&bo->ctx_node, &ctx->bo_list);
+ } else {
+ ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
}
- mutex_unlock(&ctx->lock);
+
+ ivpu_dbg_bo(vdev, bo, "alloc");
+
+ mutex_unlock(&bo->lock);
return ret;
}
-static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
+static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- struct ivpu_mmu_context *ctx = bo->ctx;
- ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
- ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
+ lockdep_assert_held(&bo->lock);
- mutex_lock(&bo->lock);
+ ivpu_dbg_bo(vdev, bo, "unbind");
+
+ /* TODO: dma_unmap */
if (bo->mmu_mapped) {
- drm_WARN_ON(&vdev->drm, !bo->sgt);
- ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
+ drm_WARN_ON(&vdev->drm, !bo->vpu_addr);
+ drm_WARN_ON(&vdev->drm, !bo->base.sgt);
+ ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->base.sgt);
bo->mmu_mapped = false;
}
- mutex_lock(&ctx->lock);
- list_del(&bo->ctx_node);
- bo->vpu_addr = 0;
- bo->ctx = NULL;
- ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
- mutex_unlock(&ctx->lock);
+ if (bo->ctx) {
+ ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
+ bo->vpu_addr = 0;
+ bo->ctx = NULL;
+ }
+}
+static void ivpu_bo_unbind(struct ivpu_bo *bo)
+{
+ mutex_lock(&bo->lock);
+ ivpu_bo_unbind_locked(bo);
mutex_unlock(&bo->lock);
}
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
- struct ivpu_bo *bo, *tmp;
+ struct ivpu_bo *bo;
+
+ if (drm_WARN_ON(&vdev->drm, !ctx))
+ return;
- list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
- ivpu_bo_free_vpu_addr(bo);
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
+ mutex_lock(&bo->lock);
+ if (bo->ctx == ctx)
+ ivpu_bo_unbind_locked(bo);
+ mutex_unlock(&bo->lock);
+ }
+ mutex_unlock(&vdev->bo_list_lock);
}
-static struct ivpu_bo *
-ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
- u64 size, u32 flags, const struct ivpu_bo_ops *ops,
- const struct ivpu_addr_range *range, u64 user_ptr)
+struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size)
{
struct ivpu_bo *bo;
- int ret = 0;
-
- if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
- return ERR_PTR(-EINVAL);
- switch (flags & DRM_IVPU_BO_CACHE_MASK) {
- case DRM_IVPU_BO_CACHED:
- case DRM_IVPU_BO_UNCACHED:
- case DRM_IVPU_BO_WC:
- break;
- default:
+ if (size == 0 || !PAGE_ALIGNED(size))
return ERR_PTR(-EINVAL);
- }
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
- mutex_init(&bo->lock);
- bo->base.funcs = &ivpu_gem_funcs;
- bo->flags = flags;
- bo->ops = ops;
- bo->user_ptr = user_ptr;
-
- if (ops->type == IVPU_BO_TYPE_SHMEM)
- ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
- else
- drm_gem_private_object_init(&vdev->drm, &bo->base, size);
-
- if (ret) {
- ivpu_err(vdev, "Failed to initialize drm object\n");
- goto err_free;
- }
-
- if (flags & DRM_IVPU_BO_MAPPABLE) {
- ret = drm_gem_create_mmap_offset(&bo->base);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate mmap offset\n");
- goto err_release;
- }
- }
-
- if (mmu_context) {
- ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
- if (ret) {
- ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
- goto err_release;
- }
- }
+ bo->base.base.funcs = &ivpu_gem_funcs;
+ bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
- return bo;
+ INIT_LIST_HEAD(&bo->bo_list_node);
+ mutex_init(&bo->lock);
-err_release:
- drm_gem_object_release(&bo->base);
-err_free:
- kfree(bo);
- return ERR_PTR(ret);
+ return &bo->base.base;
}
-static void ivpu_bo_free(struct drm_gem_object *obj)
+static struct ivpu_bo *
+ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
{
- struct ivpu_bo *bo = to_ivpu_bo(obj);
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-
- if (bo->ctx)
- ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
- bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
- else
- ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
- (bool)bo->sgt, bo->mmu_mapped);
-
- drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ struct drm_gem_shmem_object *shmem;
+ struct ivpu_bo *bo;
- vunmap(bo->kvaddr);
+ switch (flags & DRM_IVPU_BO_CACHE_MASK) {
+ case DRM_IVPU_BO_CACHED:
+ case DRM_IVPU_BO_WC:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
- if (bo->ctx)
- ivpu_bo_free_vpu_addr(bo);
+ shmem = drm_gem_shmem_create(&vdev->drm, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
- if (bo->sgt)
- ivpu_bo_unmap_and_free_pages(bo);
+ bo = to_ivpu_bo(&shmem->base);
+ bo->base.map_wc = flags & DRM_IVPU_BO_WC;
+ bo->flags = flags;
- if (bo->base.import_attach)
- drm_prime_gem_destroy(&bo->base, bo->sgt);
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
- drm_gem_object_release(&bo->base);
+ ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
+ bo->vpu_addr, bo->base.base.size, flags);
- mutex_destroy(&bo->lock);
- kfree(bo);
+ return bo;
}
-static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_bo *bo = to_ivpu_bo(obj);
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-
- ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
- bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo), bo->ops->name);
+ struct ivpu_addr_range *range;
- if (obj->import_attach) {
- /* Drop the reference drm_gem_mmap_obj() acquired.*/
- drm_gem_object_put(obj);
- vma->vm_private_data = NULL;
- return dma_buf_mmap(obj->dma_buf, vma, 0);
- }
-
- vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
- vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
+ if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+ range = &vdev->hw->ranges.shave;
+ else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+ range = &vdev->hw->ranges.dma;
+ else
+ range = &vdev->hw->ranges.user;
- return 0;
+ return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
}
-static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
+static void ivpu_bo_free(struct drm_gem_object *obj)
{
+ struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
- loff_t npages = obj->size >> PAGE_SHIFT;
- int ret = 0;
- mutex_lock(&bo->lock);
+ mutex_lock(&vdev->bo_list_lock);
+ list_del(&bo->bo_list_node);
+ mutex_unlock(&vdev->bo_list_lock);
- if (!bo->sgt)
- ret = ivpu_bo_alloc_and_map_pages_locked(bo);
+ drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
- mutex_unlock(&bo->lock);
+ ivpu_dbg_bo(vdev, bo, "free");
- if (ret)
- return ERR_PTR(ret);
+ ivpu_bo_unbind(bo);
+ mutex_destroy(&bo->lock);
- return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
+ drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_gem_shmem_free(&bo->base);
}
-static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct ivpu_bo *bo = to_ivpu_bo(obj);
- loff_t npages = obj->size >> PAGE_SHIFT;
- pgoff_t page_offset;
- struct page *page;
- vm_fault_t ret;
- int err;
-
- mutex_lock(&bo->lock);
-
- if (!bo->sgt) {
- err = ivpu_bo_alloc_and_map_pages_locked(bo);
- if (err) {
- ret = vmf_error(err);
- goto unlock;
- }
- }
-
- /* We don't use vmf->pgoff since that has the fake offset */
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- if (page_offset >= npages) {
- ret = VM_FAULT_SIGBUS;
- } else {
- page = bo->pages[page_offset];
- ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
- }
-
-unlock:
- mutex_unlock(&bo->lock);
+static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
+ .cache_sgt_mapping = true,
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
- return ret;
+static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
+{
+ struct drm_device *dev = obj->dev;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME,
+ .owner = dev->driver->fops->owner,
+ .ops = &ivpu_bo_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ .resv = obj->resv,
+ };
+ void *sgt;
+
+ /*
+ * Make sure that pages are allocated and dma-mapped before exporting the bo.
+ * DMA-mapping is required if the bo will be imported to the same device.
+ */
+ sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
+ if (IS_ERR(sgt))
+ return sgt;
+
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
-static const struct vm_operations_struct ivpu_vm_ops = {
- .fault = ivpu_vm_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_bo_free,
- .mmap = ivpu_bo_mmap,
- .vm_ops = &ivpu_vm_ops,
- .get_sg_table = ivpu_bo_get_sg_table,
+ .open = ivpu_bo_open,
+ .export = ivpu_bo_export,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
-int
-ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -537,23 +308,20 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
+ bo = ivpu_bo_create(vdev, size, args->flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
- ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
+ ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
if (!ret) {
args->vpu_addr = bo->vpu_addr;
args->handle = bo->handle;
}
- drm_gem_object_put(&bo->base);
-
- ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
- file_priv->ctx.id, bo->vpu_addr, ivpu_bo_size(bo), bo->flags);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
@@ -563,8 +331,8 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
{
const struct ivpu_addr_range *range;
struct ivpu_addr_range fixed_range;
+ struct iosys_map map;
struct ivpu_bo *bo;
- pgprot_t prot;
int ret;
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
@@ -578,81 +346,42 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
range = &vdev->hw->ranges.global;
}
- bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
+ bo = ivpu_bo_create(vdev, size, flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, vpu_addr, size, flags);
return NULL;
}
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_alloc_vpu_addr(bo, &vdev->gctx, range);
if (ret)
goto err_put;
- if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
- drm_clflush_pages(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- if (bo->flags & DRM_IVPU_BO_WC)
- set_pages_array_wc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
- else if (bo->flags & DRM_IVPU_BO_UNCACHED)
- set_pages_array_uc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
- bo->kvaddr = vmap(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT, VM_MAP, prot);
- if (!bo->kvaddr) {
- ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
+ ret = ivpu_bo_pin(bo);
+ if (ret)
goto err_put;
- }
- ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
- bo->vpu_addr, ivpu_bo_size(bo), flags);
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+ if (ret)
+ goto err_put;
return bo;
err_put:
- drm_gem_object_put(&bo->base);
+ drm_gem_object_put(&bo->base.base);
return NULL;
}
void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
- drm_gem_object_put(&bo->base);
-}
-
-struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
-{
- struct ivpu_device *vdev = to_ivpu_device(dev);
- struct dma_buf_attachment *attach;
- struct ivpu_bo *bo;
-
- attach = dma_buf_attach(buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_CAST(attach);
-
- get_dma_buf(buf);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
- bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
- if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
- goto err_detach;
- }
-
- lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
-
- bo->base.import_attach = attach;
-
- return &bo->base;
-
-err_detach:
- dma_buf_detach(buf, attach);
- dma_buf_put(buf);
- return ERR_CAST(bo);
+ drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_object_put(&bo->base.base);
}
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
- struct ivpu_device *vdev = to_ivpu_device(dev);
struct drm_ivpu_bo_info *args = data;
struct drm_gem_object *obj;
struct ivpu_bo *bo;
@@ -665,21 +394,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
bo = to_ivpu_bo(obj);
mutex_lock(&bo->lock);
-
- if (!bo->ctx) {
- ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
- goto unlock;
- }
- }
-
args->flags = bo->flags;
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
args->vpu_addr = bo->vpu_addr;
args->size = obj->size;
-unlock:
mutex_unlock(&bo->lock);
+
drm_gem_object_put(obj);
return ret;
}
@@ -714,41 +434,41 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
unsigned long dma_refcount = 0;
- if (bo->base.dma_buf && bo->base.dma_buf->file)
- dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
+ mutex_lock(&bo->lock);
+
+ if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
+ dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
+
+ drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
+ bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
+ bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
+
+ if (bo->base.base.import_attach)
+ drm_printf(p, " imported");
+
+ if (bo->base.pages)
+ drm_printf(p, " has_pages");
- drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
- bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo),
- kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
+ if (bo->mmu_mapped)
+ drm_printf(p, " mmu_mapped");
+
+ drm_printf(p, "\n");
+
+ mutex_unlock(&bo->lock);
}
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
- struct ivpu_file_priv *file_priv;
- unsigned long ctx_id;
struct ivpu_bo *bo;
- drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
- "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
+ drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
+ "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
- mutex_lock(&vdev->gctx.lock);
- list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
ivpu_bo_print_info(bo, p);
- mutex_unlock(&vdev->gctx.lock);
-
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
- if (!file_priv)
- continue;
-
- mutex_lock(&file_priv->ctx.lock);
- list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
- ivpu_bo_print_info(bo, p);
- mutex_unlock(&file_priv->ctx.lock);
-
- ivpu_file_priv_put(&file_priv);
- }
+ mutex_unlock(&vdev->bo_list_lock);
}
void ivpu_bo_list_print(struct drm_device *dev)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index a0b4d4a32b3b..d75cad0d3c74 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -6,84 +6,52 @@
#define __IVPU_GEM_H__
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
-struct dma_buf;
-struct ivpu_bo_ops;
struct ivpu_file_priv;
struct ivpu_bo {
- struct drm_gem_object base;
- const struct ivpu_bo_ops *ops;
-
+ struct drm_gem_shmem_object base;
struct ivpu_mmu_context *ctx;
- struct list_head ctx_node;
+ struct list_head bo_list_node;
struct drm_mm_node mm_node;
- struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
- struct sg_table *sgt;
- struct page **pages;
- bool mmu_mapped;
-
- void *kvaddr;
+ struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
u32 handle;
u32 flags;
- uintptr_t user_ptr;
- u32 job_status;
-};
-
-enum ivpu_bo_type {
- IVPU_BO_TYPE_SHMEM = 1,
- IVPU_BO_TYPE_INTERNAL,
- IVPU_BO_TYPE_PRIME,
-};
-
-struct ivpu_bo_ops {
- enum ivpu_bo_type type;
- const char *name;
- int (*alloc_pages)(struct ivpu_bo *bo);
- void (*free_pages)(struct ivpu_bo *bo);
- int (*map_pages)(struct ivpu_bo *bo);
- void (*unmap_pages)(struct ivpu_bo *bo);
+ u32 job_status; /* Valid only for command buffer */
+ bool mmu_mapped;
};
int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
-void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
-void ivpu_bo_list_print(struct drm_device *dev);
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
-struct ivpu_bo *
-ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
+struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
+struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
void ivpu_bo_free_internal(struct ivpu_bo *bo);
-struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
-void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
+
static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
{
- return container_of(obj, struct ivpu_bo, base);
+ return container_of(obj, struct ivpu_bo, base.base);
}
static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
- return bo->kvaddr;
+ return bo->base.vaddr;
}
static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
{
- return bo->base.size;
-}
-
-static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
-{
- if (offset > ivpu_bo_size(bo) || !bo->pages)
- return NULL;
-
- return bo->pages[offset / PAGE_SIZE];
+ return bo->base.base.size;
}
static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
@@ -96,20 +64,9 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
-static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
-{
- if (bo->flags & DRM_IVPU_BO_WC)
- return pgprot_writecombine(prot);
-
- if (bo->flags & DRM_IVPU_BO_UNCACHED)
- return pgprot_noncached(prot);
-
- return prot;
-}
-
static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
- return to_ivpu_device(bo->base.dev);
+ return to_ivpu_device(bo->base.base.dev);
}
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 1079e06255ba..b2909168a0a6 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -15,8 +15,11 @@ struct ivpu_hw_ops {
int (*power_down)(struct ivpu_device *vdev);
int (*reset)(struct ivpu_device *vdev);
bool (*is_idle)(struct ivpu_device *vdev);
+ int (*wait_for_idle)(struct ivpu_device *vdev);
void (*wdt_disable)(struct ivpu_device *vdev);
void (*diagnose_failure)(struct ivpu_device *vdev);
+ u32 (*profiling_freq_get)(struct ivpu_device *vdev);
+ void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
@@ -58,6 +61,8 @@ struct ivpu_hw_info {
u32 sku;
u16 config;
int dma_bits;
+ ktime_t d0i3_entry_host_ts;
+ u64 d0i3_entry_vpu_ts;
};
extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
@@ -85,6 +90,11 @@ static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
return vdev->hw->ops->is_idle(vdev);
};
+static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->wait_for_idle(vdev);
+};
+
static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, PM, "HW power down\n");
@@ -104,6 +114,16 @@ static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
vdev->hw->ops->wdt_disable(vdev);
};
+static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->profiling_freq_get(vdev);
+};
+
+static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ vdev->hw->ops->profiling_freq_drive(vdev, enable);
+};
+
/* Register indirect accesses */
static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 5c0246b9e522..e33dfe3089af 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -29,6 +29,7 @@
#define PLL_REF_CLK_FREQ (50 * 1000000)
#define PLL_SIMULATION_FREQ (10 * 1000000)
+#define PLL_PROF_CLK_FREQ (38400 * 1000)
#define PLL_DEFAULT_EPP_VALUE 0x80
#define TIM_SAFE_ENABLE 0xf1d0dead
@@ -37,7 +38,7 @@
#define TIMEOUT_US (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
-#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
@@ -90,6 +91,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = 10;
+ vdev->timeout.d0i3_entry_msg = 5;
}
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
@@ -502,6 +504,16 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret;
}
+static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
+{
+ ivpu_boot_dpu_active_drive(vdev, false);
+ ivpu_boot_pwr_island_isolation_drive(vdev, true);
+ ivpu_boot_pwr_island_trickle_drive(vdev, false);
+ ivpu_boot_pwr_island_drive(vdev, false);
+
+ return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
+}
+
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
@@ -600,25 +612,17 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
- int ret;
- u32 val;
-
- if (IVPU_WA(punit_disabled))
- return 0;
+ int ret = 0;
- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
- if (ret) {
- ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
- return ret;
+ if (ivpu_boot_pwr_domain_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable power domain\n");
+ ret = -EIO;
}
- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
- val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
- REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
-
- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
- if (ret)
- ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
return ret;
}
@@ -651,10 +655,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
- ret = ivpu_hw_37xx_reset(vdev);
- if (ret)
- ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
-
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -718,15 +718,28 @@ static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
+static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+}
+
+static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
+{
+ vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
+ vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
+}
+
static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
- if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
- ivpu_err(vdev, "Failed to reset the VPU\n");
+ ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);
- if (ivpu_pll_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable PLL\n");
+ if (!ivpu_hw_37xx_is_idle(vdev))
+ ivpu_warn(vdev, "VPU not idle during power down\n");
+
+ if (ivpu_hw_37xx_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset VPU\n");
ret = -EIO;
}
@@ -756,6 +769,16 @@ static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
+static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev)
+{
+ return PLL_PROF_CLK_FREQ;
+}
+
+static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ /* Profiling freq is a debug feature. Unavailable on VPU 37XX. */
+}
+
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
@@ -867,17 +890,20 @@ static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
}
/* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ if (!status)
+ return false;
+
REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
- ivpu_ipc_irq_handler(vdev);
+ ivpu_ipc_irq_handler(vdev, wake_thread);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
@@ -894,17 +920,17 @@ static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
- return status;
+ return true;
}
/* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
- if (status == 0)
- return 0;
+ if (!status)
+ return false;
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
@@ -940,23 +966,27 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
- return status;
+ return true;
}
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
- u32 ret_irqv, ret_irqb;
+ bool irqv_handled, irqb_handled, wake_thread = false;
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
- ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
+ irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread);
+ irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq);
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
- return IRQ_RETVAL(ret_irqb | ret_irqv);
+ if (wake_thread)
+ return IRQ_WAKE_THREAD;
+ if (irqv_handled || irqb_handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
}
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
@@ -993,11 +1023,14 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.info_init = ivpu_hw_37xx_info_init,
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
+ .wait_for_idle = ivpu_hw_37xx_wait_for_idle,
.power_down = ivpu_hw_37xx_power_down,
.reset = ivpu_hw_37xx_reset,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
+ .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
+ .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
index 4083beb5e9db..f6fec1919202 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
@@ -240,6 +240,8 @@
#define VPU_37XX_CPU_SS_TIM_GEN_CONFIG 0x06021008u
#define VPU_37XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+#define VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT 0x06029000u
+
#define VPU_37XX_CPU_SS_DOORBELL_0 0x06300000u
#define VPU_37XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index e691c49c9841..eba2fdef2ace 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -39,6 +39,7 @@
#define TIMEOUT_US (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u
@@ -139,18 +140,21 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
vdev->timeout.tdr = 2000000;
vdev->timeout.reschedule_suspend = 1000;
vdev->timeout.autosuspend = -1;
+ vdev->timeout.d0i3_entry_msg = 500;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = -1;
+ vdev->timeout.d0i3_entry_msg = 100;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = 10;
+ vdev->timeout.d0i3_entry_msg = 5;
}
}
@@ -824,12 +828,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
{
int ret;
- ret = ivpu_hw_40xx_reset(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
- return ret;
- }
-
ret = ivpu_hw_40xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -898,10 +896,23 @@ static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
+static int ivpu_hw_40xx_wait_for_idle(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+}
+
+static void ivpu_hw_40xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
+{
+ vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
+ vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
+}
+
static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
+ ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
+
if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
ivpu_warn(vdev, "Failed to reset the VPU\n");
@@ -933,6 +944,19 @@ static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
+static u32 ivpu_hw_40xx_profiling_freq_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->pll.profiling_freq;
+}
+
+static void ivpu_hw_40xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (enable)
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
+ else
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+}
+
/* Register indirect accesses */
static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
@@ -1023,13 +1047,12 @@ static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
}
/* Handler for IRQs from VPU core (irqV) */
-static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- irqreturn_t ret = IRQ_NONE;
if (!status)
- return IRQ_NONE;
+ return false;
REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
@@ -1037,7 +1060,7 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
- ret |= ivpu_ipc_irq_handler(vdev);
+ ivpu_ipc_irq_handler(vdev, wake_thread);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
@@ -1054,17 +1077,17 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
- return ret;
+ return true;
}
/* Handler for IRQs from Buttress core (irqB) */
-static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
bool schedule_recovery = false;
u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
- if (status == 0)
- return IRQ_NONE;
+ if (!status)
+ return false;
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
@@ -1116,26 +1139,27 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
- return IRQ_HANDLED;
+ return true;
}
static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
{
+ bool irqv_handled, irqb_handled, wake_thread = false;
struct ivpu_device *vdev = ptr;
- irqreturn_t ret = IRQ_NONE;
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
- ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+ irqv_handled = ivpu_hw_40xx_irqv_handler(vdev, irq, &wake_thread);
+ irqb_handled = ivpu_hw_40xx_irqb_handler(vdev, irq);
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
- if (ret & IRQ_WAKE_THREAD)
+ if (wake_thread)
return IRQ_WAKE_THREAD;
-
- return ret;
+ if (irqv_handled || irqb_handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
}
static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
@@ -1185,11 +1209,14 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.info_init = ivpu_hw_40xx_info_init,
.power_up = ivpu_hw_40xx_power_up,
.is_idle = ivpu_hw_40xx_is_idle,
+ .wait_for_idle = ivpu_hw_40xx_wait_for_idle,
.power_down = ivpu_hw_40xx_power_down,
.reset = ivpu_hw_40xx_reset,
.boot_fw = ivpu_hw_40xx_boot_fw,
.wdt_disable = ivpu_hw_40xx_wdt_disable,
.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
+ .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
+ .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index a4ca40b184d4..e86621f16f85 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -5,7 +5,7 @@
#include <linux/genalloc.h>
#include <linux/highmem.h>
-#include <linux/kthread.h>
+#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include "ivpu_drv.h"
@@ -17,19 +17,12 @@
#include "ivpu_pm.h"
#define IPC_MAX_RX_MSG 128
-#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD)
struct ivpu_ipc_tx_buf {
struct ivpu_ipc_hdr ipc;
struct vpu_jsm_msg jsm;
};
-struct ivpu_ipc_rx_msg {
- struct list_head link;
- struct ivpu_ipc_hdr *ipc_hdr;
- struct vpu_jsm_msg *jsm_msg;
-};
-
static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
@@ -139,8 +132,49 @@ static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
}
-void
-ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
+static void
+ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg;
+
+ lockdep_assert_held(&ipc->cons_lock);
+ lockdep_assert_irqs_disabled();
+
+ rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
+ if (!rx_msg) {
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ return;
+ }
+
+ atomic_inc(&ipc->rx_msg_count);
+
+ rx_msg->ipc_hdr = ipc_hdr;
+ rx_msg->jsm_msg = jsm_msg;
+ rx_msg->callback = cons->rx_callback;
+
+ if (rx_msg->callback) {
+ list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
+ } else {
+ spin_lock(&cons->rx_lock);
+ list_add_tail(&rx_msg->link, &cons->rx_msg_list);
+ spin_unlock(&cons->rx_lock);
+ wake_up(&cons->rx_msg_wq);
+ }
+}
+
+static void
+ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
+{
+ list_del(&rx_msg->link);
+ ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ atomic_dec(&vdev->ipc->rx_msg_count);
+ kfree(rx_msg);
+}
+
+void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
@@ -148,13 +182,15 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
cons->channel = channel;
cons->tx_vpu_addr = 0;
cons->request_id = 0;
- spin_lock_init(&cons->rx_msg_lock);
+ cons->aborted = false;
+ cons->rx_callback = rx_callback;
+ spin_lock_init(&cons->rx_lock);
INIT_LIST_HEAD(&cons->rx_msg_list);
init_waitqueue_head(&cons->rx_msg_wq);
- spin_lock_irq(&ipc->cons_list_lock);
+ spin_lock_irq(&ipc->cons_lock);
list_add_tail(&cons->link, &ipc->cons_list);
- spin_unlock_irq(&ipc->cons_list_lock);
+ spin_unlock_irq(&ipc->cons_lock);
}
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
@@ -162,18 +198,14 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
- spin_lock_irq(&ipc->cons_list_lock);
+ spin_lock_irq(&ipc->cons_lock);
list_del(&cons->link);
- spin_unlock_irq(&ipc->cons_list_lock);
-
- spin_lock_irq(&cons->rx_msg_lock);
- list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
- list_del(&rx_msg->link);
- ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
- atomic_dec(&ipc->rx_msg_count);
- kfree(rx_msg);
- }
- spin_unlock_irq(&cons->rx_msg_lock);
+ spin_unlock_irq(&ipc->cons_lock);
+
+ spin_lock_irq(&cons->rx_lock);
+ list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ spin_unlock_irq(&cons->rx_lock);
ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
@@ -202,52 +234,61 @@ unlock:
return ret;
}
+static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
+{
+ bool ret;
+
+ spin_lock_irq(&cons->rx_lock);
+ ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
+ spin_unlock_irq(&cons->rx_lock);
+
+ return ret;
+}
+
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf,
- struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
+ struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
- struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg;
int wait_ret, ret = 0;
+ if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
+ return -EINVAL;
+
wait_ret = wait_event_timeout(cons->rx_msg_wq,
- (IS_KTHREAD() && kthread_should_stop()) ||
- !list_empty(&cons->rx_msg_list),
+ ivpu_ipc_rx_need_wakeup(cons),
msecs_to_jiffies(timeout_ms));
- if (IS_KTHREAD() && kthread_should_stop())
- return -EINTR;
-
if (wait_ret == 0)
return -ETIMEDOUT;
- spin_lock_irq(&cons->rx_msg_lock);
+ spin_lock_irq(&cons->rx_lock);
+ if (cons->aborted) {
+ spin_unlock_irq(&cons->rx_lock);
+ return -ECANCELED;
+ }
rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
if (!rx_msg) {
- spin_unlock_irq(&cons->rx_msg_lock);
+ spin_unlock_irq(&cons->rx_lock);
return -EAGAIN;
}
- list_del(&rx_msg->link);
- spin_unlock_irq(&cons->rx_msg_lock);
if (ipc_buf)
memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
if (rx_msg->jsm_msg) {
- u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));
+ u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
ret = -EBADMSG;
}
- if (ipc_payload)
- memcpy(ipc_payload, rx_msg->jsm_msg, size);
+ if (jsm_msg)
+ memcpy(jsm_msg, rx_msg->jsm_msg, size);
}
- ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
- atomic_dec(&ipc->rx_msg_count);
- kfree(rx_msg);
-
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ spin_unlock_irq(&cons->rx_lock);
return ret;
}
@@ -260,7 +301,7 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;
- ivpu_ipc_consumer_add(vdev, &cons, channel);
+ ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
ret = ivpu_ipc_send(vdev, &cons, req);
if (ret) {
@@ -285,23 +326,19 @@ consumer_del:
return ret;
}
-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms)
+int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
{
struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg hb_resp;
int ret, hb_ret;
- ret = ivpu_rpm_get(vdev);
- if (ret < 0)
- return ret;
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
- ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp,
- channel, timeout_ms);
+ ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
if (ret != -ETIMEDOUT)
- goto rpm_put;
+ return ret;
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
@@ -311,7 +348,21 @@ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
ivpu_pm_schedule_recovery(vdev);
}
-rpm_put:
+ return ret;
+}
+
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
+{
+ int ret;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
+
ivpu_rpm_put(vdev);
return ret;
}
@@ -329,35 +380,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons
return false;
}
-static void
-ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
- struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
-{
- struct ivpu_ipc_info *ipc = vdev->ipc;
- struct ivpu_ipc_rx_msg *rx_msg;
- unsigned long flags;
-
- lockdep_assert_held(&ipc->cons_list_lock);
-
- rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
- if (!rx_msg) {
- ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
- return;
- }
-
- atomic_inc(&ipc->rx_msg_count);
-
- rx_msg->ipc_hdr = ipc_hdr;
- rx_msg->jsm_msg = jsm_msg;
-
- spin_lock_irqsave(&cons->rx_msg_lock, flags);
- list_add_tail(&rx_msg->link, &cons->rx_msg_list);
- spin_unlock_irqrestore(&cons->rx_msg_lock, flags);
-
- wake_up(&cons->rx_msg_wq);
-}
-
-int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons;
@@ -375,7 +398,7 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
if (vpu_addr == REG_IO_ERROR) {
ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
- return -EIO;
+ return;
}
ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
@@ -405,15 +428,15 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
dispatched = false;
- spin_lock_irqsave(&ipc->cons_list_lock, flags);
+ spin_lock_irqsave(&ipc->cons_lock, flags);
list_for_each_entry(cons, &ipc->cons_list, link) {
if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
- ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
+ ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
dispatched = true;
break;
}
}
- spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+ spin_unlock_irqrestore(&ipc->cons_lock, flags);
if (!dispatched) {
ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
@@ -421,7 +444,28 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- return 0;
+ if (wake_thread)
+ *wake_thread = !list_empty(&ipc->cb_msg_list);
+}
+
+irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg, *r;
+ struct list_head cb_msg_list;
+
+ INIT_LIST_HEAD(&cb_msg_list);
+
+ spin_lock_irq(&ipc->cons_lock);
+ list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
+ spin_unlock_irq(&ipc->cons_lock);
+
+ list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
+ rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ }
+
+ return IRQ_HANDLED;
}
int ivpu_ipc_init(struct ivpu_device *vdev)
@@ -456,10 +500,10 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
goto err_free_rx;
}
+ spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
- spin_lock_init(&ipc->cons_list_lock);
+ INIT_LIST_HEAD(&ipc->cb_msg_list);
drmm_mutex_init(&vdev->drm, &ipc->lock);
-
ivpu_ipc_reset(vdev);
return 0;
@@ -472,6 +516,13 @@ err_free_tx:
void ivpu_ipc_fini(struct ivpu_device *vdev)
{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ drm_WARN_ON(&vdev->drm, ipc->on);
+ drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
+ drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
+ drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
+
ivpu_ipc_mem_fini(vdev);
}
@@ -488,16 +539,27 @@ void ivpu_ipc_disable(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons, *c;
- unsigned long flags;
+ struct ivpu_ipc_rx_msg *rx_msg, *r;
+
+ drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
mutex_lock(&ipc->lock);
ipc->on = false;
mutex_unlock(&ipc->lock);
- spin_lock_irqsave(&ipc->cons_list_lock, flags);
- list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
+ spin_lock_irq(&ipc->cons_lock);
+ list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
+ spin_lock(&cons->rx_lock);
+ if (!cons->rx_callback)
+ cons->aborted = true;
+ list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ spin_unlock(&cons->rx_lock);
wake_up(&cons->rx_msg_wq);
- spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+ }
+ spin_unlock_irq(&ipc->cons_lock);
+
+ drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}
void ivpu_ipc_reset(struct ivpu_device *vdev)
@@ -505,6 +567,7 @@ void ivpu_ipc_reset(struct ivpu_device *vdev)
struct ivpu_ipc_info *ipc = vdev->ipc;
mutex_lock(&ipc->lock);
+ drm_WARN_ON(&vdev->drm, ipc->on);
memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index 68f5b6668e00..40ca3cc4e61f 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -42,13 +42,26 @@ struct ivpu_ipc_hdr {
u8 status;
} __packed __aligned(IVPU_IPC_ALIGNMENT);
+typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev,
+ struct ivpu_ipc_hdr *ipc_hdr,
+ struct vpu_jsm_msg *jsm_msg);
+
+struct ivpu_ipc_rx_msg {
+ struct list_head link;
+ struct ivpu_ipc_hdr *ipc_hdr;
+ struct vpu_jsm_msg *jsm_msg;
+ ivpu_ipc_rx_callback_t callback;
+};
+
struct ivpu_ipc_consumer {
struct list_head link;
u32 channel;
u32 tx_vpu_addr;
u32 request_id;
+ bool aborted;
+ ivpu_ipc_rx_callback_t rx_callback;
- spinlock_t rx_msg_lock; /* Protects rx_msg_list */
+ spinlock_t rx_lock; /* Protects rx_msg_list and aborted */
struct list_head rx_msg_list;
wait_queue_head_t rx_msg_wq;
};
@@ -60,8 +73,9 @@ struct ivpu_ipc_info {
atomic_t rx_msg_count;
- spinlock_t cons_list_lock; /* Protects cons_list */
+ spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */
struct list_head cons_list;
+ struct list_head cb_msg_list;
atomic_t request_id;
struct mutex lock; /* Lock on status */
@@ -75,19 +89,22 @@ void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
-int ivpu_ipc_irq_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
+irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
- u32 channel);
+ u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
- struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload,
+ struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
unsigned long timeout_ms);
+int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms);
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms);
#endif /* __IVPU_IPC_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 8983e3a4fdf9..7206cf9cdb4a 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -7,7 +7,6 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
-#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
@@ -24,10 +23,6 @@
#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535
-static unsigned int ivpu_tdr_timeout_ms;
-module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644);
-MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
-
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
ivpu_hw_reg_db_set(vdev, cmdq->db_id);
@@ -196,6 +191,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
entry->batch_buf_addr = job->cmd_buf_vpu_addr;
entry->job_id = job->job_id;
entry->flags = 0;
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
+ entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
wmb(); /* Ensure that tail is updated after filling entry */
header->tail = next_entry;
wmb(); /* Flush WC buffer for jobq header */
@@ -264,7 +261,7 @@ static void job_release(struct kref *ref)
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
- drm_gem_object_put(&job->bos[i]->base);
+ drm_gem_object_put(&job->bos[i]->base.base);
dma_fence_put(job->done_fence);
ivpu_file_priv_put(&job->file_priv);
@@ -340,23 +337,12 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ ivpu_stop_job_timeout_detection(vdev);
+
job_put(job);
return 0;
}
-static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
-{
- struct vpu_ipc_msg_payload_job_done *payload;
- struct vpu_jsm_msg *job_ret_msg = msg;
- int ret;
-
- payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;
-
- ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
- if (ret)
- ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
-}
-
void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
struct ivpu_job *job;
@@ -398,11 +384,13 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
if (ret)
goto err_xa_erase;
+ ivpu_start_job_timeout_detection(vdev);
+
ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
job->engine_idx, cmdq->jobq->header.tail);
- if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
+ if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
cmdq->jobq->header.head = cmdq->jobq->header.tail;
wmb(); /* Flush WC buffer for jobq header */
@@ -448,7 +436,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
}
bo = job->bos[CMD_BUF_IDX];
- if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
+ if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
ivpu_warn(vdev, "Buffer is already in use\n");
return -EBUSY;
}
@@ -468,7 +456,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
}
for (i = 0; i < buf_count; i++) {
- ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
+ ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
if (ret) {
ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
@@ -477,7 +465,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
for (i = 0; i < buf_count; i++) {
usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
- dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
+ dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
}
unlock_reservations:
@@ -562,61 +550,36 @@ free_handles:
return ret;
}
-static int ivpu_job_done_thread(void *arg)
+static void
+ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+ struct vpu_jsm_msg *jsm_msg)
{
- struct ivpu_device *vdev = (struct ivpu_device *)arg;
- struct ivpu_ipc_consumer cons;
- struct vpu_jsm_msg jsm_msg;
- bool jobs_submitted;
- unsigned int timeout;
+ struct vpu_ipc_msg_payload_job_done *payload;
int ret;
- ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
-
- ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
-
- while (!kthread_should_stop()) {
- timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
- jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
- ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
- if (!ret) {
- ivpu_job_done_message(vdev, &jsm_msg);
- } else if (ret == -ETIMEDOUT) {
- if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
- ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
- }
- }
+ if (!jsm_msg) {
+ ivpu_err(vdev, "IPC message has no JSM payload\n");
+ return;
}
- ivpu_ipc_consumer_del(vdev, &cons);
-
- ivpu_jobs_abort_all(vdev);
+ if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
+ ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
+ return;
+ }
- ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
- return 0;
+ payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
+ ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+ if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
}
-int ivpu_job_done_thread_init(struct ivpu_device *vdev)
+void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
{
- struct task_struct *thread;
-
- thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
- if (IS_ERR(thread)) {
- ivpu_err(vdev, "Failed to start job completion thread\n");
- return -EIO;
- }
-
- get_task_struct(thread);
- wake_up_process(thread);
-
- vdev->job_done_thread = thread;
-
- return 0;
+ ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
+ VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
}
-void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
- kthread_stop_put(vdev->job_done_thread);
+ ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
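Note that ivpu_test_mode is now tested with a bit-wise AND rather than equality, so several test modes can be enabled at once. A hedged sketch of what that implies; the bit values below are illustrative assumptions only, since the real definitions live in ivpu_drv.h and are not part of this hunk.

/* Assumed layout, for illustration only: */
#define EXAMPLE_TEST_MODE_NULL_HW		BIT(0)
#define EXAMPLE_TEST_MODE_NULL_SUBMISSION	BIT(1)

/* With a mask, NULL_HW and NULL_SUBMISSION can be combined: jobs are marked
 * as null submissions in the queue entry and still completed immediately in
 * ivpu_direct_job_submission() without ringing the real hardware. */
static bool example_null_hw_enabled(u32 test_mode)
{
	return test_mode & EXAMPLE_TEST_MODE_NULL_HW;
}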
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 5514c2d8a609..45a2f2ec82e5 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -59,8 +59,8 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
-int ivpu_job_done_thread_init(struct ivpu_device *vdev);
-void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
+void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
+void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 0c2fe7142024..8cea0dd731b9 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -4,6 +4,7 @@
*/
#include "ivpu_drv.h"
+#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
@@ -36,6 +37,17 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
@@ -65,6 +77,12 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
}
#undef IVPU_CASE_TO_STR
@@ -243,3 +261,23 @@ int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
+
+int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (IVPU_WA(disable_d0i3_msg))
+ return 0;
+
+ req.payload.pwr_d0i3_enter.send_response = 1;
+
+ ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
+ &resp, VPU_IPC_CHAN_GEN_CMD,
+ vdev->timeout.d0i3_entry_msg);
+ if (ret)
+ return ret;
+
+ return ivpu_hw_wait_for_idle(vdev);
+}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
index 66979a948c7c..ae75e5dbcc41 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.h
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -22,4 +22,5 @@ int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destinati
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
u64 trace_hw_component_mask);
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
+int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev);
#endif
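The IPC header now also exposes ivpu_ipc_send_receive_active(); the sketch below illustrates an assumed split between the two send/receive variants, namely that the _active form is for callers that already hold the device powered (such as the D0i3 entry path above) and therefore does no runtime-PM handling. That division of labour is an inference, not something stated in this diff, and the wrapper name is hypothetical.

static int example_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				enum vpu_ipc_msg_type expected_resp,
				struct vpu_jsm_msg *resp, u32 channel,
				unsigned long timeout_ms)
{
	int ret;

	ret = ivpu_rpm_get(vdev);	/* wake the device before talking to it */
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp,
					   channel, timeout_ms);
	ivpu_rpm_put(vdev);
	return ret;
}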
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 2538c78fbebe..2228c44b115f 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -230,7 +230,12 @@
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
-static char *ivpu_mmu_event_to_str(u32 cmd)
+#define IVPU_MMU_CERROR_NONE 0x0
+#define IVPU_MMU_CERROR_ILL 0x1
+#define IVPU_MMU_CERROR_ABT 0x2
+#define IVPU_MMU_CERROR_ATC_INV_SYNC 0x3
+
+static const char *ivpu_mmu_event_to_str(u32 cmd)
{
switch (cmd) {
case IVPU_MMU_EVT_F_UUT:
@@ -276,6 +281,22 @@ static char *ivpu_mmu_event_to_str(u32 cmd)
}
}
+static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
+{
+ switch (err) {
+ case IVPU_MMU_CERROR_NONE:
+ return "No CMDQ Error";
+ case IVPU_MMU_CERROR_ILL:
+ return "Illegal command";
+ case IVPU_MMU_CERROR_ABT:
+ return "External abort on CMDQ read";
+ case IVPU_MMU_CERROR_ATC_INV_SYNC:
+ return "Sync failed to complete ATS invalidation";
+ default:
+ return "Unknown CMDQ Error";
+ }
+}
+
static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
u32 val_ref;
@@ -479,10 +500,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
u64 val;
int ret;
- val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) |
- FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) |
- FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
- FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
+ val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);
ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
if (ret)
@@ -492,8 +510,15 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
- if (ret)
- ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
+ if (ret) {
+ u32 err;
+
+ val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
+ err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);
+
+ ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
+ ivpu_mmu_cmdq_err_to_str(err));
+ }
return ret;
}
@@ -750,9 +775,12 @@ int ivpu_mmu_init(struct ivpu_device *vdev)
ivpu_dbg(vdev, MMU, "Init..\n");
- drmm_mutex_init(&vdev->drm, &mmu->lock);
ivpu_mmu_config_check(vdev);
+ ret = drmm_mutex_init(&vdev->drm, &mmu->lock);
+ if (ret)
+ return ret;
+
ret = ivpu_mmu_structs_alloc(vdev);
if (ret)
return ret;
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index c1050a2df954..12a8c09d4547 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -5,6 +5,9 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
+#include <linux/set_memory.h>
+
+#include <drm/drm_cache.h>
#include "ivpu_drv.h"
#include "ivpu_hw.h"
@@ -39,12 +42,57 @@
#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
+static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
+{
+ dma_addr_t dma_addr;
+ struct page *page;
+ void *cpu;
+
+ page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ set_pages_array_wc(&page, 1);
+
+ dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(vdev->drm.dev, dma_addr))
+ goto err_free_page;
+
+ cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!cpu)
+ goto err_dma_unmap_page;
+
+ *dma = dma_addr;
+ return cpu;
+
+err_dma_unmap_page:
+ dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+err_free_page:
+ put_page(page);
+ return NULL;
+}
+
+static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
+{
+ struct page *page;
+
+ if (cpu_addr) {
+ page = vmalloc_to_page(cpu_addr);
+ vunmap(cpu_addr);
+ dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ set_pages_array_wb(&page, 1);
+ put_page(page);
+ }
+}
+
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
dma_addr_t pgd_dma;
- pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
- GFP_KERNEL);
+ pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
if (!pgtable->pgd_dma_ptr)
return -ENOMEM;
@@ -53,13 +101,6 @@ static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtab
return 0;
}
-static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
-{
- if (cpu_addr)
- dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
- dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
-}
-
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
@@ -84,19 +125,19 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt
pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
- ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
+ ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
}
kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
- ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
+ ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
}
kfree(pgtable->pmd_ptrs[pgd_idx]);
kfree(pgtable->pte_ptrs[pgd_idx]);
- ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
+ ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
}
- ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+ ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}
static u64*
@@ -108,7 +149,7 @@ ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
if (pud_dma_ptr)
return pud_dma_ptr;
- pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
+ pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
if (!pud_dma_ptr)
return NULL;
@@ -131,7 +172,7 @@ err_free_pmd_ptrs:
kfree(pgtable->pmd_ptrs[pgd_idx]);
err_free_pud_dma_ptr:
- ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
+ ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
return NULL;
}
@@ -145,7 +186,7 @@ ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
if (pmd_dma_ptr)
return pmd_dma_ptr;
- pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+ pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
if (!pmd_dma_ptr)
return NULL;
@@ -160,7 +201,7 @@ ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
return pmd_dma_ptr;
err_free_pmd_dma_ptr:
- ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
+ ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
return NULL;
}
@@ -174,7 +215,7 @@ ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
if (pte_dma_ptr)
return pte_dma_ptr;
- pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+ pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
if (!pte_dma_ptr)
return NULL;
@@ -249,38 +290,6 @@ static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_ad
ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}
-static void
-ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
-{
- struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
- u64 end_addr = vpu_addr + size;
-
- /* Align to PMD entry (2 MB) */
- vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
-
- while (vpu_addr < end_addr) {
- int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
- u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
-
- while (vpu_addr < end_addr && vpu_addr < pud_end) {
- int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
- u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
-
- while (vpu_addr < end_addr && vpu_addr < pmd_end) {
- int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
-
- clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
- IVPU_MMU_PGTABLE_SIZE);
- vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
- }
- clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
- IVPU_MMU_PGTABLE_SIZE);
- }
- clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
- }
- clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
-}
-
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
@@ -327,6 +336,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 prot;
u64 i;
+ if (drm_WARN_ON(&vdev->drm, !ctx))
+ return -EINVAL;
+
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
return -EINVAL;
@@ -349,10 +361,11 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
mutex_unlock(&ctx->lock);
return ret;
}
- ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
vpu_addr += size;
}
+ /* Ensure page table modifications are flushed from wc buffers to memory */
+ wmb();
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -369,8 +382,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
int ret;
u64 i;
- if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
- ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
+ if (drm_WARN_ON(&vdev->drm, !ctx))
+ return;
mutex_lock(&ctx->lock);
@@ -378,10 +391,11 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
size_t size = sg_dma_len(sg) + sg->offset;
ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
- ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
vpu_addr += size;
}
+ /* Ensure page table modifications are flushed from wc buffers to memory */
+ wmb();
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -390,28 +404,34 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
}
int
-ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
- const struct ivpu_addr_range *range,
- u64 size, struct drm_mm_node *node)
+ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node)
{
- lockdep_assert_held(&ctx->lock);
+ int ret;
+
+ WARN_ON(!range);
+ mutex_lock(&ctx->lock);
if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
- if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
- range->start, range->end, DRM_MM_INSERT_BEST))
- return 0;
+ ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
+ range->start, range->end, DRM_MM_INSERT_BEST);
+ if (!ret)
+ goto unlock;
}
- return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
- range->start, range->end, DRM_MM_INSERT_BEST);
+ ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
+ range->start, range->end, DRM_MM_INSERT_BEST);
+unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
}
void
-ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
+ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
- lockdep_assert_held(&ctx->lock);
-
+ mutex_lock(&ctx->lock);
drm_mm_remove_node(node);
+ mutex_unlock(&ctx->lock);
}
static int
@@ -421,7 +441,6 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
int ret;
mutex_init(&ctx->lock);
- INIT_LIST_HEAD(&ctx->bo_list);
ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
if (ret) {
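The context code above drops the clflush_cache_range() walk because page-table pages are now mapped write-combining by ivpu_pgtable_alloc_page(). Below is a condensed sketch of the assumed ordering contract; the helper is hypothetical, and only wmb() plus ivpu_mmu_invalidate_tlb() appear in the patch itself.

/* WC-mapped page tables are not cached, so nothing needs flushing from the
 * CPU caches; the only requirement is that pending write-combining buffers
 * drain before the device is asked to re-walk the tables. */
static void example_update_pte_and_invalidate(struct ivpu_device *vdev,
					      struct ivpu_mmu_context *ctx,
					      u64 *pte, u64 pte_val)
{
	WRITE_ONCE(*pte, pte_val);		/* write through the WC mapping */
	wmb();					/* drain WC buffers to memory */
	if (ivpu_mmu_invalidate_tlb(vdev, ctx->id))
		ivpu_warn(vdev, "Failed to invalidate TLB\n");
}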
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index f15d8c630d8a..535db3a1fc74 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -23,10 +23,9 @@ struct ivpu_mmu_pgtable {
};
struct ivpu_mmu_context {
- struct mutex lock; /* protects: mm, pgtable, bo_list */
+ struct mutex lock; /* Protects: mm, pgtable */
struct drm_mm mm;
struct ivpu_mmu_pgtable pgtable;
- struct list_head bo_list;
u32 id;
};
@@ -39,11 +38,9 @@ int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
- const struct ivpu_addr_range *range,
- u64 size, struct drm_mm_node *node);
-void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx,
- struct drm_mm_node *node);
+int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node);
+void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 0ace218783c8..0af8864cb3b5 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -15,6 +15,7 @@
#include "ivpu_fw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
@@ -22,6 +23,10 @@ static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
+static unsigned long ivpu_tdr_timeout_ms;
+module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
@@ -69,27 +74,31 @@ retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
- return ret;
+ goto err_power_down;
}
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
- ivpu_hw_power_down(vdev);
- return ret;
+ goto err_power_down;
}
ret = ivpu_boot(vdev);
- if (ret) {
- ivpu_mmu_disable(vdev);
- ivpu_hw_power_down(vdev);
- if (!ivpu_fw_is_cold_boot(vdev)) {
- ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret);
- ivpu_pm_prepare_cold_boot(vdev);
- goto retry;
- } else {
- ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
- }
+ if (ret)
+ goto err_mmu_disable;
+
+ return 0;
+
+err_mmu_disable:
+ ivpu_mmu_disable(vdev);
+err_power_down:
+ ivpu_hw_power_down(vdev);
+
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ ivpu_pm_prepare_cold_boot(vdev);
+ goto retry;
+ } else {
+ ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
}
return ret;
@@ -136,6 +145,31 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
}
}
+static void ivpu_job_timeout_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
+ struct ivpu_device *vdev = pm->vdev;
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+
+ ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms);
+ ivpu_hw_diagnose_failure(vdev);
+
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
+{
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+
+ /* No-op if already queued */
+ queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
+}
+
+void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
+{
+ cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+}
+
int ivpu_pm_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
@@ -153,6 +187,8 @@ int ivpu_pm_suspend_cb(struct device *dev)
}
}
+ ivpu_jsm_pwr_d0i3_enter(vdev);
+
ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
@@ -188,6 +224,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
+ bool hw_is_idle = true;
int ret;
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
@@ -200,11 +237,16 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
return -EAGAIN;
}
+ if (!vdev->pm->suspend_reschedule_counter)
+ hw_is_idle = false;
+ else if (ivpu_jsm_pwr_d0i3_enter(vdev))
+ hw_is_idle = false;
+
ret = ivpu_suspend(vdev);
if (ret)
ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
- if (!vdev->pm->suspend_reschedule_counter) {
+ if (!hw_is_idle) {
ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
ivpu_pm_prepare_cold_boot(vdev);
} else {
@@ -250,9 +292,6 @@ int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
int ret;
- ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n",
- atomic_read(&vdev->drm.dev->power.usage_count));
-
ret = pm_runtime_get_if_active(vdev->drm.dev, false);
drm_WARN_ON(&vdev->drm, ret < 0);
@@ -307,6 +346,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
atomic_set(&pm->in_reset, 0);
INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
+ INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
if (ivpu_disable_recovery)
delay = -1;
@@ -321,6 +361,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
+ drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
cancel_work_sync(&vdev->pm->recovery_work);
}
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 044db150be07..97c6e0b0aa42 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -12,6 +12,7 @@ struct ivpu_device;
struct ivpu_pm_info {
struct ivpu_device *vdev;
+ struct delayed_work job_timeout_work;
struct work_struct recovery_work;
atomic_t in_reset;
atomic_t reset_counter;
@@ -37,5 +38,7 @@ int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
+void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
#endif /* __IVPU_PM_H__ */
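The "No-op if already queued" comment in ivpu_start_job_timeout_detection() is doing real work: queue_delayed_work() does not re-arm a pending item, so the timer measures the oldest outstanding job rather than the most recent submission, and the completion path re-arms it only while jobs remain. A small illustrative sketch of that property (the function name is hypothetical):

static void example_arm_tdr(struct ivpu_device *vdev, unsigned long timeout_ms)
{
	/* Returns false and keeps the original expiry if the work is already
	 * pending, so back-to-back submissions do not keep pushing the
	 * hang-detection deadline into the future. */
	if (!queue_delayed_work(system_wq, &vdev->pm->job_timeout_work,
				msecs_to_jiffies(timeout_ms)))
		return;	/* an earlier submission already armed the timer */
}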
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 6b71be92ba65..04c954258563 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -11,7 +11,10 @@
 * The below values will be used to construct the version info this way:
* fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
* VPU_BOOT_API_VER_MINOR;
- * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes.
+ * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes.
+ * This information is collected by using vpuip_2/application/vpuFirmware/make_std_fw_image.py.
+ * If a header is missing this info we ignore the header; if a header contains only
+ * partial info, a build error will be generated.
*/
/*
@@ -24,12 +27,12 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 12
+#define VPU_BOOT_API_VER_MINOR 20
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 2
+#define VPU_BOOT_API_VER_PATCH 4
/*
* Index in the API version table
@@ -63,6 +66,12 @@ struct vpu_firmware_header {
/* Size of memory require for firmware execution */
u32 runtime_size;
u32 shave_nn_fw_size;
+ /* Size of primary preemption buffer. */
+ u32 preemption_buffer_1_size;
+ /* Size of secondary preemption buffer. */
+ u32 preemption_buffer_2_size;
+ /* Space reserved for future preemption-related fields. */
+ u32 preemption_reserved[6];
};
/*
@@ -89,6 +98,14 @@ enum VPU_BOOT_L2_CACHE_CFG_TYPE {
VPU_BOOT_L2_CACHE_CFG_NUM = 2
};
+/** VPU MCA ECC signalling mode. By default, no signalling is used */
+enum VPU_BOOT_MCA_ECC_SIGNAL_TYPE {
+ VPU_BOOT_MCA_ECC_NONE = 0,
+ VPU_BOOT_MCA_ECC_CORR = 1,
+ VPU_BOOT_MCA_ECC_FATAL = 2,
+ VPU_BOOT_MCA_ECC_BOTH = 3
+};
+
/**
* Logging destinations.
*
@@ -131,9 +148,11 @@ enum vpu_trace_destination {
#define VPU_TRACE_PROC_BIT_ACT_SHV_3 22
#define VPU_TRACE_PROC_NO_OF_HW_DEVS 23
-/* KMB HW component IDs are sequential, so define first and last IDs. */
-#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_LRT
-#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_SHV_15
+/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
+#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
+#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
+#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST
+#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST
struct vpu_boot_l2_cache_config {
u8 use;
@@ -148,6 +167,25 @@ struct vpu_warm_boot_section {
u32 is_clear_op;
};
+/*
+ * When HW scheduling mode is enabled, a present period is defined.
+ * It will be used by VPU to swap between normal and focus priorities
+ * to prevent starving of normal priority band (when implemented).
+ * Host must provide a valid value at boot time in
+ * `vpu_focus_present_timer_ms`. If the value provided by the host is not within the
+ * defined range a default value will be used. Here we define the min. and max.
+ * allowed values and the and default value of the present period. Units are milliseconds.
+ */
+#define VPU_PRESENT_CALL_PERIOD_MS_DEFAULT 50
+#define VPU_PRESENT_CALL_PERIOD_MS_MIN 16
+#define VPU_PRESENT_CALL_PERIOD_MS_MAX 10000
+
+/**
+ * Macros to enable various operation modes within the VPU.
+ * To be defined as part of a 32-bit mask.
+ */
+#define VPU_OP_MODE_SURVIVABILITY 0x1
+
struct vpu_boot_params {
u32 magic;
u32 vpu_id;
@@ -218,6 +256,7 @@ struct vpu_boot_params {
* the threshold will not be logged); applies to every enabled logging
* destination and loggable HW component. See 'mvLog_t' enum for acceptable
* values.
+ * TODO: EISW-33556: Move log level definition (mvLog_t) to this file.
*/
u32 default_trace_level;
u32 boot_type;
@@ -249,7 +288,36 @@ struct vpu_boot_params {
u32 temp_sensor_period_ms;
/** PLL ratio for efficient clock frequency */
u32 pn_freq_pll_ratio;
- u32 pad4[28];
+ /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
+ u32 dvfs_mode;
+ /**
+ * Depending on DVFS Mode:
+ * On-demand: Default if 0.
+ * Bit 0-7 - uint8_t: Highest residency percent
+ * Bit 8-15 - uint8_t: High residency percent
+ * Bit 16-23 - uint8_t: Low residency percent
+ * Bit 24-31 - uint8_t: Lowest residency percent
+ * Bit 32-35 - unsigned 4b: PLL Ratio increase amount on highest residency
+ * Bit 36-39 - unsigned 4b: PLL Ratio increase amount on high residency
+ * Bit 40-43 - unsigned 4b: PLL Ratio decrease amount on low residency
+ * Bit 44-47 - unsigned 4b: PLL Ratio decrease amount on lowest frequency
+ * Bit 48-55 - uint8_t: Period (ms) for residency decisions
+ * Bit 56-63 - uint8_t: Averaging windows (as multiples of period. Max: 30 decimal)
+ * Power Save/Max Performance: Unused
+ */
+ u64 dvfs_param;
+ /**
+ * D0i3 delayed entry
+ * Bit0: Disable CPU state save on D0i2 entry flow.
+ * 0: Every D0i2 entry saves state. Save state IPC message ignored.
+ * 1: IPC message required to save state on D0i3 entry flow.
+ */
+ u32 d0i3_delayed_entry;
+ /* Time spent by VPU in D0i3 state */
+ u64 d0i3_residency_time_us;
+ /* Value of VPU perf counter at the time of entering D0i3 state. */
+ u64 d0i3_entry_vpu_ts;
+ u32 pad4[20];
/* Warm boot information: 0x400 - 0x43F */
u32 warm_boot_sections_count;
u32 warm_boot_start_address_reference;
@@ -274,8 +342,12 @@ struct vpu_boot_params {
u32 vpu_scheduling_mode;
/* Present call period in milliseconds. */
u32 vpu_focus_present_timer_ms;
- /* Unused/reserved: 0x478 - 0xFFF */
- u32 pad6[738];
+ /* VPU ECC Signaling */
+ u32 vpu_uses_ecc_mca_signal;
+ /* Values defined by VPU_OP_MODE* macros */
+ u32 vpu_operation_mode;
+ /* Unused/reserved: 0x480 - 0xFFF */
+ u32 pad6[736];
};
/*
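To make the dvfs_param bit layout documented above concrete, here is a hedged host-side packing sketch using FIELD_PREP()/GENMASK_ULL() from <linux/bitfield.h>. The numeric values are invented; only the bit positions come from the structure comment.

#include <linux/bitfield.h>

static u64 example_pack_dvfs_param(void)
{
	return FIELD_PREP(GENMASK_ULL(7, 0), 90) |	/* highest residency %    */
	       FIELD_PREP(GENMASK_ULL(15, 8), 70) |	/* high residency %       */
	       FIELD_PREP(GENMASK_ULL(23, 16), 30) |	/* low residency %        */
	       FIELD_PREP(GENMASK_ULL(31, 24), 10) |	/* lowest residency %     */
	       FIELD_PREP(GENMASK_ULL(35, 32), 2) |	/* PLL ratio + on highest */
	       FIELD_PREP(GENMASK_ULL(39, 36), 1) |	/* PLL ratio + on high    */
	       FIELD_PREP(GENMASK_ULL(43, 40), 1) |	/* PLL ratio - on low     */
	       FIELD_PREP(GENMASK_ULL(47, 44), 2) |	/* PLL ratio - on lowest  */
	       FIELD_PREP(GENMASK_ULL(55, 48), 10) |	/* decision period, ms    */
	       FIELD_PREP(GENMASK_ULL(63, 56), 4);	/* averaging windows      */
}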
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 2949ec8365bd..7da7622742be 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -22,12 +22,12 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 0
+#define VPU_JSM_API_VER_MINOR 15
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_JSM_API_VER_PATCH 1
+#define VPU_JSM_API_VER_PATCH 0
/*
* Index in the API version table
@@ -84,11 +84,13 @@
* Job flags bit masks.
*/
#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
+#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000
/*
* Sizes of the reserved areas in jobs, in bytes.
*/
-#define VPU_JOB_RESERVED_BYTES 16
+#define VPU_JOB_RESERVED_BYTES 8
+
/*
* Sizes of the reserved areas in job queues, in bytes.
*/
@@ -109,6 +111,20 @@
#define VPU_DYNDBG_CMD_MAX_LEN 96
/*
+ * For HWS command queue scheduling, we can prioritise command queues inside the
+ * same process with a relative in-process priority. Valid values for relative
+ * priority are given below - max and min.
+ */
+#define VPU_HWS_COMMAND_QUEUE_MAX_IN_PROCESS_PRIORITY 7
+#define VPU_HWS_COMMAND_QUEUE_MIN_IN_PROCESS_PRIORITY -7
+
+/*
+ * For HWS priority scheduling, we can have multiple realtime priority bands.
+ * They are numbered 0 to a MAX.
+ */
+#define VPU_HWS_MAX_REALTIME_PRIORITY_LEVEL 31U
+
+/*
* Job format.
*/
struct vpu_job_queue_entry {
@@ -117,8 +133,14 @@ struct vpu_job_queue_entry {
u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
u64 root_page_table_addr; /**< Address of root page table to use for this job */
u64 root_page_table_update_counter; /**< Page tables update events counter */
- u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */
- u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */
+ u64 primary_preempt_buf_addr;
+ /**< Address of the primary preemption buffer to use for this job */
+ u32 primary_preempt_buf_size;
+ /**< Size of the primary preemption buffer to use for this job */
+ u32 secondary_preempt_buf_size;
+ /**< Size of secondary preemption buffer to use for this job */
+ u64 secondary_preempt_buf_addr;
+ /**< Address of secondary preemption buffer to use for this job */
u8 reserved_0[VPU_JOB_RESERVED_BYTES];
};
@@ -153,6 +175,46 @@ enum vpu_trace_entity_type {
};
/*
+ * HWS specific log buffer header details.
+ * Total size is 32 bytes.
+ */
+struct vpu_hws_log_buffer_header {
+ /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ u32 first_free_entry_index;
+ /* Incremented by VPU every time the VPU overwrites the 0th entry;
+ * initialised by host to 0.
+ */
+ u32 wraparound_count;
+ /*
+ * This is the number of entries that can be stored in the log buffer provided by the host.
+ * It is written by host before passing buffer to VPU. VPU should consider it read-only.
+ */
+ u64 num_of_entries;
+ u64 reserved[2];
+};
+
+/*
+ * HWS specific log buffer entry details.
+ * Total size is 32 bytes.
+ */
+struct vpu_hws_log_buffer_entry {
+ /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ u64 vpu_timestamp;
+ /*
+ * Operation type:
+ * 0 - context state change
+ * 1 - queue new work
+ * 2 - queue unwait sync object
+ * 3 - queue no more work
+ * 4 - queue wait sync object
+ */
+ u32 operation_type;
+ u32 reserved;
+ /* Operation data depends on operation type */
+ u64 operation_data[2];
+};
+
+/*
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
@@ -228,6 +290,23 @@ enum vpu_ipc_msg_type {
* deallocated or reassigned to another context.
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
+ /** Control command: Log buffer setting */
+ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
+ /* Control command: Suspend command queue. */
+ VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
+ /* Control command: Resume command queue */
+ VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
+ /* Control command: Resume engine after reset */
+ VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
+ /* Control command: Enable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_ENABLE = 0x111c,
+ /* Control command: Disable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_DISABLE = 0x111d,
+ /**
+ * Dump VPU state. To be used for debug purposes only.
+ * NOTE: Please introduce new ASYNC commands before this one.
+ */
+ VPU_JSM_MSG_STATE_DUMP = 0x11FF,
/* IPC Host -> Device, General commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
@@ -236,6 +315,10 @@ enum vpu_ipc_msg_type {
* Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
+ /**
+ * Perform the save procedure for the D0i3 entry
+ */
+ VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
/* IPC Device -> Host, Job completion */
VPU_JSM_MSG_JOB_DONE = 0x2100,
/* IPC Device -> Host, Async command completion */
@@ -304,11 +387,35 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
/** Response to control command: Set context scheduling properties */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
+ /** Response to control command: Log buffer setting */
+ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
+ /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
+ /* IPC Device -> Host, HWS completion of a context suspend request */
+ VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
+ /* Response to control command: Resume command queue */
+ VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
+ /* Response to control command: Resume engine command response */
+ VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
+ /* Response to control command: Enable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
+ /* Response to control command: Disable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e,
+ /**
+ * Response to state dump control command.
+ * NOTE: Please introduce new ASYNC responses before this one.
+ */
+ VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
/* IPC Device -> Host, General command completion */
VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
/** Response to VPU_JSM_MSG_DYNDBG_CONTROL. */
VPU_JSM_MSG_DYNDBG_CONTROL_RSP = 0x2301,
+ /**
+ * Acknowledgment of completion of the save procedure initiated by
+ * VPU_JSM_MSG_PWR_D0I3_ENTER
+ */
+ VPU_JSM_MSG_PWR_D0I3_ENTER_DONE = 0x2302,
};
enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
@@ -593,12 +700,12 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* Default quantum in 100ns units for scheduling across processes
* within a priority band
*/
- u64 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
/*
* Default grace period in 100ns units for processes that preempt each
* other within a priority band
*/
- u64 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
/*
* For normal priority band, specifies the target VPU percentage
* in situations when it's starved by the focus band.
@@ -608,32 +715,51 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
u32 reserved_0;
};
-/* HWS create command queue request */
+/*
+ * @brief HWS create command queue request.
+ * Host will create a command queue via this command.
+ * Note: Cmdq group is a handle of an object which
+ * may contain one or more command queues.
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
u64 process_id;
/* Host SSID */
u32 host_ssid;
- /* Zero Padding */
- u32 reserved;
+ /* Engine for which queue is being created */
+ u32 engine_idx;
+ /*
+ * Cmdq group may be set to 0 or equal to
+ * cmdq_id while each priority band contains
+ * only single engine instances.
+ */
+ u64 cmdq_group;
/* Command queue id */
u64 cmdq_id;
/* Command queue base */
u64 cmdq_base;
/* Command queue size */
u32 cmdq_size;
- /* Reserved */
+ /* Zero padding */
u32 reserved_0;
};
-/* HWS create command queue response */
+/*
+ * @brief HWS create command queue response.
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
/* Process id */
u64 process_id;
/* Host SSID */
u32 host_ssid;
- /* Zero Padding */
- u32 reserved;
+ /* Engine for which queue is being created */
+ u32 engine_idx;
+ /* Command queue group */
+ u64 cmdq_group;
/* Command queue id */
u64 cmdq_id;
};
@@ -661,7 +787,7 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
/* Inside realtime band assigns a further priority */
u32 realtime_priority_level;
/* Priority relative to other contexts in the same process */
- u32 in_process_priority;
+ s32 in_process_priority;
/* Zero padding / Reserved */
u32 reserved_1;
/* Context quantum relative to other contexts of same priority in the same process */
@@ -694,6 +820,123 @@ struct vpu_jsm_hws_register_db {
u64 cmdq_size;
};
+/*
+ * @brief Structure to set another buffer to be used for scheduling-related logging.
+ * The size of the logging buffer and the number of entries is defined as part of the
+ * buffer itself as described next.
+ * The log buffer received from the host is made up of;
+ * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * The header contains the number of log entries in the buffer.
+ * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
+ * 'struct vpu_hws_log_buffer_entry'.
+ * The entry contains the VPU timestamp, operation type and data.
+ * The host should provide the notify index value of the log buffer to VPU. This is a
+ * value defined within the log buffer which, when written to, will generate the
+ * scheduling log notification.
+ * The host should set engine_idx and vpu_log_buffer_va to 0 to disable logging
+ * for a particular engine.
+ * VPU will handle one log buffer for each of the supported engines.
+ * VPU should allow the logging to consume one host_ssid.
+ * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
+ * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP
+ * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
+ */
+struct vpu_ipc_msg_payload_hws_set_scheduling_log {
+ /* Engine ordinal */
+ u32 engine_idx;
+ /* Host SSID */
+ u32 host_ssid;
+ /*
+ * VPU log buffer virtual address.
+ * Set to 0 to disable logging for this engine.
+ */
+ u64 vpu_log_buffer_va;
+ /*
+ * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
+ * is generated when an event log is written to this index.
+ */
+ u64 notify_index;
+};
+
+/*
+ * @brief The scheduling log notification is generated by VPU when it writes
+ * an event into the log buffer at the notify_index. VPU notifies host with
+ * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
+ * message from VPU to host.
+ * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
+ * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
+ */
+struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
+ /* Engine ordinal */
+ u32 engine_idx;
+ /* Zero Padding */
+ u32 reserved_0;
+};
+
+/*
+ * @brief HWS suspend command queue request and done structure.
+ * Host will request the suspend of contexts and VPU will:
+ * - Suspend all work on this context
+ * - Preempt any running work
+ * - Asynchronously perform the above and return success immediately once
+ * all items above are started successfully
+ * - Notify the host of completion of these operations via
+ * VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
+ * - Reject any other context operations on a context with an in-flight
+ * suspend request running
+ * Same structure used when VPU notifies host of completion of a context suspend
+ * request. The ids and suspend fence value reported in this command will match
+ * the one in the request from the host to suspend the context. Once suspend is
+ * complete, VPU will not access any data relating to this command queue until
+ * it is resumed.
+ * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ
+ * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
+ */
+struct vpu_ipc_msg_payload_hws_suspend_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /*
+ * Suspend fence value - reported back by the VPU in the suspend-done
+ * notification once the context suspend is complete.
+ */
+ u64 suspend_fence_value;
+};
+
+/*
+ * @brief HWS Resume command queue request / response structure.
+ * Host will request the resume of a context:
+ * - VPU will resume all work on this context
+ * - Scheduler will allow this context to be scheduled
+ * @see VPU_JSM_MSG_HWS_RESUME_CMDQ
+ * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
+ */
+struct vpu_ipc_msg_payload_hws_resume_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/*
+ * @brief HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
+ * Host shall send this command to resume scheduling of any valid queue.
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
+ * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
+ */
+struct vpu_ipc_msg_payload_hws_resume_engine {
+ /* Engine to be resumed */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
/**
* Payload for VPU_JSM_MSG_TRACE_SET_CONFIG[_RSP] and
* VPU_JSM_MSG_TRACE_GET_CONFIG_RSP messages.
@@ -938,6 +1181,35 @@ struct vpu_ipc_msg_payload_dyndbg_control {
char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
};
+/**
+ * Payload for VPU_JSM_MSG_PWR_D0I3_ENTER
+ *
+ * This is a bi-directional payload.
+ */
+struct vpu_ipc_msg_payload_pwr_d0i3_enter {
+ /**
+ * 0: VPU_JSM_MSG_PWR_D0I3_ENTER_DONE is not sent to the host driver
+ * The driver will poll for D0i2 Idle state transitions.
+ * 1: VPU_JSM_MSG_PWR_D0I3_ENTER_DONE is sent after VPU state save is complete
+ */
+ u32 send_response;
+ u32 reserved_0;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ *
+ * Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
+ * corresponding to an 85% duty cycle. This payload allows the host to tune these
+ * values according to application requirements.
+ */
+struct vpu_ipc_msg_payload_pwr_dct_control {
+ /** Duty cycle active time in microseconds */
+ u32 dct_active_us;
+ /** Duty cycle inactive time in microseconds */
+ u32 dct_inactive_us;
+};
+
/*
* Payloads union, used to define complete message format.
*/
@@ -974,6 +1246,13 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_hws_destroy_cmdq hws_destroy_cmdq;
struct vpu_ipc_msg_payload_hws_set_context_sched_properties
hws_set_context_sched_properties;
+ struct vpu_ipc_msg_payload_hws_set_scheduling_log hws_set_scheduling_log;
+ struct vpu_ipc_msg_payload_hws_scheduling_log_notification hws_scheduling_log_notification;
+ struct vpu_ipc_msg_payload_hws_suspend_cmdq hws_suspend_cmdq;
+ struct vpu_ipc_msg_payload_hws_resume_cmdq hws_resume_cmdq;
+ struct vpu_ipc_msg_payload_hws_resume_engine hws_resume_engine;
+ struct vpu_ipc_msg_payload_pwr_d0i3_enter pwr_d0i3_enter;
+ struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
};
/*
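As a usage note for the scheduling-log structures above: the buffer handed to the VPU is one 32-byte vpu_hws_log_buffer_header followed by num_of_entries 32-byte vpu_hws_log_buffer_entry records. Below is a minimal host-side sizing and seeding sketch; the allocation and wiring of the buffer are left out and are assumptions.

static size_t example_hws_log_buffer_size(u64 num_of_entries)
{
	return sizeof(struct vpu_hws_log_buffer_header) +
	       num_of_entries * sizeof(struct vpu_hws_log_buffer_entry);
}

static void example_hws_log_buffer_seed(struct vpu_hws_log_buffer_header *hdr,
					u64 num_of_entries)
{
	/* The VPU owns first_free_entry_index and wraparound_count at runtime;
	 * the host only zeroes them and advertises how many entries fit. */
	hdr->first_free_entry_index = 0;
	hdr->wraparound_count = 0;
	hdr->num_of_entries = num_of_entries;
}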
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 2418418f7a50..3f7f6dfde7f2 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -9,4 +9,5 @@ qaic-y := \
mhi_controller.o \
qaic_control.o \
qaic_data.o \
- qaic_drv.o
+ qaic_drv.o \
+ qaic_timesync.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 5036e58e7235..832464f2833a 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -348,7 +348,7 @@ static struct mhi_channel_config aic100_channels[] = {
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
- .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
@@ -364,7 +364,39 @@ static struct mhi_channel_config aic100_channels[] = {
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
- .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .num = 23,
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
@@ -437,7 +469,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback re
pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
/* this event occurs in non-atomic context */
if (reason == MHI_CB_SYS_ERROR)
- qaic_dev_reset_clean_local_state(qdev, true);
+ qaic_dev_reset_clean_local_state(qdev);
}
static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
@@ -468,7 +500,7 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq)
+ int mhi_irq, bool shared_msi)
{
struct mhi_controller *mhi_cntrl;
int ret;
@@ -500,6 +532,10 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
return ERR_PTR(-ENOMEM);
mhi_cntrl->irq[0] = mhi_irq;
+
+ if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
+ mhi_cntrl->irq_flags = IRQF_SHARED;
+
mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
/* use latest configured timeout */
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 2ae45d768e24..500e7f4af2af 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq);
+ int mhi_irq, bool shared_msi);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index e3f4c30f3ffd..582836f9538f 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -31,6 +31,15 @@
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+enum __packed dev_states {
+ /* Device is offline or will be very soon */
+ QAIC_OFFLINE,
+ /* Device is booting, not clear if it's in a usable state */
+ QAIC_BOOT,
+ /* Device is fully operational */
+ QAIC_ONLINE,
+};
+
extern bool datapath_polling;
struct qaic_user {
@@ -121,8 +130,10 @@ struct qaic_device {
struct workqueue_struct *cntl_wq;
/* Synchronizes all the users of device during cleanup */
struct srcu_struct dev_lock;
- /* true: Device under reset; false: Device not under reset */
- bool in_reset;
+ /* Track the state of the device during resets */
+ enum dev_states dev_state;
+ /* true: single MSI is used to operate device */
+ bool single_msi;
/*
* true: A tx MHI transaction has failed and a rx buffer is still queued
* in control device. Such a buffer is considered lost rx buffer
@@ -137,6 +148,10 @@ struct qaic_device {
u32 (*gen_crc)(void *msg);
/* Validate the CRC of a control message */
bool (*valid_crc)(void *msg);
+ /* MHI "QAIC_TIMESYNC" channel device */
+ struct mhi_device *qts_ch;
+ /* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
+ struct workqueue_struct *qts_wq;
};
struct qaic_drm_device {
@@ -268,7 +283,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void wake_all_cntl(struct qaic_device *qdev);
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
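The tri-state dev_state above replaces the old in_reset boolean. Here is a sketch of the reader-side pattern that mirrors the ioctl paths changed later in this diff; the wrapping function is hypothetical.

static int example_check_online(struct qaic_device *qdev)
{
	int rcu_id, ret = 0;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	/* QAIC_BOOT is accepted only by paths that must reach the device during
	 * bring-up, such as control protocol version negotiation. */
	if (qdev->dev_state != QAIC_ONLINE)
		ret = -ENODEV;
	srcu_read_unlock(&qdev->dev_lock, rcu_id);

	return ret;
}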
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 388abd40024b..9e8a8cbadf6b 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -1022,7 +1022,8 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
int xfer_count = 0;
int retry_count;
- if (qdev->in_reset) {
+ /* Allow QAIC_BOOT state since we need to check control protocol version */
+ if (qdev->dev_state == QAIC_OFFLINE) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENODEV);
}
@@ -1138,7 +1139,7 @@ static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrapper
if (!list_is_first(&wrapper->list, &wrappers->list))
kref_put(&wrapper->ref_count, free_wrapper);
- wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
+ wrapper = add_wrapper(wrappers, sizeof(*wrapper));
if (!wrapper)
return -ENOMEM;
@@ -1306,7 +1307,7 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return -ENODEV;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 4a8e43a7a6a4..cf2898eda7ae 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -51,6 +51,7 @@
})
#define NUM_EVENTS 128
#define NUM_DELAYS 10
+#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())
static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
@@ -451,7 +452,7 @@ static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 s
* later
*/
buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
- max_order = min(MAX_ORDER - 1, get_order(size));
+ max_order = min(MAX_ORDER, get_order(size));
} else {
/* allocate a single page for book keeping */
nr_pages = 1;
@@ -689,7 +690,7 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -748,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -969,7 +970,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1058,6 +1059,16 @@ unlock_usr_srcu:
return ret;
}
+static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
+{
+ u32 avail = head - tail - 1;
+
+ if (head <= tail)
+ avail += q_size;
+
+ return avail;
+}
+
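A short worked example of fifo_space_avail() above; one slot is always left unused so that a full ring can be told apart from an empty one. The self-test wrapper is hypothetical.

static void example_fifo_space_selftest(void)
{
	/* 8-element ring: */
	WARN_ON(fifo_space_avail(2, 5, 8) != 4);	/* 2 - 5 - 1 + 8 = 4 free */
	WARN_ON(fifo_space_avail(5, 2, 8) != 2);	/* 5 - 2 - 1     = 2 free */
	WARN_ON(fifo_space_avail(3, 3, 8) != 7);	/* empty ring: 7 of 8 usable */
}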
static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
u32 head, u32 *ptail)
{
@@ -1066,27 +1077,20 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic
u32 tail = *ptail;
u32 avail;
- avail = head - tail;
- if (head <= tail)
- avail += dbc->nelem;
-
- --avail;
-
+ avail = fifo_space_avail(head, tail, dbc->nelem);
if (avail < slice->nents)
return -EAGAIN;
if (tail + slice->nents > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, slice->nents);
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * avail);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
reqs += avail;
avail = slice->nents - avail;
if (avail)
memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
} else {
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * slice->nents);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
}
*ptail = (tail + slice->nents) % dbc->nelem;
@@ -1094,46 +1098,31 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic
return 0;
}
-/*
- * Based on the value of resize we may only need to transmit first_n
- * entries and the last entry, with last_bytes to send from the last entry.
- * Note that first_n could be 0.
- */
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
- u64 resize, u32 dbc_id, u32 head, u32 *ptail)
+ u64 resize, struct dma_bridge_chan *dbc, u32 head,
+ u32 *ptail)
{
- struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
struct dbc_req *reqs = slice->reqs;
struct dbc_req *last_req;
u32 tail = *ptail;
- u64 total_bytes;
u64 last_bytes;
u32 first_n;
u32 avail;
- int ret;
- int i;
-
- avail = head - tail;
- if (head <= tail)
- avail += dbc->nelem;
- --avail;
+ avail = fifo_space_avail(head, tail, dbc->nelem);
- total_bytes = 0;
- for (i = 0; i < slice->nents; i++) {
- total_bytes += le32_to_cpu(reqs[i].len);
- if (total_bytes >= resize)
+ /*
+ * After this for loop is complete, first_n represents the index
+ * of the last DMA request of this slice that needs to be
+ * transferred after resizing and last_bytes represents DMA size
+ * of that request.
+ */
+ last_bytes = resize;
+ for (first_n = 0; first_n < slice->nents; first_n++)
+ if (last_bytes > le32_to_cpu(reqs[first_n].len))
+ last_bytes -= le32_to_cpu(reqs[first_n].len);
+ else
break;
- }
-
- if (total_bytes < resize) {
- /* User space should have used the full buffer path. */
- ret = -EINVAL;
- return ret;
- }
-
- first_n = i;
- last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize;
if (avail < (first_n + 1))
return -EAGAIN;
@@ -1142,22 +1131,21 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli
if (tail + first_n > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, first_n);
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * avail);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
last_req = reqs + avail;
avail = first_n - avail;
if (avail)
memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
} else {
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * first_n);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
}
}
- /* Copy over the last entry. Here we need to adjust len to the left over
+ /*
+ * Copy over the last entry. Here we need to adjust len to the left over
* size, and set src and dst to the entry it is copied to.
*/
- last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
+ last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
/*
@@ -1168,6 +1156,9 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli
last_req->len = cpu_to_le32((u32)last_bytes);
last_req->src_addr = reqs[first_n].src_addr;
last_req->dest_addr = reqs[first_n].dest_addr;
+ if (!last_bytes)
+ /* Disable DMA transfer */
+ last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;
*ptail = (tail + first_n + 1) % dbc->nelem;
@@ -1227,26 +1218,17 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
bo->req_id = dbc->next_req_id++;
list_for_each_entry(slice, &bo->slices, slice) {
- /*
- * If this slice does not fall under the given
- * resize then skip this slice and continue the loop
- */
- if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset)
- continue;
-
for (j = 0; j < slice->nents; j++)
slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
- /*
- * If it is a partial execute ioctl call then check if
- * resize has cut this slice short then do a partial copy
- * else do complete copy
- */
- if (is_partial && pexec[i].resize &&
- pexec[i].resize < slice->offset + slice->size)
+ if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
+ /* Configure the slice for no DMA transfer */
+ ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
+ else if (is_partial && pexec[i].resize < slice->offset + slice->size)
+ /* Configure the slice to be partially DMA transferred */
ret = copy_partial_exec_reqs(qdev, slice,
- pexec[i].resize - slice->offset,
- dbc->id, head, tail);
+ pexec[i].resize - slice->offset, dbc,
+ head, tail);
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
@@ -1359,7 +1341,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1466,6 +1448,16 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (datapath_polling) {
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ /*
+ * Normally datapath_polling will not have irqs enabled, but
+ * when running with only one MSI the interrupt is shared with
+ * MHI so it cannot be disabled. Return ASAP instead.
+ */
+ return IRQ_HANDLED;
+ }
+
if (!dbc->usr) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_HANDLED;
@@ -1488,7 +1480,8 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- disable_irq_nosync(irq);
+ if (!dbc->qdev->single_msi)
+ disable_irq_nosync(irq);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_WAKE_THREAD;
}
@@ -1504,7 +1497,7 @@ void irq_polling_work(struct work_struct *work)
rcu_id = srcu_read_lock(&dbc->ch_lock);
while (1) {
- if (dbc->qdev->in_reset) {
+ if (dbc->qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
@@ -1559,12 +1552,12 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
u32 tail;
rcu_id = srcu_read_lock(&dbc->ch_lock);
+ qdev = dbc->qdev;
head = readl(dbc->dbc_base + RSPHP_OFF);
if (head == U32_MAX) /* PCI link error */
goto error_out;
- qdev = dbc->qdev;
read_fifo:
if (!event_count) {
@@ -1645,14 +1638,14 @@ read_fifo:
goto read_fifo;
normal_out:
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
enable_irq(irq);
- else
+ else if (unlikely(datapath_polling))
schedule_work(&dbc->poll_work);
/* checking the fifo and enabling irqs is a race, missed event check */
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail != U32_MAX && head != tail) {
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
disable_irq_nosync(irq);
goto read_fifo;
}
@@ -1661,9 +1654,9 @@ normal_out:
error_out:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
enable_irq(irq);
- else
+ else if (unlikely(datapath_polling))
schedule_work(&dbc->poll_work);
return IRQ_HANDLED;
@@ -1694,7 +1687,7 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1763,7 +1756,7 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1854,7 +1847,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
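
The qaic_data.c changes above replace the open-coded ring-buffer math in copy_exec_reqs() and copy_partial_exec_reqs() with a shared fifo_space_avail() helper: head and tail index a circular request queue of q_size elements, and one slot is always left unused so that an empty queue (avail == q_size - 1) is distinguishable from a full one (avail == 0). A minimal standalone sketch of the same arithmetic, with made-up queue sizes and indices purely for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same computation as the driver's fifo_space_avail(): head is the
 * consumer index, tail the producer index, and one slot is sacrificed
 * so head == tail always means "empty".
 */
static uint32_t fifo_space_avail(uint32_t head, uint32_t tail, uint32_t q_size)
{
	uint32_t avail = head - tail - 1;

	if (head <= tail)
		avail += q_size;

	return avail;
}

int main(void)
{
	assert(fifo_space_avail(0, 0, 8) == 7); /* empty: 7 usable slots */
	assert(fifo_space_avail(3, 6, 8) == 4); /* 3 entries in flight */
	assert(fifo_space_avail(5, 4, 8) == 0); /* full: producer just behind consumer */
	printf("fifo_space_avail checks passed\n");
	return 0;
}

The fifo_at() macro added in the same patch then turns a slot index into a byte offset (index times the request element size), so both copy paths can memcpy into the queue without repeating the pointer arithmetic.
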
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 6f58095767df..2a313eb69b12 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -8,6 +8,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
@@ -27,6 +28,7 @@
#include "mhi_controller.h"
#include "qaic.h"
+#include "qaic_timesync.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -42,9 +44,6 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
-static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
-static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
-
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -63,7 +62,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file)
int ret;
rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto dev_unlock;
}
@@ -120,7 +119,7 @@ static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
if (qddev) {
qdev = qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (!qdev->in_reset) {
+ if (qdev->dev_state == QAIC_ONLINE) {
qaic_release_usr(qdev, usr);
for (i = 0; i < qdev->num_dbc; ++i)
if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
@@ -182,13 +181,6 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
- /*
- * drm_dev_unregister() sets the driver data to NULL and
- * drm_dev_register() does not update the driver data. During a SOC
- * reset drm dev is unregistered and registered again leaving the
- * driver data to NULL.
- */
- dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
ret = drm_dev_register(drm, 0);
if (ret)
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
@@ -202,7 +194,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
- drm_dev_get(drm);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -231,7 +222,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
- drm_dev_put(drm);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
@@ -253,8 +243,6 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
- qdev->in_reset = false;
-
dev_set_drvdata(&mhi_dev->dev, qdev);
qdev->cntl_ch = mhi_dev;
@@ -264,6 +252,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
return ret;
}
+ qdev->dev_state = QAIC_BOOT;
ret = get_cntl_version(qdev, NULL, &major, &minor);
if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
@@ -271,8 +260,8 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
ret = -EINVAL;
goto close_control;
}
-
- ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ qdev->dev_state = QAIC_ONLINE;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);
return ret;
@@ -290,7 +279,8 @@ static void qaic_notify_reset(struct qaic_device *qdev)
{
int i;
- qdev->in_reset = true;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
+ qdev->dev_state = QAIC_OFFLINE;
/* wake up any waiters to avoid waiting for timeouts at sync */
wake_all_cntl(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
@@ -298,21 +288,15 @@ static void qaic_notify_reset(struct qaic_device *qdev)
synchronize_srcu(&qdev->dev_lock);
}
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
int i;
qaic_notify_reset(qdev);
- /* remove drmdevs to prevent new users from coming in */
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-
/* start tearing things down */
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
-
- if (exit_reset)
- qdev->in_reset = false;
}
static void cleanup_qdev(struct qaic_device *qdev)
@@ -324,6 +308,7 @@ static void cleanup_qdev(struct qaic_device *qdev)
cleanup_srcu_struct(&qdev->dev_lock);
pci_set_drvdata(qdev->pdev, NULL);
destroy_workqueue(qdev->cntl_wq);
+ destroy_workqueue(qdev->qts_wq);
}
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -336,6 +321,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev)
return NULL;
+ qdev->dev_state = QAIC_OFFLINE;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
@@ -347,6 +333,12 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev->cntl_wq)
return NULL;
+ qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
+ if (!qdev->qts_wq) {
+ destroy_workqueue(qdev->cntl_wq);
+ return NULL;
+ }
+
pci_set_drvdata(pdev, qdev);
qdev->pdev = pdev;
@@ -424,14 +416,24 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (ret < 0)
- return ret;
+ ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ if (ret == -ENOSPC) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return ret;
- if (ret < 32) {
- pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n",
- __func__, ret);
- return -ENODEV;
+ /*
+ * Operate in one MSI mode. All interrupts will be directed to
+ * MSI0; every interrupt will wake up all the interrupt handlers
+ * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
+ * not disabled during DBC threaded handler, but only one thread
+ * will be allowed to run per DBC, so while it can be
+ * interrupted, it shouldn't race with itself.
+ */
+ qdev->single_msi = true;
+ pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ } else if (ret < 0) {
+ return ret;
}
mhi_irq = pci_irq_vector(pdev, 0);
@@ -439,15 +441,17 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
return mhi_irq;
for (i = 0; i < qdev->num_dbc; ++i) {
- ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1),
+ ret = devm_request_threaded_irq(&pdev->dev,
+ pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
"qaic_dbc", &qdev->dbc[i]);
if (ret)
return ret;
if (datapath_polling) {
- qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1);
- disable_irq_nosync(qdev->dbc[i].irq);
+ qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
+ if (!qdev->single_msi)
+ disable_irq_nosync(qdev->dbc[i].irq);
INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
}
}
@@ -479,14 +483,21 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto cleanup_qdev;
}
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq);
+ ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ if (ret)
+ goto cleanup_qdev;
+
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
+ qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_qdev;
+ goto cleanup_drm_dev;
}
return 0;
+cleanup_drm_dev:
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
cleanup_qdev:
cleanup_qdev(qdev);
return ret;
@@ -499,7 +510,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
if (!qdev)
return;
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
cleanup_qdev(qdev);
}
@@ -522,14 +534,13 @@ static void qaic_pci_reset_prepare(struct pci_dev *pdev)
qaic_notify_reset(qdev);
qaic_mhi_start_reset(qdev->mhi_cntrl);
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
}
static void qaic_pci_reset_done(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
- qdev->in_reset = false;
qaic_mhi_reset_done(qdev->mhi_cntrl);
}
@@ -586,6 +597,10 @@ static int __init qaic_init(void)
goto free_pci;
}
+ ret = qaic_timesync_init();
+ if (ret)
+ pr_debug("qaic: qaic_timesync_init failed %d\n", ret);
+
return 0;
free_pci:
@@ -611,6 +626,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_timesync_deinit();
mhi_driver_unregister(&qaic_mhi_driver);
pci_unregister_driver(&qaic_pci_driver);
}
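
The qaic_drv.c changes replace the boolean qdev->in_reset with a three-state dev_state: a device starts QAIC_OFFLINE, moves to QAIC_BOOT once the MHI control channel is prepared, and only reaches QAIC_ONLINE after the control protocol version is validated, at which point a KOBJ_ONLINE uevent is emitted (and KOBJ_OFFLINE on reset). Every ioctl entry point now checks dev_state != QAIC_ONLINE instead of in_reset, so a device that is still booting is rejected the same way as one undergoing reset. A small sketch of that lifecycle, with a hypothetical accepts_ioctls() helper standing in for the per-ioctl checks:

#include <stdbool.h>
#include <stdio.h>

/*
 * Device lifecycle states as introduced by the patch; the driver moves
 * OFFLINE -> BOOT once the control channel is up and BOOT -> ONLINE
 * once the control protocol version has been accepted.
 */
enum qaic_state {
	QAIC_OFFLINE,
	QAIC_BOOT,
	QAIC_ONLINE,
};

/*
 * Hypothetical helper mirroring the ioctl-entry checks: anything that
 * is not fully ONLINE is rejected with -ENODEV by the real driver.
 */
static bool qaic_accepts_ioctls(enum qaic_state state)
{
	return state == QAIC_ONLINE;
}

int main(void)
{
	enum qaic_state state = QAIC_OFFLINE;

	printf("offline: %d\n", qaic_accepts_ioctls(state)); /* 0 */
	state = QAIC_BOOT;    /* MHI control channel prepared */
	printf("boot:    %d\n", qaic_accepts_ioctls(state)); /* 0 */
	state = QAIC_ONLINE;  /* control protocol version accepted */
	printf("online:  %d\n", qaic_accepts_ioctls(state)); /* 1 */
	return 0;
}
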
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
new file mode 100644
index 000000000000..301f4462d51b
--- /dev/null
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+
+#include "qaic.h"
+#include "qaic_timesync.h"
+
+#define QTIMER_REG_OFFSET 0xa28
+#define QAIC_TIMESYNC_SIGNATURE 0x55aa
+#define QAIC_CONV_QTIMER_TO_US(qtimer) (mul_u64_u32_div(qtimer, 10, 192))
+
+static unsigned int timesync_delay_ms = 1000; /* 1 sec default */
+module_param(timesync_delay_ms, uint, 0600);
+MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations");
+
+enum qts_msg_type {
+ QAIC_TS_CMD_TO_HOST,
+ QAIC_TS_SYNC_REQ,
+ QAIC_TS_ACK_TO_HOST,
+ QAIC_TS_MSG_TYPE_MAX
+};
+
+/**
+ * struct qts_hdr - Timesync message header structure.
+ * @signature: Unique signature to identify the timesync message.
+ * @reserved_1: Reserved for future use.
+ * @reserved_2: Reserved for future use.
+ * @msg_type: sub-type of the timesync message.
+ * @reserved_3: Reserved for future use.
+ */
+struct qts_hdr {
+ __le16 signature;
+ __le16 reserved_1;
+ u8 reserved_2;
+ u8 msg_type;
+ __le16 reserved_3;
+} __packed;
+
+/**
+ * struct qts_timeval - Structure to carry time information.
+ * @tv_sec: Seconds part of the time.
+ * @tv_usec: uS (microseconds) part of the time.
+ */
+struct qts_timeval {
+ __le64 tv_sec;
+ __le64 tv_usec;
+} __packed;
+
+/**
+ * struct qts_host_time_sync_msg_data - Structure to denote the timesync message.
+ * @header: Header of the timesync message.
+ * @data: Time information.
+ */
+struct qts_host_time_sync_msg_data {
+ struct qts_hdr header;
+ struct qts_timeval data;
+} __packed;
+
+/**
+ * struct mqts_dev - MHI QAIC Timesync Control device.
+ * @qdev: Pointer to the root device struct driven by QAIC driver.
+ * @mhi_dev: Pointer to associated MHI device.
+ * @timer: Timer handle used for timesync.
+ * @qtimer_addr: Device QTimer register pointer.
+ * @buff_in_use: atomic variable to track if the sync_msg buffer is in use.
+ * @dev: Device pointer to qdev->pdev->dev stored for easy access.
+ * @sync_msg: Buffer used to send timesync message over MHI.
+ */
+struct mqts_dev {
+ struct qaic_device *qdev;
+ struct mhi_device *mhi_dev;
+ struct timer_list timer;
+ void __iomem *qtimer_addr;
+ atomic_t buff_in_use;
+ struct device *dev;
+ struct qts_host_time_sync_msg_data *sync_msg;
+};
+
+struct qts_resp_msg {
+ struct qts_hdr hdr;
+} __packed;
+
+struct qts_resp {
+ struct qts_resp_msg data;
+ struct work_struct work;
+ struct qaic_device *qdev;
+};
+
+#ifdef readq
+static u64 read_qtimer(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#else
+static u64 read_qtimer(const volatile void __iomem *addr)
+{
+ u64 low, high;
+
+ low = readl(addr);
+ high = readl(addr + sizeof(u32));
+ return low | (high << 32);
+}
+#endif
+
+static void qaic_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_dbg(mqtsdev->dev, "%s status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ atomic_set(&mqtsdev->buff_in_use, 0);
+}
+
+static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_err(mqtsdev->dev, "%s no data expected on dl channel\n", __func__);
+}
+
+static void qaic_timesync_timer(struct timer_list *t)
+{
+ struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
+ struct qts_host_time_sync_msg_data *sync_msg;
+ u64 device_qtimer_us;
+ u64 device_qtimer;
+ u64 host_time_us;
+ u64 offset_us;
+ u64 host_sec;
+ int ret;
+
+ if (atomic_read(&mqtsdev->buff_in_use)) {
+ dev_dbg(mqtsdev->dev, "%s buffer not free, schedule next cycle\n", __func__);
+ goto mod_timer;
+ }
+ atomic_set(&mqtsdev->buff_in_use, 1);
+
+ sync_msg = mqtsdev->sync_msg;
+ sync_msg->header.signature = cpu_to_le16(QAIC_TIMESYNC_SIGNATURE);
+ sync_msg->header.msg_type = QAIC_TS_SYNC_REQ;
+ /* Read host UTC time and convert to uS*/
+ host_time_us = div_u64(ktime_get_real_ns(), NSEC_PER_USEC);
+ device_qtimer = read_qtimer(mqtsdev->qtimer_addr);
+ device_qtimer_us = QAIC_CONV_QTIMER_TO_US(device_qtimer);
+ /* Offset between host UTC and device time */
+ offset_us = host_time_us - device_qtimer_us;
+
+ host_sec = div_u64(offset_us, USEC_PER_SEC);
+ sync_msg->data.tv_usec = cpu_to_le64(offset_us - host_sec * USEC_PER_SEC);
+ sync_msg->data.tv_sec = cpu_to_le64(host_sec);
+ ret = mhi_queue_buf(mqtsdev->mhi_dev, DMA_TO_DEVICE, sync_msg, sizeof(*sync_msg), MHI_EOT);
+ if (ret && (ret != -EAGAIN)) {
+ dev_err(mqtsdev->dev, "%s unable to queue to mhi:%d\n", __func__, ret);
+ return;
+ } else if (ret == -EAGAIN) {
+ atomic_set(&mqtsdev->buff_in_use, 0);
+ }
+
+mod_timer:
+ ret = mod_timer(t, jiffies + msecs_to_jiffies(timesync_delay_ms));
+ if (ret)
+ dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
+}
+
+static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct mqts_dev *mqtsdev;
+ struct timer_list *timer;
+ int ret;
+
+ mqtsdev = kzalloc(sizeof(*mqtsdev), GFP_KERNEL);
+ if (!mqtsdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ timer = &mqtsdev->timer;
+ mqtsdev->mhi_dev = mhi_dev;
+ mqtsdev->qdev = qdev;
+ mqtsdev->dev = &qdev->pdev->dev;
+
+ mqtsdev->sync_msg = kzalloc(sizeof(*mqtsdev->sync_msg), GFP_KERNEL);
+ if (!mqtsdev->sync_msg) {
+ ret = -ENOMEM;
+ goto free_mqts_dev;
+ }
+ atomic_set(&mqtsdev->buff_in_use, 0);
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ goto free_sync_msg;
+
+ /* Qtimer register pointer */
+ mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ timer_setup(timer, qaic_timesync_timer, 0);
+ timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
+ add_timer(timer);
+ dev_set_drvdata(&mhi_dev->dev, mqtsdev);
+
+ return 0;
+
+free_sync_msg:
+ kfree(mqtsdev->sync_msg);
+free_mqts_dev:
+ kfree(mqtsdev);
+out:
+ return ret;
+};
+
+static void qaic_timesync_remove(struct mhi_device *mhi_dev)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ del_timer_sync(&mqtsdev->timer);
+ mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
+ kfree(mqtsdev->sync_msg);
+ kfree(mqtsdev);
+}
+
+static const struct mhi_device_id qaic_timesync_match_table[] = {
+ { .chan = "QAIC_TIMESYNC_PERIODIC"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(mhi, qaic_timesync_match_table);
+
+static struct mhi_driver qaic_timesync_driver = {
+ .id_table = qaic_timesync_match_table,
+ .remove = qaic_timesync_remove,
+ .probe = qaic_timesync_probe,
+ .ul_xfer_cb = qaic_timesync_ul_xfer_cb,
+ .dl_xfer_cb = qaic_timesync_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_timesync_periodic",
+ },
+};
+
+static void qaic_boot_timesync_worker(struct work_struct *work)
+{
+ struct qts_resp *resp = container_of(work, struct qts_resp, work);
+ struct qts_host_time_sync_msg_data *req;
+ struct qts_resp_msg data = resp->data;
+ struct qaic_device *qdev = resp->qdev;
+ struct mhi_device *mhi_dev;
+ struct timespec64 ts;
+ int ret;
+
+ mhi_dev = qdev->qts_ch;
+ /* Queue the response message beforehand to avoid race conditions */
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ dev_warn(&mhi_dev->dev, "Failed to re-queue response buffer %d\n", ret);
+ return;
+ }
+
+ switch (data.hdr.msg_type) {
+ case QAIC_TS_CMD_TO_HOST:
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ break;
+
+ req->header = data.hdr;
+ req->header.msg_type = QAIC_TS_SYNC_REQ;
+ ktime_get_real_ts64(&ts);
+ req->data.tv_sec = cpu_to_le64(ts.tv_sec);
+ req->data.tv_usec = cpu_to_le64(div_u64(ts.tv_nsec, NSEC_PER_USEC));
+
+ ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, req, sizeof(*req), MHI_EOT);
+ if (ret) {
+ kfree(req);
+ dev_dbg(&mhi_dev->dev, "Failed to send request message. Error %d\n", ret);
+ }
+ break;
+ case QAIC_TS_ACK_TO_HOST:
+ dev_dbg(&mhi_dev->dev, "ACK received from device\n");
+ break;
+ default:
+ dev_err(&mhi_dev->dev, "Invalid message type %u.\n", data.hdr.msg_type);
+ }
+}
+
+static int qaic_boot_timesync_queue_resp(struct mhi_device *mhi_dev, struct qaic_device *qdev)
+{
+ struct qts_resp *resp;
+ int ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->qdev = qdev;
+ INIT_WORK(&resp->work, qaic_boot_timesync_worker);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ dev_warn(&mhi_dev->dev, "Failed to queue response buffer %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qaic_boot_timesync_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ mhi_unprepare_from_transfer(qdev->qts_ch);
+ qdev->qts_ch = NULL;
+}
+
+static int qaic_boot_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ qdev->qts_ch = mhi_dev;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+
+ ret = qaic_boot_timesync_queue_resp(mhi_dev, qdev);
+ if (ret) {
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+ qdev->qts_ch = NULL;
+ mhi_unprepare_from_transfer(mhi_dev);
+ }
+
+ return ret;
+}
+
+static void qaic_boot_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ kfree(mhi_result->buf_addr);
+}
+
+static void qaic_boot_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qts_resp *resp = container_of(mhi_result->buf_addr, struct qts_resp, data);
+
+ if (mhi_result->transaction_status || mhi_result->bytes_xferd != sizeof(resp->data)) {
+ kfree(resp);
+ return;
+ }
+
+ queue_work(resp->qdev->qts_wq, &resp->work);
+}
+
+static const struct mhi_device_id qaic_boot_timesync_match_table[] = {
+ { .chan = "QAIC_TIMESYNC"},
+ {},
+};
+
+static struct mhi_driver qaic_boot_timesync_driver = {
+ .id_table = qaic_boot_timesync_match_table,
+ .remove = qaic_boot_timesync_remove,
+ .probe = qaic_boot_timesync_probe,
+ .ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb,
+ .dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_timesync",
+ },
+};
+
+int qaic_timesync_init(void)
+{
+ int ret;
+
+ ret = mhi_driver_register(&qaic_timesync_driver);
+ if (ret)
+ return ret;
+ ret = mhi_driver_register(&qaic_boot_timesync_driver);
+
+ return ret;
+}
+
+void qaic_timesync_deinit(void)
+{
+ mhi_driver_unregister(&qaic_boot_timesync_driver);
+ mhi_driver_unregister(&qaic_timesync_driver);
+}
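
qaic_timesync_timer() computes the host/device clock offset by converting the device QTimer count to microseconds (the mul_u64_u32_div(qtimer, 10, 192) in QAIC_CONV_QTIMER_TO_US implies a 19.2 MHz counter), subtracting that from the host's UTC time in microseconds, and splitting the result into the tv_sec/tv_usec fields of the sync message. A standalone sketch of that arithmetic with made-up sample values:

#include <inttypes.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

/*
 * QTimer ticks at 19.2 MHz (inferred from the driver's macro), so
 * microseconds are ticks * 10 / 192. Note the driver uses
 * mul_u64_u32_div() to avoid the intermediate overflow that a plain
 * ticks * 10 could hit for very large counts; this sketch does not.
 */
static uint64_t qtimer_to_us(uint64_t ticks)
{
	return ticks * 10 / 192;
}

int main(void)
{
	/* Hypothetical sample values, purely to show the arithmetic. */
	uint64_t host_time_us = 1702224000ULL * USEC_PER_SEC + 123456;
	uint64_t device_qtimer = 96000000ULL;	/* 5 s of 19.2 MHz ticks */

	uint64_t device_us = qtimer_to_us(device_qtimer);
	uint64_t offset_us = host_time_us - device_us;

	/* Split the offset the same way the driver fills tv_sec/tv_usec. */
	uint64_t sec = offset_us / USEC_PER_SEC;
	uint64_t usec = offset_us - sec * USEC_PER_SEC;

	printf("device uptime: %" PRIu64 " us\n", device_us);
	printf("offset: %" PRIu64 " s + %" PRIu64 " us\n", sec, usec);
	return 0;
}
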
diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h
new file mode 100644
index 000000000000..851b7acd43bb
--- /dev/null
+++ b/drivers/accel/qaic/qaic_timesync.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QAIC_TIMESYNC_H__
+#define __QAIC_TIMESYNC_H__
+
+int qaic_timesync_init(void);
+void qaic_timesync_deinit(void);
+#endif /* __QAIC_TIMESYNC_H__ */
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0b7a01f38b65..d321ca7160d9 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2031,7 +2031,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
* HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
* evaluated to have functional panel brightness control.
*/
- acpi_device_fix_up_power_extended(device);
+ acpi_device_fix_up_power_children(device);
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index f007116a8427..3b4d048c4941 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
+/**
+ * acpi_device_fix_up_power_children - Force a device's children into D0.
+ * @adev: Parent device object whose children's power state is to be fixed up.
+ *
+ * Call acpi_device_fix_up_power() for @adev's children so long as they
+ * are reported as present and enabled.
+ */
+void acpi_device_fix_up_power_children(struct acpi_device *adev)
+{
+ acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
+
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
int state;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3a34a8c425fe..55437f5e0c3a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
- safe_halt();
+ raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
} else
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 15a3bdbd0755..9bd9f79cd409 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -448,6 +448,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus ExpertBook B1402CVA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+ },
+ },
+ {
/* Asus ExpertBook B1502CBA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 25a63d043c8e..0f77e0424066 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
+ if (!ctl_addr)
+ return -ENOMEM;
+
ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 64012cda4d12..d944d5298eca 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -112,10 +112,7 @@ config CFAG12864B
depends on X86
depends on FB
depends on KS0108
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
default n
help
If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
@@ -170,10 +167,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
depends on FB && I2C && INPUT
- select FB_SYS_FOPS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
select NEW_LEDS
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 729845bcc803..5ba19c339f08 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -51,16 +51,15 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct page *pages = virt_to_page(cfag12864b_buffer);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops cfag12864bfb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = cfag12864bfb_mmap,
};
@@ -72,6 +71,7 @@ static int cfag12864bfb_probe(struct platform_device *device)
if (!info)
goto none;
+ info->flags = FBINFO_VIRTFB;
info->screen_buffer = cfag12864b_buffer;
info->screen_size = CFAG12864B_SIZE;
info->fbops = &cfag12864bfb_ops;
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 3a2d88387224..a90430b7d07b 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -351,17 +351,16 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct ht16k33_priv *priv = info->par;
struct page *pages = virt_to_page(priv->fbdev.buffer);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops ht16k33_fb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_blank = ht16k33_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ht16k33_mmap,
};
@@ -640,6 +639,7 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update);
fbdev->info->fbops = &ht16k33_fb_ops;
+ fbdev->info->flags |= FBINFO_VIRTFB;
fbdev->info->screen_buffer = fbdev->buffer;
fbdev->info->screen_size = HT16K33_FB_SIZE;
fbdev->info->fix = ht16k33_fb_fix;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 855fdf5c3b4e..b6414e1e645b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -67,6 +67,7 @@ struct nbd_sock {
struct recv_thread_args {
struct work_struct work;
struct nbd_device *nbd;
+ struct nbd_sock *nsock;
int index;
};
@@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
}
}
+static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
+{
+ if (refcount_inc_not_zero(&nbd->config_refs)) {
+ /*
+ * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
+ * and reading nbd->config is ordered. The pair is the barrier in
+ * nbd_alloc_and_init_config(), avoid nbd->config_refs is set
+ * before nbd->config.
+ */
+ smp_mb__after_atomic();
+ return nbd->config;
+ }
+
+ return NULL;
+}
+
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
return BLK_EH_DONE;
}
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
cmd->status = BLK_STS_TIMEOUT;
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
goto done;
}
- config = nbd->config;
if (config->num_connections > 1 ||
(config->num_connections == 1 && nbd->tag_set.timeout)) {
@@ -489,15 +506,9 @@ done:
return BLK_EH_DONE;
}
-/*
- * Send or receive packet. Return a positive value on success and
- * negtive value on failue, and never return 0.
- */
-static int sock_xmit(struct nbd_device *nbd, int index, int send,
- struct iov_iter *iter, int msg_flags, int *sent)
+static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+ struct iov_iter *iter, int msg_flags, int *sent)
{
- struct nbd_config *config = nbd->config;
- struct socket *sock = config->socks[index]->sock;
int result;
struct msghdr msg;
unsigned int noreclaim_flag;
@@ -541,6 +552,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
}
/*
+ * Send or receive packet. Return a positive value on success and
+ * negative value on failure, and never return 0.
+ */
+static int sock_xmit(struct nbd_device *nbd, int index, int send,
+ struct iov_iter *iter, int msg_flags, int *sent)
+{
+ struct nbd_config *config = nbd->config;
+ struct socket *sock = config->socks[index]->sock;
+
+ return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
+}
+
+/*
* Different settings for sk->sk_sndtimeo can result in different return values
* if there is a signal pending when we enter sendmsg, because reasons?
*/
@@ -696,7 +720,7 @@ out:
return 0;
}
-static int nbd_read_reply(struct nbd_device *nbd, int index,
+static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
reply->magic = 0;
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
- result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
+ result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
if (result < 0) {
if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
@@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct request_queue *q = nbd->disk->queue;
- struct nbd_sock *nsock;
+ struct nbd_sock *nsock = args->nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
struct nbd_reply reply;
- if (nbd_read_reply(nbd, args->index, &reply))
+ if (nbd_read_reply(nbd, nsock->sock, &reply))
break;
/*
@@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
percpu_ref_put(&q->q_usage_counter);
}
- nsock = config->socks[args->index];
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
@@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_sock *nsock;
int ret;
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
return -EINVAL;
}
- config = nbd->config;
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
INIT_WORK(&args->work, recv_work);
args->index = i;
args->nbd = nbd;
+ args->nsock = nsock;
nsock->cookie++;
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
@@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
args->nbd = nbd;
+ args->nsock = config->socks[i];
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
@@ -1530,17 +1555,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
return error;
}
-static struct nbd_config *nbd_alloc_config(void)
+static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{
struct nbd_config *config;
+ if (WARN_ON(nbd->config))
+ return -EINVAL;
+
if (!try_module_get(THIS_MODULE))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config) {
module_put(THIS_MODULE);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
atomic_set(&config->recv_threads, 0);
@@ -1548,12 +1576,24 @@ static struct nbd_config *nbd_alloc_config(void)
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
- return config;
+
+ nbd->config = config;
+ /*
+ * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
+ * its pair is the barrier in nbd_get_config_unlocked().
+ * So nbd_get_config_unlocked() won't see nbd->config as null after
+ * refcount_inc_not_zero() succeed.
+ */
+ smp_mb__before_atomic();
+ refcount_set(&nbd->config_refs, 1);
+
+ return 0;
}
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct nbd_device *nbd;
+ struct nbd_config *config;
int ret = 0;
mutex_lock(&nbd_index_mutex);
@@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
ret = -ENXIO;
goto out;
}
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
- struct nbd_config *config;
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
mutex_lock(&nbd->config_lock);
if (refcount_inc_not_zero(&nbd->config_refs)) {
mutex_unlock(&nbd->config_lock);
goto out;
}
- config = nbd_alloc_config();
- if (IS_ERR(config)) {
- ret = PTR_ERR(config);
+ ret = nbd_alloc_and_init_config(nbd);
+ if (ret) {
mutex_unlock(&nbd->config_lock);
goto out;
}
- nbd->config = config;
- refcount_set(&nbd->config_refs, 1);
+
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
- } else if (nbd_disconnected(nbd->config)) {
+ } else if (nbd_disconnected(config)) {
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
}
@@ -1990,22 +2028,17 @@ again:
pr_err("nbd%d already in use\n", index);
return -EBUSY;
}
- if (WARN_ON(nbd->config)) {
- mutex_unlock(&nbd->config_lock);
- nbd_put(nbd);
- return -EINVAL;
- }
- config = nbd_alloc_config();
- if (IS_ERR(config)) {
+
+ ret = nbd_alloc_and_init_config(nbd);
+ if (ret) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
pr_err("couldn't allocate config\n");
- return PTR_ERR(config);
+ return ret;
}
- nbd->config = config;
- refcount_set(&nbd->config_refs, 1);
- set_bit(NBD_RT_BOUND, &config->runtime_flags);
+ config = nbd->config;
+ set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd);
if (ret)
goto out;
@@ -2208,7 +2241,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
mutex_unlock(&nbd_index_mutex);
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
nbd_put(nbd);
@@ -2216,7 +2250,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&nbd->config_lock);
- config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->pid) {
dev_err(nbd_to_dev(nbd),
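
The nbd.c changes above make the config pointer safely readable without taking config_lock: nbd_alloc_and_init_config() stores nbd->config before raising config_refs from 0 to 1 (smp_mb__before_atomic()), and nbd_get_config_unlocked() only dereferences the pointer after refcount_inc_not_zero() succeeds (smp_mb__after_atomic()), so a reader that wins the refcount race is guaranteed to see a non-NULL config. The same publication pattern expressed with C11 release/acquire atomics, as a single-threaded sketch rather than the kernel implementation (the real code additionally relies on refcounting and SRCU for teardown):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct config { int num_connections; };

struct device {
	struct config *config;	/* written before refs goes 0 -> 1 */
	atomic_uint    config_refs;
};

/*
 * Publisher: fill in the pointer first, then make it visible by
 * raising the refcount with release semantics (kernel analogue:
 * smp_mb__before_atomic() followed by refcount_set()).
 */
static int alloc_and_init_config(struct device *dev)
{
	struct config *cfg = calloc(1, sizeof(*cfg));

	if (!cfg)
		return -1;
	cfg->num_connections = 1;
	dev->config = cfg;
	atomic_store_explicit(&dev->config_refs, 1, memory_order_release);
	return 0;
}

/*
 * Reader: only dereference config after observing a non-zero refcount
 * with acquire semantics (kernel analogue: refcount_inc_not_zero()
 * followed by smp_mb__after_atomic()). The plain load here stands in
 * for the kernel's read-modify-write refcount operation.
 */
static struct config *get_config_unlocked(struct device *dev)
{
	unsigned int refs = atomic_load_explicit(&dev->config_refs,
						 memory_order_acquire);

	if (!refs)
		return NULL;
	return dev->config;
}

int main(void)
{
	struct device dev = { .config_refs = 0 };

	printf("before init: %p\n", (void *)get_config_unlocked(&dev));
	if (alloc_and_init_config(&dev))
		return 1;
	printf("after init:  %p\n", (void *)get_config_unlocked(&dev));
	free(dev.config);
	return 0;
}
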
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 22a3cf7f32e2..3021d58ca51c 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
return BLK_STS_OK;
}
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
- sector_t nr_sectors, enum req_op op)
+static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+ sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts;
- if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
- sts = null_handle_throttled(cmd);
- if (sts != BLK_STS_OK)
- return sts;
- }
-
if (op == REQ_OP_FLUSH) {
cmd->error = errno_to_blk_status(null_handle_flush(nullb));
goto out;
@@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
out:
nullb_complete_cmd(cmd);
- return BLK_STS_OK;
}
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->fake_timeout = should_timeout_request(rq) ||
blk_should_fake_timeout(rq->q);
- blk_mq_start_request(rq);
-
if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
@@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
+ blk_status_t sts = null_handle_throttled(cmd);
+
+ if (sts != BLK_STS_OK)
+ return sts;
+ }
+
+ blk_mq_start_request(rq);
+
if (is_poll) {
spin_lock(&nq->poll_lock);
list_add_tail(&rq->queuelist, &nq->poll_list);
@@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
if (cmd->fake_timeout)
return BLK_STS_OK;
- return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+ null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+ return BLK_STS_OK;
}
static void null_queue_rqs(struct request **rqlist)
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 25834557e486..43b09cf193bb 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,12 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
agpgart-y := backend.o generic.o isoch.o
-ifeq ($(CONFIG_DRM_LEGACY),y)
-agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
-agpgart-y += frontend.o
-endif
-
-
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
obj-$(CONFIG_AGP_ATI) += ati-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8771dcc9b8e2..5c36ab85f80b 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -185,15 +185,6 @@ void agp_put_bridge(struct agp_bridge_data *bridge);
int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
-/* Frontend routines. */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int agp_frontend_initialize(void);
-void agp_frontend_cleanup(void);
-#else
-static inline int agp_frontend_initialize(void) { return 0; }
-static inline void agp_frontend_cleanup(void) {}
-#endif
-
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 0e19c600db53..1776afd3ee07 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -293,13 +293,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
}
if (list_empty(&agp_bridges)) {
- error = agp_frontend_initialize();
- if (error) {
- dev_info(&bridge->dev->dev,
- "agp_frontend_initialize() failed\n");
- goto frontend_err;
- }
-
dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
bridge->driver->fetch_size(), bridge->gart_bus_addr);
@@ -308,8 +301,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
list_add(&bridge->list, &agp_bridges);
return 0;
-frontend_err:
- agp_backend_cleanup(bridge);
err_out:
module_put(bridge->driver->owner);
err_put_bridge:
@@ -323,8 +314,6 @@ void agp_remove_bridge(struct agp_bridge_data *bridge)
{
agp_backend_cleanup(bridge);
list_del(&bridge->list);
- if (list_empty(&agp_bridges))
- agp_frontend_cleanup();
module_put(bridge->driver->owner);
}
EXPORT_SYMBOL_GPL(agp_remove_bridge);
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
deleted file mode 100644
index 52ffe1706ce0..000000000000
--- a/drivers/char/agp/compat_ioctl.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * AGPGART driver frontend compatibility ioctls
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include "agp.h"
-#include "compat_ioctl.h"
-
-static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_info32 userinfo;
- struct agp_kern_info kerninfo;
-
- agp_copy_info(agp_bridge, &kerninfo);
-
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
- (kerninfo.device->device << 16);
- userinfo.agp_mode = kerninfo.mode;
- userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
- userinfo.aper_size = kerninfo.aper_size;
- userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
- userinfo.pg_used = kerninfo.current_memory;
-
- if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_region32 ureserve;
- struct agp_region kreserve;
- struct agp_client *client;
- struct agp_file_private *client_priv;
-
- DBG("");
- if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
- return -EFAULT;
-
- if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
- return -EFAULT;
-
- kreserve.pid = ureserve.pid;
- kreserve.seg_count = ureserve.seg_count;
-
- client = agp_find_client_by_pid(kreserve.pid);
-
- if (kreserve.seg_count == 0) {
- /* remove a client */
- client_priv = agp_find_private(kreserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- if (client == NULL) {
- /* client is already removed */
- return 0;
- }
- return agp_remove_client(kreserve.pid);
- } else {
- struct agp_segment32 *usegment;
- struct agp_segment *ksegment;
- int seg;
-
- if (ureserve.seg_count >= 16384)
- return -EINVAL;
-
- usegment = kmalloc_array(ureserve.seg_count,
- sizeof(*usegment),
- GFP_KERNEL);
- if (!usegment)
- return -ENOMEM;
-
- ksegment = kmalloc_array(kreserve.seg_count,
- sizeof(*ksegment),
- GFP_KERNEL);
- if (!ksegment) {
- kfree(usegment);
- return -ENOMEM;
- }
-
- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
- sizeof(*usegment) * ureserve.seg_count)) {
- kfree(usegment);
- kfree(ksegment);
- return -EFAULT;
- }
-
- for (seg = 0; seg < ureserve.seg_count; seg++) {
- ksegment[seg].pg_start = usegment[seg].pg_start;
- ksegment[seg].pg_count = usegment[seg].pg_count;
- ksegment[seg].prot = usegment[seg].prot;
- }
-
- kfree(usegment);
- kreserve.seg_list = ksegment;
-
- if (client == NULL) {
- /* Create the client and add the segment */
- client = agp_create_client(kreserve.pid);
-
- if (client == NULL) {
- kfree(ksegment);
- return -ENOMEM;
- }
- client_priv = agp_find_private(kreserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- }
- return agp_create_segment(client, &kreserve);
- }
- /* Will never really happen */
- return -EINVAL;
-}
-
-static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_allocate32 alloc;
-
- DBG("");
- if (copy_from_user(&alloc, arg, sizeof(alloc)))
- return -EFAULT;
-
- memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
- if (memory == NULL)
- return -ENOMEM;
-
- alloc.key = memory->key;
- alloc.physical = memory->physical;
-
- if (copy_to_user(arg, &alloc, sizeof(alloc))) {
- agp_free_memory_wrap(memory);
- return -EFAULT;
- }
- return 0;
-}
-
-static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_bind32 bind_info;
- struct agp_memory *memory;
-
- DBG("");
- if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(bind_info.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_unbind32 unbind;
-
- DBG("");
- if (copy_from_user(&unbind, arg, sizeof(unbind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(unbind.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_unbind_memory(memory);
-}
-
-long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct agp_file_private *curr_priv = file->private_data;
- int ret_val = -ENOTTY;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- if ((agp_fe.current_controller == NULL) &&
- (cmd != AGPIOC_ACQUIRE32)) {
- ret_val = -EINVAL;
- goto ioctl_out;
- }
- if ((agp_fe.backend_acquired != true) &&
- (cmd != AGPIOC_ACQUIRE32)) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- if (cmd != AGPIOC_ACQUIRE32) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
- ret_val = -EPERM;
- goto ioctl_out;
- }
- /* Use the original pid of the controller,
- * in case it's threaded */
-
- if (agp_fe.current_controller->pid != curr_priv->my_pid) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- }
-
- switch (cmd) {
- case AGPIOC_INFO32:
- ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_ACQUIRE32:
- ret_val = agpioc_acquire_wrap(curr_priv);
- break;
-
- case AGPIOC_RELEASE32:
- ret_val = agpioc_release_wrap(curr_priv);
- break;
-
- case AGPIOC_SETUP32:
- ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_RESERVE32:
- ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_PROTECT32:
- ret_val = agpioc_protect_wrap(curr_priv);
- break;
-
- case AGPIOC_ALLOCATE32:
- ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_DEALLOCATE32:
- ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
- break;
-
- case AGPIOC_BIND32:
- ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_UNBIND32:
- ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_CHIPSET_FLUSH32:
- break;
- }
-
-ioctl_out:
- DBG("ioctl returns %d\n", ret_val);
- mutex_unlock(&(agp_fe.agp_mutex));
- return ret_val;
-}
-
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
deleted file mode 100644
index f30e0fd97963..000000000000
--- a/drivers/char/agp/compat_ioctl.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _AGP_COMPAT_IOCTL_H
-#define _AGP_COMPAT_IOCTL_H
-
-#include <linux/compat.h>
-#include <linux/agpgart.h>
-
-#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
-#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
-#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
-#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
-#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
-#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
-#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
-#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
-#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
-#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
-#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
-
-struct agp_info32 {
- struct agp_version version; /* version of the driver */
- u32 bridge_id; /* bridge vendor/device */
- u32 agp_mode; /* mode info of bridge */
- compat_long_t aper_base; /* base of aperture */
- compat_size_t aper_size; /* size of aperture */
- compat_size_t pg_total; /* max pages (swap + system) */
- compat_size_t pg_system; /* max pages (system) */
- compat_size_t pg_used; /* current pages used */
-};
-
-/*
- * The "prot" down below needs still a "sleep" flag somehow ...
- */
-struct agp_segment32 {
- compat_off_t pg_start; /* starting page to populate */
- compat_size_t pg_count; /* number of pages */
- compat_int_t prot; /* prot flags for mmap */
-};
-
-struct agp_region32 {
- compat_pid_t pid; /* pid of process */
- compat_size_t seg_count; /* number of segments */
- struct agp_segment32 *seg_list;
-};
-
-struct agp_allocate32 {
- compat_int_t key; /* tag of allocation */
- compat_size_t pg_count; /* number of pages */
- u32 type; /* 0 == normal, other devspec */
- u32 physical; /* device specific (some devices
- * need a phys address of the
- * actual page behind the gatt
- * table) */
-};
-
-struct agp_bind32 {
- compat_int_t key; /* tag of allocation */
- compat_off_t pg_start; /* starting page to populate */
-};
-
-struct agp_unbind32 {
- compat_int_t key; /* tag of allocation */
- u32 priority; /* priority for paging out */
-};
-
-extern struct agp_front_data agp_fe;
-
-int agpioc_acquire_wrap(struct agp_file_private *priv);
-int agpioc_release_wrap(struct agp_file_private *priv);
-int agpioc_protect_wrap(struct agp_file_private *priv);
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
-struct agp_file_private *agp_find_private(pid_t pid);
-struct agp_client *agp_create_client(pid_t id);
-int agp_remove_client(pid_t id);
-int agp_create_segment(struct agp_client *client, struct agp_region *region);
-void agp_free_memory_wrap(struct agp_memory *memory);
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
-struct agp_memory *agp_find_mem_by_key(int key);
-struct agp_client *agp_find_client_by_pid(pid_t id);
-
-#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
deleted file mode 100644
index 321118a9cfa5..000000000000
--- a/drivers/char/agp/frontend.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-/*
- * AGPGART driver frontend
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-
-#include "agp.h"
-#include "compat_ioctl.h"
-
-struct agp_front_data agp_fe;
-
-struct agp_memory *agp_find_mem_by_key(int key)
-{
- struct agp_memory *curr;
-
- if (agp_fe.current_controller == NULL)
- return NULL;
-
- curr = agp_fe.current_controller->pool;
-
- while (curr != NULL) {
- if (curr->key == key)
- break;
- curr = curr->next;
- }
-
- DBG("key=%d -> mem=%p", key, curr);
- return curr;
-}
-
-static void agp_remove_from_pool(struct agp_memory *temp)
-{
- struct agp_memory *prev;
- struct agp_memory *next;
-
- /* Check to see if this is even in the memory pool */
-
- DBG("mem=%p", temp);
- if (agp_find_mem_by_key(temp->key) != NULL) {
- next = temp->next;
- prev = temp->prev;
-
- if (prev != NULL) {
- prev->next = next;
- if (next != NULL)
- next->prev = prev;
-
- } else {
- /* This is the first item on the list */
- if (next != NULL)
- next->prev = NULL;
-
- agp_fe.current_controller->pool = next;
- }
- }
-}
-
-/*
- * Routines for managing each client's segment list -
- * These routines handle adding and removing segments
- * to each auth'ed client.
- */
-
-static struct
-agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
- unsigned long offset,
- int size, pgprot_t page_prot)
-{
- struct agp_segment_priv *seg;
- int i;
- off_t pg_start;
- size_t pg_count;
-
- pg_start = offset / 4096;
- pg_count = size / 4096;
- seg = *(client->segments);
-
- for (i = 0; i < client->num_segments; i++) {
- if ((seg[i].pg_start == pg_start) &&
- (seg[i].pg_count == pg_count) &&
- (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
- return seg + i;
- }
- }
-
- return NULL;
-}
-
-static void agp_remove_seg_from_client(struct agp_client *client)
-{
- DBG("client=%p", client);
-
- if (client->segments != NULL) {
- if (*(client->segments) != NULL) {
- DBG("Freeing %p from client %p", *(client->segments), client);
- kfree(*(client->segments));
- }
- DBG("Freeing %p from client %p", client->segments, client);
- kfree(client->segments);
- client->segments = NULL;
- }
-}
-
-static void agp_add_seg_to_client(struct agp_client *client,
- struct agp_segment_priv ** seg, int num_segments)
-{
- struct agp_segment_priv **prev_seg;
-
- prev_seg = client->segments;
-
- if (prev_seg != NULL)
- agp_remove_seg_from_client(client);
-
- DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
- client->num_segments = num_segments;
- client->segments = seg;
-}
-
-static pgprot_t agp_convert_mmap_flags(int prot)
-{
- unsigned long prot_bits;
-
- prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
- return vm_get_page_prot(prot_bits);
-}
-
-int agp_create_segment(struct agp_client *client, struct agp_region *region)
-{
- struct agp_segment_priv **ret_seg;
- struct agp_segment_priv *seg;
- struct agp_segment *user_seg;
- size_t i;
-
- seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
- if (seg == NULL) {
- kfree(region->seg_list);
- region->seg_list = NULL;
- return -ENOMEM;
- }
- user_seg = region->seg_list;
-
- for (i = 0; i < region->seg_count; i++) {
- seg[i].pg_start = user_seg[i].pg_start;
- seg[i].pg_count = user_seg[i].pg_count;
- seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
- }
- kfree(region->seg_list);
- region->seg_list = NULL;
-
- ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
- if (ret_seg == NULL) {
- kfree(seg);
- return -ENOMEM;
- }
- *ret_seg = seg;
- agp_add_seg_to_client(client, ret_seg, region->seg_count);
- return 0;
-}
-
-/* End - Routines for managing each client's segment list */
-
-/* This function must only be called when current_controller != NULL */
-static void agp_insert_into_pool(struct agp_memory * temp)
-{
- struct agp_memory *prev;
-
- prev = agp_fe.current_controller->pool;
-
- if (prev != NULL) {
- prev->prev = temp;
- temp->next = prev;
- }
- agp_fe.current_controller->pool = temp;
-}
-
-
-/* File private list routines */
-
-struct agp_file_private *agp_find_private(pid_t pid)
-{
- struct agp_file_private *curr;
-
- curr = agp_fe.file_priv_list;
-
- while (curr != NULL) {
- if (curr->my_pid == pid)
- return curr;
- curr = curr->next;
- }
-
- return NULL;
-}
-
-static void agp_insert_file_private(struct agp_file_private * priv)
-{
- struct agp_file_private *prev;
-
- prev = agp_fe.file_priv_list;
-
- if (prev != NULL)
- prev->prev = priv;
- priv->next = prev;
- agp_fe.file_priv_list = priv;
-}
-
-static void agp_remove_file_private(struct agp_file_private * priv)
-{
- struct agp_file_private *next;
- struct agp_file_private *prev;
-
- next = priv->next;
- prev = priv->prev;
-
- if (prev != NULL) {
- prev->next = next;
-
- if (next != NULL)
- next->prev = prev;
-
- } else {
- if (next != NULL)
- next->prev = NULL;
-
- agp_fe.file_priv_list = next;
- }
-}
-
-/* End - File flag list routines */
-
-/*
- * Wrappers for agp_free_memory & agp_allocate_memory
- * These make sure that internal lists are kept updated.
- */
-void agp_free_memory_wrap(struct agp_memory *memory)
-{
- agp_remove_from_pool(memory);
- agp_free_memory(memory);
-}
-
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
-{
- struct agp_memory *memory;
-
- memory = agp_allocate_memory(agp_bridge, pg_count, type);
- if (memory == NULL)
- return NULL;
-
- agp_insert_into_pool(memory);
- return memory;
-}
-
-/* Routines for managing the list of controllers -
- * These routines manage the current controller, and the list of
- * controllers
- */
-
-static struct agp_controller *agp_find_controller_by_pid(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = agp_fe.controllers;
-
- while (controller != NULL) {
- if (controller->pid == id)
- return controller;
- controller = controller->next;
- }
-
- return NULL;
-}
-
-static struct agp_controller *agp_create_controller(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
- if (controller == NULL)
- return NULL;
-
- controller->pid = id;
- return controller;
-}
-
-static int agp_insert_controller(struct agp_controller *controller)
-{
- struct agp_controller *prev_controller;
-
- prev_controller = agp_fe.controllers;
- controller->next = prev_controller;
-
- if (prev_controller != NULL)
- prev_controller->prev = controller;
-
- agp_fe.controllers = controller;
-
- return 0;
-}
-
-static void agp_remove_all_clients(struct agp_controller *controller)
-{
- struct agp_client *client;
- struct agp_client *temp;
-
- client = controller->clients;
-
- while (client) {
- struct agp_file_private *priv;
-
- temp = client;
- agp_remove_seg_from_client(temp);
- priv = agp_find_private(temp->pid);
-
- if (priv != NULL) {
- clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
- clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- }
- client = client->next;
- kfree(temp);
- }
-}
-
-static void agp_remove_all_memory(struct agp_controller *controller)
-{
- struct agp_memory *memory;
- struct agp_memory *temp;
-
- memory = controller->pool;
-
- while (memory) {
- temp = memory;
- memory = memory->next;
- agp_free_memory_wrap(temp);
- }
-}
-
-static int agp_remove_controller(struct agp_controller *controller)
-{
- struct agp_controller *prev_controller;
- struct agp_controller *next_controller;
-
- prev_controller = controller->prev;
- next_controller = controller->next;
-
- if (prev_controller != NULL) {
- prev_controller->next = next_controller;
- if (next_controller != NULL)
- next_controller->prev = prev_controller;
-
- } else {
- if (next_controller != NULL)
- next_controller->prev = NULL;
-
- agp_fe.controllers = next_controller;
- }
-
- agp_remove_all_memory(controller);
- agp_remove_all_clients(controller);
-
- if (agp_fe.current_controller == controller) {
- agp_fe.current_controller = NULL;
- agp_fe.backend_acquired = false;
- agp_backend_release(agp_bridge);
- }
- kfree(controller);
- return 0;
-}
-
-static void agp_controller_make_current(struct agp_controller *controller)
-{
- struct agp_client *clients;
-
- clients = controller->clients;
-
- while (clients != NULL) {
- struct agp_file_private *priv;
-
- priv = agp_find_private(clients->pid);
-
- if (priv != NULL) {
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- }
- clients = clients->next;
- }
-
- agp_fe.current_controller = controller;
-}
-
-static void agp_controller_release_current(struct agp_controller *controller,
- struct agp_file_private *controller_priv)
-{
- struct agp_client *clients;
-
- clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
- clients = controller->clients;
-
- while (clients != NULL) {
- struct agp_file_private *priv;
-
- priv = agp_find_private(clients->pid);
-
- if (priv != NULL)
- clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-
- clients = clients->next;
- }
-
- agp_fe.current_controller = NULL;
- agp_fe.used_by_controller = false;
- agp_backend_release(agp_bridge);
-}
-
-/*
- * Routines for managing client lists -
- * These routines are for managing the list of auth'ed clients.
- */
-
-static struct agp_client
-*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
-{
- struct agp_client *client;
-
- if (controller == NULL)
- return NULL;
-
- client = controller->clients;
-
- while (client != NULL) {
- if (client->pid == id)
- return client;
- client = client->next;
- }
-
- return NULL;
-}
-
-static struct agp_controller *agp_find_controller_for_client(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = agp_fe.controllers;
-
- while (controller != NULL) {
- if ((agp_find_client_in_controller(controller, id)) != NULL)
- return controller;
- controller = controller->next;
- }
-
- return NULL;
-}
-
-struct agp_client *agp_find_client_by_pid(pid_t id)
-{
- struct agp_client *temp;
-
- if (agp_fe.current_controller == NULL)
- return NULL;
-
- temp = agp_find_client_in_controller(agp_fe.current_controller, id);
- return temp;
-}
-
-static void agp_insert_client(struct agp_client *client)
-{
- struct agp_client *prev_client;
-
- prev_client = agp_fe.current_controller->clients;
- client->next = prev_client;
-
- if (prev_client != NULL)
- prev_client->prev = client;
-
- agp_fe.current_controller->clients = client;
- agp_fe.current_controller->num_clients++;
-}
-
-struct agp_client *agp_create_client(pid_t id)
-{
- struct agp_client *new_client;
-
- new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
- if (new_client == NULL)
- return NULL;
-
- new_client->pid = id;
- agp_insert_client(new_client);
- return new_client;
-}
-
-int agp_remove_client(pid_t id)
-{
- struct agp_client *client;
- struct agp_client *prev_client;
- struct agp_client *next_client;
- struct agp_controller *controller;
-
- controller = agp_find_controller_for_client(id);
- if (controller == NULL)
- return -EINVAL;
-
- client = agp_find_client_in_controller(controller, id);
- if (client == NULL)
- return -EINVAL;
-
- prev_client = client->prev;
- next_client = client->next;
-
- if (prev_client != NULL) {
- prev_client->next = next_client;
- if (next_client != NULL)
- next_client->prev = prev_client;
-
- } else {
- if (next_client != NULL)
- next_client->prev = NULL;
- controller->clients = next_client;
- }
-
- controller->num_clients--;
- agp_remove_seg_from_client(client);
- kfree(client);
- return 0;
-}
-
-/* End - Routines for managing client lists */
-
-/* File Operations */
-
-static int agp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- unsigned int size, current_size;
- unsigned long offset;
- struct agp_client *client;
- struct agp_file_private *priv = file->private_data;
- struct agp_kern_info kerninfo;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- if (agp_fe.backend_acquired != true)
- goto out_eperm;
-
- if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
- goto out_eperm;
-
- agp_copy_info(agp_bridge, &kerninfo);
- size = vma->vm_end - vma->vm_start;
- current_size = kerninfo.aper_size;
- current_size = current_size * 0x100000;
- offset = vma->vm_pgoff << PAGE_SHIFT;
- DBG("%lx:%lx", offset, offset+size);
-
- if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
- if ((size + offset) > current_size)
- goto out_inval;
-
- client = agp_find_client_by_pid(current->pid);
-
- if (client == NULL)
- goto out_eperm;
-
- if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
- goto out_inval;
-
- DBG("client vm_ops=%p", kerninfo.vm_ops);
- if (kerninfo.vm_ops) {
- vma->vm_ops = kerninfo.vm_ops;
- } else if (io_remap_pfn_range(vma, vma->vm_start,
- (kerninfo.aper_base + offset) >> PAGE_SHIFT,
- size,
- pgprot_writecombine(vma->vm_page_prot))) {
- goto out_again;
- }
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
- }
-
- if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
- if (size != current_size)
- goto out_inval;
-
- DBG("controller vm_ops=%p", kerninfo.vm_ops);
- if (kerninfo.vm_ops) {
- vma->vm_ops = kerninfo.vm_ops;
- } else if (io_remap_pfn_range(vma, vma->vm_start,
- kerninfo.aper_base >> PAGE_SHIFT,
- size,
- pgprot_writecombine(vma->vm_page_prot))) {
- goto out_again;
- }
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
- }
-
-out_eperm:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EPERM;
-
-out_inval:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EINVAL;
-
-out_again:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EAGAIN;
-}
-
-static int agp_release(struct inode *inode, struct file *file)
-{
- struct agp_file_private *priv = file->private_data;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- DBG("priv=%p", priv);
-
- if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
- struct agp_controller *controller;
-
- controller = agp_find_controller_by_pid(priv->my_pid);
-
- if (controller != NULL) {
- if (controller == agp_fe.current_controller)
- agp_controller_release_current(controller, priv);
- agp_remove_controller(controller);
- controller = NULL;
- }
- }
-
- if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
- agp_remove_client(priv->my_pid);
-
- agp_remove_file_private(priv);
- kfree(priv);
- file->private_data = NULL;
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
-}
-
-static int agp_open(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct agp_file_private *priv;
- struct agp_client *client;
-
- if (minor != AGPGART_MINOR)
- return -ENXIO;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
- if (priv == NULL) {
- mutex_unlock(&(agp_fe.agp_mutex));
- return -ENOMEM;
- }
-
- set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
- priv->my_pid = current->pid;
-
- if (capable(CAP_SYS_RAWIO))
- /* Root priv, can be controller */
- set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
-
- client = agp_find_client_by_pid(current->pid);
-
- if (client != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- }
- file->private_data = (void *) priv;
- agp_insert_file_private(priv);
- DBG("private=%p, client=%p", priv, client);
-
- mutex_unlock(&(agp_fe.agp_mutex));
-
- return 0;
-}
-
-static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_info userinfo;
- struct agp_kern_info kerninfo;
-
- agp_copy_info(agp_bridge, &kerninfo);
-
- memset(&userinfo, 0, sizeof(userinfo));
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
- (kerninfo.device->device << 16);
- userinfo.agp_mode = kerninfo.mode;
- userinfo.aper_base = kerninfo.aper_base;
- userinfo.aper_size = kerninfo.aper_size;
- userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
- userinfo.pg_used = kerninfo.current_memory;
-
- if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
- return -EFAULT;
-
- return 0;
-}
-
-int agpioc_acquire_wrap(struct agp_file_private *priv)
-{
- struct agp_controller *controller;
-
- DBG("");
-
- if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
- return -EPERM;
-
- if (agp_fe.current_controller != NULL)
- return -EBUSY;
-
- if (!agp_bridge)
- return -ENODEV;
-
- if (atomic_read(&agp_bridge->agp_in_use))
- return -EBUSY;
-
- atomic_inc(&agp_bridge->agp_in_use);
-
- agp_fe.backend_acquired = true;
-
- controller = agp_find_controller_by_pid(priv->my_pid);
-
- if (controller != NULL) {
- agp_controller_make_current(controller);
- } else {
- controller = agp_create_controller(priv->my_pid);
-
- if (controller == NULL) {
- agp_fe.backend_acquired = false;
- agp_backend_release(agp_bridge);
- return -ENOMEM;
- }
- agp_insert_controller(controller);
- agp_controller_make_current(controller);
- }
-
- set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- return 0;
-}
-
-int agpioc_release_wrap(struct agp_file_private *priv)
-{
- DBG("");
- agp_controller_release_current(agp_fe.current_controller, priv);
- return 0;
-}
-
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_setup mode;
-
- DBG("");
- if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
- return -EFAULT;
-
- agp_enable(agp_bridge, mode.agp_mode);
- return 0;
-}
-
-static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_region reserve;
- struct agp_client *client;
- struct agp_file_private *client_priv;
-
- DBG("");
- if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
- return -EFAULT;
-
- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
- return -EFAULT;
-
- client = agp_find_client_by_pid(reserve.pid);
-
- if (reserve.seg_count == 0) {
- /* remove a client */
- client_priv = agp_find_private(reserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- if (client == NULL) {
- /* client is already removed */
- return 0;
- }
- return agp_remove_client(reserve.pid);
- } else {
- struct agp_segment *segment;
-
- if (reserve.seg_count >= 16384)
- return -EINVAL;
-
- segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
- GFP_KERNEL);
-
- if (segment == NULL)
- return -ENOMEM;
-
- if (copy_from_user(segment, (void __user *) reserve.seg_list,
- sizeof(struct agp_segment) * reserve.seg_count)) {
- kfree(segment);
- return -EFAULT;
- }
- reserve.seg_list = segment;
-
- if (client == NULL) {
- /* Create the client and add the segment */
- client = agp_create_client(reserve.pid);
-
- if (client == NULL) {
- kfree(segment);
- return -ENOMEM;
- }
- client_priv = agp_find_private(reserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- }
- return agp_create_segment(client, &reserve);
- }
- /* Will never really happen */
- return -EINVAL;
-}
-
-int agpioc_protect_wrap(struct agp_file_private *priv)
-{
- DBG("");
- /* This function is not currently implemented */
- return -EINVAL;
-}
-
-static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_allocate alloc;
-
- DBG("");
- if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
- return -EFAULT;
-
- if (alloc.type >= AGP_USER_TYPES)
- return -EINVAL;
-
- memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
- if (memory == NULL)
- return -ENOMEM;
-
- alloc.key = memory->key;
- alloc.physical = memory->physical;
-
- if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
- agp_free_memory_wrap(memory);
- return -EFAULT;
- }
- return 0;
-}
-
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
-{
- struct agp_memory *memory;
-
- DBG("");
- memory = agp_find_mem_by_key(arg);
-
- if (memory == NULL)
- return -EINVAL;
-
- agp_free_memory_wrap(memory);
- return 0;
-}
-
-static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_bind bind_info;
- struct agp_memory *memory;
-
- DBG("");
- if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(bind_info.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_unbind unbind;
-
- DBG("");
- if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(unbind.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_unbind_memory(memory);
-}
-
-static long agp_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct agp_file_private *curr_priv = file->private_data;
- int ret_val = -ENOTTY;
-
- DBG("priv=%p, cmd=%x", curr_priv, cmd);
- mutex_lock(&(agp_fe.agp_mutex));
-
- if ((agp_fe.current_controller == NULL) &&
- (cmd != AGPIOC_ACQUIRE)) {
- ret_val = -EINVAL;
- goto ioctl_out;
- }
- if ((agp_fe.backend_acquired != true) &&
- (cmd != AGPIOC_ACQUIRE)) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- if (cmd != AGPIOC_ACQUIRE) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
- ret_val = -EPERM;
- goto ioctl_out;
- }
- /* Use the original pid of the controller,
- * in case it's threaded */
-
- if (agp_fe.current_controller->pid != curr_priv->my_pid) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- }
-
- switch (cmd) {
- case AGPIOC_INFO:
- ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_ACQUIRE:
- ret_val = agpioc_acquire_wrap(curr_priv);
- break;
-
- case AGPIOC_RELEASE:
- ret_val = agpioc_release_wrap(curr_priv);
- break;
-
- case AGPIOC_SETUP:
- ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_RESERVE:
- ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_PROTECT:
- ret_val = agpioc_protect_wrap(curr_priv);
- break;
-
- case AGPIOC_ALLOCATE:
- ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_DEALLOCATE:
- ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
- break;
-
- case AGPIOC_BIND:
- ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_UNBIND:
- ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_CHIPSET_FLUSH:
- break;
- }
-
-ioctl_out:
- DBG("ioctl returns %d\n", ret_val);
- mutex_unlock(&(agp_fe.agp_mutex));
- return ret_val;
-}
-
-static const struct file_operations agp_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = agp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_agp_ioctl,
-#endif
- .mmap = agp_mmap,
- .open = agp_open,
- .release = agp_release,
-};
-
-static struct miscdevice agp_miscdev =
-{
- .minor = AGPGART_MINOR,
- .name = "agpgart",
- .fops = &agp_fops
-};
-
-int agp_frontend_initialize(void)
-{
- memset(&agp_fe, 0, sizeof(struct agp_front_data));
- mutex_init(&(agp_fe.agp_mutex));
-
- if (misc_register(&agp_miscdev)) {
- printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
- return -EIO;
- }
- return 0;
-}
-
-void agp_frontend_cleanup(void)
-{
- misc_deregister(&agp_miscdev);
-}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 21916bba77d5..8fe5aa67b167 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -46,12 +46,12 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
struct dma_buf *dmabuf;
char name[DMA_BUF_NAME_LEN];
- size_t ret = 0;
+ ssize_t ret = 0;
dmabuf = dentry->d_fsdata;
spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
- ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
+ ret = strscpy(name, dmabuf->name, sizeof(name));
spin_unlock(&dmabuf->name_lock);
return dynamic_dname(buffer, buflen, "/%s:%s",
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 8aa8f8cb7071..e0fd99e61a2d 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -934,7 +934,8 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
* the GPU's devfreq to reduce frequency, when in fact the opposite is what is
* needed.
*
- * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
+ * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline
+ * (or indirectly via userspace facing ioctls like &sync_set_deadline).
* The deadline hint provides a way for the waiting driver, or userspace, to
* convey an appropriate sense of urgency to the signaling driver.
*
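
As an illustrative aside (not part of this patch), a waiting driver could pass such a hint directly through dma_fence_set_deadline(); the function and argument names in the sketch below are placeholders for however the caller computes its deadline:

	#include <linux/dma-fence.h>

	/*
	 * Hedged sketch: hint the signaling driver that this fence's result is
	 * needed by a given time (CLOCK_MONOTONIC, same timebase as ktime_get()).
	 * How next_vblank is computed is entirely up to the calling driver.
	 */
	static void example_hint_deadline(struct dma_fence *fence, ktime_t next_vblank)
	{
		dma_fence_set_deadline(fence, next_vblank);
	}
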
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f0a35277fd84..c353029789cf 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -52,12 +52,33 @@ struct sw_sync_create_fence_data {
__s32 fence; /* fd of new fence */
};
+/**
+ * struct sw_sync_get_deadline - get the deadline hint of a sw_sync fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ * @fence_fd: the sw_sync fence fd (in)
+ *
+ * Return the earliest deadline set on the fence. The timebase for the
+ * deadline is CLOCK_MONOTONIC (same as vblank). If there is no deadline
+ * set on the fence, this ioctl will return -ENOENT.
+ */
+struct sw_sync_get_deadline {
+ __u64 deadline_ns;
+ __u32 pad;
+ __s32 fence_fd;
+};
+
#define SW_SYNC_IOC_MAGIC 'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+#define SW_SYNC_GET_DEADLINE _IOWR(SW_SYNC_IOC_MAGIC, 2, \
+ struct sw_sync_get_deadline)
+
+
+#define SW_SYNC_HAS_DEADLINE_BIT DMA_FENCE_FLAG_USER_BITS
static const struct dma_fence_ops timeline_fence_ops;
@@ -171,6 +192,22 @@ static void timeline_fence_timeline_value_str(struct dma_fence *fence,
snprintf(str, size, "%d", parent->value);
}
+static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
+{
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ if (ktime_before(deadline, pt->deadline))
+ pt->deadline = deadline;
+ } else {
+ pt->deadline = deadline;
+ __set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+}
+
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
@@ -179,6 +216,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
+ .set_deadline = timeline_fence_set_deadline,
};
/**
@@ -387,6 +425,47 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
return 0;
}
+static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long arg)
+{
+ struct sw_sync_get_deadline data;
+ struct dma_fence *fence;
+ unsigned long flags;
+ struct sync_pt *pt;
+ int ret = 0;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ if (data.deadline_ns || data.pad)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(data.fence_fd);
+ if (!fence)
+ return -EINVAL;
+
+ pt = dma_fence_to_sync_pt(fence);
+ if (!pt)
+ return -EINVAL;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ data.deadline_ns = ktime_to_ns(pt->deadline);
+ } else {
+ ret = -ENOENT;
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ dma_fence_put(fence);
+
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -399,6 +478,9 @@ static long sw_sync_ioctl(struct file *file, unsigned int cmd,
case SW_SYNC_IOC_INC:
return sw_sync_ioctl_inc(obj, arg);
+ case SW_SYNC_GET_DEADLINE:
+ return sw_sync_ioctl_get_deadline(obj, arg);
+
default:
return -ENOTTY;
}
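
As an illustrative aside (not part of this patch), userspace could read the deadline hint back through the new ioctl roughly as sketched below; this assumes the SW_SYNC_GET_DEADLINE definitions above are copied into a local header, that timeline_fd is the opened sw_sync file, and that fence_fd is a fence created on it:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	static void print_deadline(int timeline_fd, int fence_fd)
	{
		/* deadline_ns and pad must be zero on input. */
		struct sw_sync_get_deadline data = { .fence_fd = fence_fd };

		if (ioctl(timeline_fd, SW_SYNC_GET_DEADLINE, &data) == 0)
			printf("deadline: %llu ns (CLOCK_MONOTONIC)\n",
			       (unsigned long long)data.deadline_ns);
		else if (errno == ENOENT)
			printf("no deadline hint set on this fence\n");
	}
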
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 6176e52ba2d7..a1bdd62efccd 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -55,11 +55,13 @@ static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
* @base: base fence object
* @link: link on the sync timeline's list
* @node: node in the sync timeline's tree
+ * @deadline: the earliest fence deadline hint
*/
struct sync_pt {
struct dma_fence base;
struct list_head link;
struct rb_node node;
+ ktime_t deadline;
};
extern const struct file_operations sw_sync_debugfs_fops;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2e9a316c596a..d9b1c1b2a72b 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -347,6 +347,22 @@ out:
return ret;
}
+static int sync_file_ioctl_set_deadline(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ struct sync_set_deadline ts;
+
+ if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
+ return -EFAULT;
+
+ if (ts.pad)
+ return -EINVAL;
+
+ dma_fence_set_deadline(sync_file->fence, ns_to_ktime(ts.deadline_ns));
+
+ return 0;
+}
+
static long sync_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -359,6 +375,9 @@ static long sync_file_ioctl(struct file *file, unsigned int cmd,
case SYNC_IOC_FILE_INFO:
return sync_file_ioctl_fence_info(sync_file, arg);
+ case SYNC_IOC_SET_DEADLINE:
+ return sync_file_ioctl_set_deadline(sync_file, arg);
+
default:
return -ENOTTY;
}
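
As an illustrative aside (not part of this patch), setting a deadline hint from userspace on a sync_file fd might look like the sketch below; it assumes the uapi header exports SYNC_IOC_SET_DEADLINE and struct sync_set_deadline (deadline_ns in CLOCK_MONOTONIC nanoseconds, pad left zero):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/sync_file.h>

	static int set_fence_deadline(int sync_file_fd, uint64_t deadline_ns)
	{
		/* pad stays zero via the designated initializer, as required. */
		struct sync_set_deadline ts = { .deadline_ns = deadline_ns };

		return ioctl(sync_file_fd, SYNC_IOC_SET_DEADLINE, &ts);
	}
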
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index a6dc3997bf5c..442a0ebeb953 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -1093,9 +1093,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_PIN_ID_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
-
+ }
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
ret = dpll_msg_add_pin_handle(msg, pin);
@@ -1123,8 +1124,10 @@ int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_PIN_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
+ }
ret = dpll_cmd_pin_get_one(msg, pin, info->extack);
if (ret) {
nlmsg_free(msg);
@@ -1256,8 +1259,10 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_DEVICE_ID_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
+ }
dpll = dpll_device_find_from_nlattr(info);
if (!IS_ERR(dpll)) {
@@ -1284,8 +1289,10 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_DEVICE_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
+ }
ret = dpll_device_get_one(dpll, msg, info->extack);
if (ret) {
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 74d00b0c83fe..4a98a859d44d 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -131,7 +131,7 @@ config RASPBERRYPI_FIRMWARE
config FW_CFG_SYSFS
tristate "QEMU fw_cfg device support in sysfs"
- depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
+ depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || X86)
depends on HAS_IOPORT_MAP
default n
help
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index a69399a6b7c0..1448f61173b3 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void)
/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
-# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV))
# define FW_CFG_CTRL_OFF 0x08
# define FW_CFG_DATA_OFF 0x00
# define FW_CFG_DMA_OFF 0x10
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3eee8636f847..31cfe2c2a2af 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,16 +74,17 @@ config DRM_KUNIT_TEST_HELPERS
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT
- select PRIME_NUMBERS
+ depends on DRM && KUNIT && MMU
+ select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_LIB_RANDOM
- select DRM_KMS_HELPER
- select DRM_BUDDY
+ select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
- select DRM_EXEC
+ select DRM_LIB_RANDOM
+ select PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -394,6 +395,8 @@ source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
+source "drivers/gpu/drm/imagination/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && MMU && HYPERV
@@ -407,27 +410,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-# Keep legacy drivers last
-
-menuconfig DRM_LEGACY
- bool "Enable legacy drivers (DANGEROUS)"
- depends on DRM && MMU
- help
- Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
- APIs to user-space, which can be used to circumvent access
- restrictions and other security measures. For backwards compatibility
- those drivers are still available, but their use is highly
- inadvisable and might harm your system.
-
- You are recommended to use the safe modeset-only drivers instead, and
- perform 3D emulation in user-space.
-
- Unless you have strong reasons to go rogue, say "N".
-
-if DRM_LEGACY
-# leave here to list legacy drivers
-endif # DRM_LEGACY
-
config DRM_EXPORT_FOR_TESTS
bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8e1bde059170..8ac6f4b9546e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -22,6 +22,7 @@ drm-y := \
drm_drv.o \
drm_dumb_buffers.o \
drm_edid.o \
+ drm_eld.o \
drm_encoder.o \
drm_file.o \
drm_fourcc.o \
@@ -46,18 +47,6 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
-drm-$(CONFIG_DRM_LEGACY) += \
- drm_agpsupport.o \
- drm_bufs.o \
- drm_context.o \
- drm_dma.o \
- drm_hashtab.o \
- drm_irq.o \
- drm_legacy_misc.o \
- drm_lock.o \
- drm_memory.o \
- drm_scatter.o \
- drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -198,3 +187,4 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
+obj-$(CONFIG_DRM_POWERVR) += imagination/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index afec09930efa..9d92ca157677 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -248,6 +248,7 @@ extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_user_partt_mode;
+extern int amdgpu_agp;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 625db444df1c..10d56979fe3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (!(ring && ring->sched.thread))
+ if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
continue;
	/* stop scheduler and drain ring. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index df3ecfa9e13f..e50be6500030 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -207,7 +207,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
for (i = 0; i < p->nchunks; i++) {
- struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
+ struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e2ae9ba147ba..5cb33ac99f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return DRM_SCHED_PRIORITY_MIN;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_LOW:
- return DRM_SCHED_PRIORITY_MIN;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_NORMAL:
return DRM_SCHED_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a53f436fa9f1..c1efa13bccbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1665,9 +1665,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
- kthread_park(ring->sched.thread);
+ drm_sched_wqueue_stop(&ring->sched);
}
seq_puts(m, "run ib test:\n");
@@ -1681,9 +1681,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
- kthread_unpark(ring->sched.thread);
+ drm_sched_wqueue_start(&ring->sched);
}
up_write(&adev->reset_domain->sem);
@@ -1903,7 +1903,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
- if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+ if (!ring || !ring->funcs->preempt_ib ||
+ !drm_sched_wqueue_ready(&ring->sched))
return -EINVAL;
/* the last preemption failed */
@@ -1921,7 +1922,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
goto pro_end;
/* stop the scheduler */
- kthread_park(ring->sched.thread);
+ drm_sched_wqueue_stop(&ring->sched);
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
@@ -1955,7 +1956,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
failure:
/* restart the scheduler */
- kthread_unpark(ring->sched.thread);
+ drm_sched_wqueue_start(&ring->sched);
up_read(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7eeaf0aa7f81..2a6684a38714 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2573,7 +2573,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
break;
}
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
ring->num_hw_submission, 0,
timeout, adev->reset_domain->wq,
@@ -4964,7 +4964,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
spin_lock(&ring->sched.job_list_lock);
@@ -5103,7 +5103,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
/* Clear job fence from fence drv to avoid force_completion
@@ -5592,7 +5592,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5668,7 +5668,7 @@ skip_hw_reset:
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
drm_sched_start(&ring->sched, true);
@@ -5991,7 +5991,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
drm_sched_stop(&ring->sched, NULL);
@@ -6119,7 +6119,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_wqueue_ready(&ring->sched))
continue;
drm_sched_start(&ring->sched, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 3095a3a864af..8f24cabe2155 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -207,6 +207,7 @@ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
+int amdgpu_agp = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -961,6 +962,15 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+/**
+ * DOC: agp (int)
+ * Enable the AGP aperture. This provides an aperture in the GPU's internal
+ * address space for direct access to system memory. Note that these accesses
+ * are non-snooped, so they are only used for access to uncached memory.
+ */
+MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(agp, amdgpu_agp, int, 0444);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 1f357198533f..71a5cf37b472 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -115,7 +115,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 32b701cc0376..a21045d018f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1473,6 +1473,11 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
topology->nodes[i].num_links : node_num_links;
}
+	/* populate the connected port num info if supported and available */
+ if (ta_port_num_support && topology->nodes[i].num_links) {
+ memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
+ sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
+ }
/* reflect the topology information for bi-directionality */
if (requires_reflection && topology->nodes[i].num_hops)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 5d36ad3f48c7..c4d9cbde55b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -150,6 +150,7 @@ struct psp_xgmi_node_info {
uint8_t is_sharing_enabled;
enum ta_xgmi_assigned_sdma_engine sdma_engine;
uint8_t num_links;
+ struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM];
};
struct psp_xgmi_topology_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 84e5987b14e0..a3dc68e98910 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1188,7 +1188,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
if (block_obj->hw_ops->query_ras_error_count)
- block_obj->hw_ops->query_ras_error_count(adev, &err_data);
+ block_obj->hw_ops->query_ras_error_count(adev, err_data);
if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
(info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 65949cc7abb9..07d930339b07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -398,6 +398,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
* amdgpu_uvd_entity_init - init entity
*
* @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer to check
*
* Initialize the entity used for handle management in the kernel driver.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0954447f689d..59acf424a078 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -230,6 +230,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
* amdgpu_vce_entity_init - init entity
*
* @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer to check
*
* Initialize the entity used for handle management in the kernel driver.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 0ec7b061d7c2..a5a05c16c10d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -675,7 +675,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 6dce9b29f675..23d7b548d13f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -640,8 +640,9 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
- if (!amdgpu_sriov_vf(adev) ||
- (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)))
+ if (!amdgpu_sriov_vf(adev) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
+ (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index bde25eb4ed8e..2ac5820e9c92 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1630,7 +1630,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
} else {
amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
}
/* base offset of vram pages */
@@ -2170,8 +2170,6 @@ static int gmc_v9_0_sw_fini(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
amdgpu_gmc_sysfs_fini(adev);
- adev->gmc.num_mem_partitions = 0;
- kfree(adev->gmc.mem_partitions);
amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
@@ -2185,6 +2183,9 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
+ adev->gmc.num_mem_partitions = 0;
+ kfree(adev->gmc.mem_partitions);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index ea142611be1c..9b0146732e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -130,6 +130,9 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
int i;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
/* Program the AGP BAR */
@@ -139,9 +142,6 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
adev->gmc.agp_end >> 24);
- if (amdgpu_sriov_vf(adev))
- return;
-
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6f99f6754c11..b8c3a9b104a4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -85,12 +85,13 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <acpi/video.h>
@@ -2079,7 +2080,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_create_params create_params;
struct dmub_srv_region_params region_params;
struct dmub_srv_region_info region_info;
- struct dmub_srv_fb_params fb_params;
+ struct dmub_srv_memory_params memory_params;
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
@@ -2182,6 +2183,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES;
+ region_params.is_mailbox_in_inbox = false;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -2205,10 +2207,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return r;
/* Rebase the regions on the framebuffer address. */
- memset(&fb_params, 0, sizeof(fb_params));
- fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
- fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
- fb_params.region_info = &region_info;
+ memset(&memory_params, 0, sizeof(memory_params));
+ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+ memory_params.region_info = &region_info;
adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@ -2220,7 +2222,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
- status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
@@ -6908,8 +6910,8 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- if (!mst_state->pbn_div)
- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
+ if (!mst_state->pbn_div.full)
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -6921,7 +6923,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
max_bpc);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
- dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
}
dm_new_connector_state->vcpi_slots =
@@ -7481,6 +7483,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
+ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ return result;
+
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
if (!cmd.payloads)
@@ -9603,14 +9608,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i;
/*
- * TODO: Remove this hack once the checks below are sufficient
- * enough to determine when we need to reset all the planes on
- * the stream.
+ * TODO: Remove this hack for all ASICs once fast updates are
+ * proven to work correctly on DCN3.2+.
*/
- if (state->allow_modeset)
+ if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
return true;
/* Exit early if we know that we're adding or removing the plane. */
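
For context on the hunks above: after this series the MST state's pbn_div is a fixed20_12 value from <drm/drm_fixed.h>, seeded with dfixed_const() and read back with dfixed_trunc(), exactly as the call sites here do. A minimal sketch of that usage, assuming only the helpers visible in this diff (the divider argument is an illustrative placeholder):

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>

static int example_pbn_div_usage(struct drm_dp_mst_topology_state *mst_state,
				 int divider)
{
	/* seed the PBN divider as 20.12 fixed point if not yet set */
	if (!mst_state->pbn_div.full)
		mst_state->pbn_div.full = dfixed_const(divider);

	/* consumers that need an integer value truncate the fixed-point number */
	return dfixed_trunc(mst_state->pbn_div);
}
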
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ed784cf27d39..eb6121ad92fd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -31,6 +31,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include "dm_services.h"
#include "amdgpu.h"
@@ -210,7 +211,7 @@ static void dm_helpers_construct_old_payload(
struct drm_dp_mst_atomic_payload *old_payload)
{
struct drm_dp_mst_atomic_payload *pos;
- int pbn_per_slot = mst_state->pbn_div;
+ int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
u8 next_payload_vc_start = mgr->next_start_slot;
u8 payload_vc_start = new_payload->vc_start_slot;
u8 allocated_time_slots;
@@ -536,11 +537,8 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
- if (!aconnector) {
- drm_dbg_dp(aconnector->base.dev,
- "Failed to find connector for link!\n");
+ if (!aconnector)
return false;
- }
return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
size) == size;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d3b13d362eda..3608d520b227 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fixed.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -941,10 +942,10 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
link_timeslots_used = 0;
for (i = 0; i < count; i++)
- link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
+ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div));
fair_pbn_alloc =
- (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
+ (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div);
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
@@ -1604,31 +1605,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
- struct drm_dp_mst_topology_mgr *mst_mgr;
+ uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
/*
- * check if the mode could be supported if DSC pass-through is supported
- * AND check if there enough bandwidth available to support the mode
- * with DSC enabled.
+ * Consider the case where the depth of the MST topology tree is equal to or less than 2.
+ * A. When the DSC bitstream can be transmitted along the entire path:
+ *    1. DSC is possible between the source and the branch/leaf device (common DSC params are possible), AND
+ *    2. DSC pass-through is supported at the MST branch, or
+ *    3. DSC decoding is supported at the leaf MST device.
+ *    In this case, use the maximum DSC compression as the bandwidth constraint.
+ * B. When the DSC bitstream cannot be transmitted along the entire path:
+ *    use the native bandwidth as the constraint.
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
- aconnector->mst_output_port->passthrough_aux) {
- mst_mgr = aconnector->mst_output_port->mgr;
- mutex_lock(&mst_mgr->lock);
-
+ (aconnector->mst_output_port->passthrough_aux ||
+ aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
cur_link_settings = stream->link->verified_link_cap;
upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
- &cur_link_settings
- );
- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ &cur_link_settings);
+ down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
/* pick the bottleneck */
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
down_link_bw_in_kbps);
- mutex_unlock(&mst_mgr->lock);
-
/*
* use the maximum dsc compression bandwidth as the required
* bandwidth for the mode
@@ -1642,9 +1643,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
} else {
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
-
- if (pbn > aconnector->mst_output_port->full_pbn)
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
+ if (pbn > full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
}
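
As a hedged aside on the drm_dp_calc_pbn_mode() call sites changed above: the helper now takes bpp in 1/16-bpp units and no longer takes a DSC flag, which is why integer bpp values are shifted left by four before the call. A minimal sketch:

#include <drm/display/drm_dp_mst_helper.h>

static int example_calc_pbn(int clock_khz, int bpc)
{
	int bpp = bpc * 3;	/* e.g. 8 bpc RGB -> 24 bpp */

	/* bpp is passed in 1/16-bpp units, hence the << 4 */
	return drm_dp_calc_pbn_mode(clock_khz, bpp << 4);
}
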
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 0fa4fcd00de2..507a7cf56711 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -820,22 +820,22 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
val |= DMUB_IPS1_ALLOW_MASK;
val |= DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ val = val & ~DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ val = val & ~DMUB_IPS1_ALLOW_MASK;
+ val = val & ~DMUB_IPS2_ALLOW_MASK;
}
if (!allow_idle) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
+ val |= DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
}
dcn35_smu_write_ips_scratch(clk_mgr, val);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7b9bf5cb4529..76b47f178127 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3178,7 +3178,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
context->streams[i]);
- if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
+ if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
resource_build_test_pattern_params(&context->res_ctx, otg_master);
}
}
@@ -4934,8 +4934,8 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc)
if (dc->hwss.get_idle_state)
idle_state = dc->hwss.get_idle_state(dc);
- if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
- (idle_state & DMUB_IPS2_ALLOW_MASK))
+ if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
+ !(idle_state & DMUB_IPS2_ALLOW_MASK))
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 1d48278cba96..a1f1d1003992 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -5190,6 +5190,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
sec_next = sec_pipe->next_odm_pipe;
sec_prev = sec_pipe->prev_odm_pipe;
+ if (pri_pipe == NULL)
+ return false;
+
*sec_pipe = *pri_pipe;
sec_pipe->top_pipe = sec_top;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index e4c007203318..0e07699c1e83 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1202,11 +1202,11 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
allow_state = dc->hwss.get_idle_state(dc);
dc->hwss.set_idle_state(dc, false);
- if (allow_state & DMUB_IPS2_ALLOW_MASK) {
+ if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
udelay(dc->debug.ips2_eval_delay_us);
commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_COMMIT_MASK) {
+ if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1216,7 +1216,7 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
- if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
+ if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
udelay(1);
@@ -1235,10 +1235,10 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
}
dc_dmub_srv_notify_idle(dc, false);
- if (allow_state & DMUB_IPS1_ALLOW_MASK) {
+ if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
- if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
+ if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
udelay(1);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index cea666ea66c6..fcb825e4f1bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -177,6 +177,7 @@ struct dc_panel_patch {
unsigned int disable_fams;
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
+ unsigned int remove_sink_ext_caps;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c
index 001f9eb66920..62a8f0b56006 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c
@@ -261,12 +261,6 @@ static void enc35_stream_encoder_enable(
/* invalid mode ! */
ASSERT_CRITICAL(false);
}
-
- REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
- REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
- } else {
- REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
- REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}
}
@@ -436,6 +430,8 @@ static void enc35_disable_fifo(struct stream_encoder *enc)
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
+ REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
+ REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}
static void enc35_enable_fifo(struct stream_encoder *enc)
@@ -443,6 +439,8 @@ static void enc35_enable_fifo(struct stream_encoder *enc)
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
+ REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
enc35_reset_fifo(enc, true);
enc35_reset_fifo(enc, false);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index d6f0f857c05a..f2fe523f914f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -1088,6 +1088,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
+ if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
+ link->dpcd_sink_ext_caps.raw = 0;
+
if (dc_is_hdmi_signal(link->connector_signal))
read_scdc_caps(link->ddc, link->local_sink);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 9665ada0f894..df63aa8f01e9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -195,6 +195,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size;
const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
+ bool is_mailbox_in_inbox;
};
/**
@@ -214,20 +215,25 @@ struct dmub_srv_region_params {
*/
struct dmub_srv_region_info {
uint32_t fb_size;
+ uint32_t inbox_size;
uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL];
};
/**
- * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * struct dmub_srv_memory_params - parameters used for driver fb setup
* @region_info: region info calculated by dmub service
- * @cpu_addr: base cpu address for the framebuffer
- * @gpu_addr: base gpu virtual address for the framebuffer
+ * @cpu_fb_addr: base cpu address for the framebuffer
+ * @cpu_inbox_addr: base cpu address for the gart
+ * @gpu_fb_addr: base gpu virtual address for the framebuffer
+ * @gpu_inbox_addr: base gpu virtual address for the gart
*/
-struct dmub_srv_fb_params {
+struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info;
- void *cpu_addr;
- uint64_t gpu_addr;
+ void *cpu_fb_addr;
+ void *cpu_inbox_addr;
+ uint64_t gpu_fb_addr;
+ uint64_t gpu_inbox_addr;
};
/**
@@ -563,8 +569,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
- const struct dmub_srv_fb_params *params,
+enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
+ const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out);
/**
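
A minimal usage sketch of the renamed structure, mirroring the amdgpu_dm call site earlier in this diff (variable names are illustrative; the inbox addresses stay zero unless is_mailbox_in_inbox was requested when calculating regions):

static enum dmub_status example_setup_fb_info(struct dmub_srv *dmub,
					      const struct dmub_srv_region_info *region_info,
					      void *cpu_fb_addr, uint64_t gpu_fb_addr,
					      struct dmub_srv_fb_info *fb_info)
{
	struct dmub_srv_memory_params memory_params;

	memset(&memory_params, 0, sizeof(memory_params));
	memory_params.region_info = region_info;
	memory_params.cpu_fb_addr = cpu_fb_addr;
	memory_params.gpu_fb_addr = gpu_fb_addr;
	/* cpu_inbox_addr/gpu_inbox_addr remain 0 when the mailbox lives in the framebuffer */

	return dmub_srv_calc_mem_info(dmub, &memory_params, fb_info);
}
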
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index e43e8d4bfe37..22fc4ba96def 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -434,7 +434,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
-
+ uint32_t previous_top = 0;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -459,8 +459,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
bios->base = dmub_align(stack->top, 256);
bios->top = bios->base + params->vbios_size;
- mail->base = dmub_align(bios->top, 256);
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
+ if (params->is_mailbox_in_inbox) {
+ mail->base = 0;
+ mail->top = mail->base + DMUB_MAILBOX_SIZE;
+ previous_top = bios->top;
+ } else {
+ mail->base = dmub_align(bios->top, 256);
+ mail->top = mail->base + DMUB_MAILBOX_SIZE;
+ previous_top = mail->top;
+ }
fw_info = dmub_get_fw_meta_info(params);
@@ -479,7 +486,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version;
}
- trace_buff->base = dmub_align(mail->top, 256);
+ trace_buff->base = dmub_align(previous_top, 256);
trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
fw_state->base = dmub_align(trace_buff->top, 256);
@@ -490,11 +497,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
out->fb_size = dmub_align(scratch_mem->top, 4096);
+ if (params->is_mailbox_in_inbox)
+ out->inbox_size = dmub_align(mail->top, 4096);
+
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
- const struct dmub_srv_fb_params *params,
+enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
+ const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out)
{
uint8_t *cpu_base;
@@ -509,8 +519,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID;
- cpu_base = (uint8_t *)params->cpu_addr;
- gpu_base = params->gpu_addr;
+ cpu_base = (uint8_t *)params->cpu_fb_addr;
+ gpu_base = params->gpu_fb_addr;
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg =
@@ -518,6 +528,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
out->fb[i].cpu_addr = cpu_base + reg->base;
out->fb[i].gpu_addr = gpu_base + reg->base;
+
+ if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+ }
+
out->fb[i].size = reg->top - reg->base;
}
@@ -707,9 +723,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
return DMUB_STATUS_INVALID;
if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1_rb.rptr = rptr;
+ dmub->inbox1_rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ }
}
return DMUB_STATUS_OK;
@@ -743,6 +766,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
+ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
+ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ }
+
if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
return DMUB_STATUS_OK;
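
The new checks above follow one simple rule: never trust ring-buffer pointers that fall outside the buffer. A generic sketch of that guard with placeholder names:

/* illustrative only; mirrors the rptr/wptr validation added above */
static bool example_rb_pointers_valid(uint32_t rptr, uint32_t wptr, uint32_t capacity)
{
	/* a pointer beyond the ring capacity indicates a hung or corrupted DMCUB */
	return rptr <= capacity && wptr <= capacity;
}
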
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index dab35d878a90..fef2d290f3f2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x8
+#define SMU_METRICS_TABLE_VERSION 0x9
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -211,6 +211,14 @@ typedef struct __attribute__((packed, aligned(4))) {
//XGMI Data transfer size
uint64_t XgmiReadDataSizeAcc[8];//in KByte
uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 891605d4975f..0e5a77c3c2e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -1454,7 +1454,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
- if (smu->smc_fw_version <= 0x553500)
+ if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
@@ -2095,6 +2095,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
smu_v13_0_6_get_current_pcie_link_speed(smu);
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ gpu_metrics->pcie_bandwidth_inst =
+ SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ gpu_metrics->pcie_l0_to_recov_count_acc =
+ metrics->PCIeL0ToRecoveryCountAcc;
+ gpu_metrics->pcie_replay_count_acc =
+ metrics->PCIenReplayAAcc;
+ gpu_metrics->pcie_replay_rover_count_acc =
+ metrics->PCIenReplayARolloverCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 15dd667aa2e7..c78687c755a8 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,8 +7,9 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
int irq = platform_get_irq(pdev, 0);
const struct armada_variant *variant;
struct device_node *port = NULL;
+ struct device_node *np, *parent = dev->of_node;
if (irq < 0)
return irq;
- if (!dev->of_node) {
- const struct platform_device_id *id;
- id = platform_get_device_id(pdev);
- if (!id)
- return -ENXIO;
-
- variant = (const struct armada_variant *)id->driver_data;
- } else {
- const struct of_device_id *match;
- struct device_node *np, *parent = dev->of_node;
-
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match)
- return -ENXIO;
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -ENXIO;
+ if (parent) {
np = of_get_child_by_name(parent, "ports");
if (np)
parent = np;
@@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "no port node found in %pOF\n", parent);
return -ENXIO;
}
-
- variant = match->data;
}
return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
@@ -1066,10 +1056,9 @@ static int armada_lcd_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &armada_lcd_ops);
}
-static int armada_lcd_remove(struct platform_device *pdev)
+static void armada_lcd_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &armada_lcd_ops);
- return 0;
}
static const struct of_device_id armada_lcd_of_match[] = {
@@ -1095,7 +1084,7 @@ MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
struct platform_driver armada_lcd_platform_driver = {
.probe = armada_lcd_probe,
- .remove = armada_lcd_remove,
+ .remove_new = armada_lcd_remove,
.driver = {
.name = "armada-lcd",
.owner = THIS_MODULE,
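
Both conversions in this hunk are stock kernel patterns: device_get_match_data() returns the OF/ACPI match data directly, replacing the of_match_device() lookup, and .remove_new is the void-returning platform remove callback. A generic sketch under those assumptions (names are placeholders):

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
	/* replaces of_match_device(...)->data; also works for ACPI firmware nodes */
	const void *variant = device_get_match_data(&pdev->dev);

	if (!variant)
		return -ENXIO;

	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* .remove_new returns void, so teardown must not report failure */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
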
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index fa1c67598706..e51ecc4f7ef4 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -226,10 +226,9 @@ static int armada_drm_probe(struct platform_device *pdev)
match);
}
-static int armada_drm_remove(struct platform_device *pdev)
+static void armada_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &armada_master_ops);
- return 0;
}
static void armada_drm_shutdown(struct platform_device *pdev)
@@ -249,7 +248,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
- .remove = armada_drm_remove,
+ .remove_new = armada_drm_remove,
.shutdown = armada_drm_shutdown,
.driver = {
.name = "armada-drm",
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 78122b35a0cb..a7a6b70220eb 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -6,10 +6,10 @@
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
- const struct of_device_id *match;
struct resource *res;
int ret;
@@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- match = of_match_device(aspeed_gfx_match, &pdev->dev);
- if (!match)
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
return -EINVAL;
- config = match->data;
priv->dac_reg = config->dac_reg;
priv->int_clr_reg = config->int_clear_reg;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index cf5b754f044c..90bcb1eb9cd9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
+static bool ast_is_vga_enabled(void __iomem *ioregs)
+{
+ u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER);
+
+ return vgaer & AST_IO_VGAER_VGA_ENABLE;
+}
+
+static void ast_enable_vga(void __iomem *ioregs)
+{
+ __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE);
+ __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL);
+}
+
+/*
+ * Run this function as part of the HW device cleanup; not
+ * when the DRM device gets released.
+ */
+static void ast_enable_mmio_release(void *data)
+{
+ void __iomem *ioregs = (void __force __iomem *)data;
+
+ /* enable standard VGA decode */
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED);
+}
+
+static int ast_enable_mmio(struct device *dev, void __iomem *ioregs)
+{
+ void *data = (void __force *)ioregs;
+
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1,
+ AST_IO_VGACRA1_MMIO_ENABLED |
+ AST_IO_VGACRA1_VGAIO_DISABLED);
+
+ return devm_add_action_or_reset(dev, ast_enable_mmio_release, data);
+}
+
+static void ast_open_key(void __iomem *ioregs)
+{
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD);
+}
+
+static int ast_detect_chip(struct pci_dev *pdev,
+ void __iomem *regs, void __iomem *ioregs,
+ enum ast_chip *chip_out,
+ enum ast_config_mode *config_mode_out)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ enum ast_config_mode config_mode = ast_use_defaults;
+ uint32_t scu_rev = 0xffffffff;
+ enum ast_chip chip;
+ u32 data;
+ u8 vgacrd0, vgacrd1;
+
+ /*
+ * Find configuration mode and read SCU revision
+ */
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
+ /* We do, disable P2A access */
+ config_mode = ast_use_dt;
+ scu_rev = data;
+ } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
+ /*
+ * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
+ * is disabled. We force using P2A if VGA only mode bit
+ * is set D[7]
+ */
+ vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);
+ vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) {
+
+ /*
+ * We have a P2A bridge and it is enabled.
+ */
+
+ /* Patch AST2500/AST2510 */
+ if ((pdev->revision & 0xf0) == 0x40) {
+ if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
+ ast_patch_ahb_2500(regs);
+ }
+
+ /* Double check that it's actually working */
+ data = __ast_read32(regs, 0xf004);
+ if ((data != 0xffffffff) && (data != 0x00)) {
+ config_mode = ast_use_p2a;
+
+ /* Read SCU7c (silicon revision register) */
+ __ast_write32(regs, 0xf004, 0x1e6e0000);
+ __ast_write32(regs, 0xf000, 0x1);
+ scu_rev = __ast_read32(regs, 0x1207c);
+ }
+ }
+ }
+
+ switch (config_mode) {
+ case ast_use_defaults:
+ dev_info(dev, "Using default configuration\n");
+ break;
+ case ast_use_dt:
+ dev_info(dev, "Using device-tree for configuration\n");
+ break;
+ case ast_use_p2a:
+ dev_info(dev, "Using P2A bridge for configuration\n");
+ break;
+ }
+
+ /*
+ * Identify chipset
+ */
+
+ if (pdev->revision >= 0x50) {
+ chip = AST2600;
+ dev_info(dev, "AST 2600 detected\n");
+ } else if (pdev->revision >= 0x40) {
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ chip = AST2510;
+ dev_info(dev, "AST 2510 detected\n");
+ break;
+ default:
+ chip = AST2500;
+ dev_info(dev, "AST 2500 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x30) {
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ chip = AST1400;
+ dev_info(dev, "AST 1400 detected\n");
+ break;
+ default:
+ chip = AST2400;
+ dev_info(dev, "AST 2400 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x20) {
+ switch (scu_rev & 0x300) {
+ case 0x0000:
+ chip = AST1300;
+ dev_info(dev, "AST 1300 detected\n");
+ break;
+ default:
+ chip = AST2300;
+ dev_info(dev, "AST 2300 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x10) {
+ switch (scu_rev & 0x0300) {
+ case 0x0200:
+ chip = AST1100;
+ dev_info(dev, "AST 1100 detected\n");
+ break;
+ case 0x0100:
+ chip = AST2200;
+ dev_info(dev, "AST 2200 detected\n");
+ break;
+ case 0x0000:
+ chip = AST2150;
+ dev_info(dev, "AST 2150 detected\n");
+ break;
+ default:
+ chip = AST2100;
+ dev_info(dev, "AST 2100 detected\n");
+ break;
+ }
+ } else {
+ chip = AST2000;
+ dev_info(dev, "AST 2000 detected\n");
+ }
+
+ *chip_out = chip;
+ *config_mode_out = config_mode;
+
+ return 0;
+}
+
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ast_device *ast;
- struct drm_device *dev;
+ struct device *dev = &pdev->dev;
int ret;
+ void __iomem *regs;
+ void __iomem *ioregs;
+ enum ast_config_mode config_mode;
+ enum ast_chip chip;
+ struct drm_device *drm;
+ bool need_post = false;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
if (ret)
@@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
- if (IS_ERR(ast))
- return PTR_ERR(ast);
- dev = &ast->base;
+ regs = pcim_iomap(pdev, 1, 0);
+ if (!regs)
+ return -EIO;
+
+ if (pdev->revision >= 0x40) {
+ /*
+ * On AST2500 and later models, MMIO is enabled by
+ * default. Use it to stay compatible with ARM.
+ */
+ resource_size_t len = pci_resource_len(pdev, 1);
+
+ if (len < AST_IO_MM_OFFSET)
+ return -EIO;
+ if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = regs + AST_IO_MM_OFFSET;
+ } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
+ /*
+ * Map I/O registers if we have a PCI BAR for I/O.
+ */
+ resource_size_t len = pci_resource_len(pdev, 2);
+
+ if (len < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = pcim_iomap(pdev, 2, 0);
+ if (!ioregs)
+ return -EIO;
+ } else {
+ /*
+ * Anything else is best effort.
+ */
+ resource_size_t len = pci_resource_len(pdev, 1);
+
+ if (len < AST_IO_MM_OFFSET)
+ return -EIO;
+ if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = regs + AST_IO_MM_OFFSET;
+
+ dev_info(dev, "Platform has no I/O space, using MMIO\n");
+ }
+
+ if (!ast_is_vga_enabled(ioregs)) {
+ dev_info(dev, "VGA not enabled on entry, requesting chip POST\n");
+ need_post = true;
+ }
+
+ /*
+ * If VGA isn't enabled, we need to enable now or subsequent
+ * access to the scratch registers will fail.
+ */
+ if (need_post)
+ ast_enable_vga(ioregs);
+ /* Enable extended register access */
+ ast_open_key(ioregs);
+
+ ret = ast_enable_mmio(dev, ioregs);
+ if (ret)
+ return ret;
+
+ ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
+ if (ret)
+ return ret;
+
+ drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+ pci_set_drvdata(pdev, drm);
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(drm, ent->driver_data);
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 32);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
}
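
ast_enable_mmio() above leans on devm_add_action_or_reset() so the VGA-decode restore runs automatically when the device is torn down (and immediately if registration fails). The general shape of that pattern, sketched with placeholder names:

#include <linux/device.h>

static void example_cleanup(void *data)
{
	/* undo whatever example_setup() programmed; runs at driver detach */
}

static int example_setup(struct device *dev, void __iomem *ioregs)
{
	/* ... program the hardware ... */

	/*
	 * Register the cleanup action; if registration fails, the action
	 * is invoked right away and the error is returned.
	 */
	return devm_add_action_or_reset(dev, example_cleanup,
					(void __force *)ioregs);
}
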
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 2aee32344f4a..3be5ccf1f5f4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,12 @@ enum ast_tx_chip {
#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
+enum ast_config_mode {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+};
+
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@@ -174,6 +180,17 @@ to_ast_sil164_connector(struct drm_connector *connector)
return container_of(connector, struct ast_sil164_connector, base);
}
+struct ast_bmc_connector {
+ struct drm_connector base;
+ struct drm_connector *physical_connector;
+};
+
+static inline struct ast_bmc_connector *
+to_ast_bmc_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct ast_bmc_connector, base);
+}
+
/*
* Device
*/
@@ -181,12 +198,13 @@ to_ast_sil164_connector(struct drm_connector *connector)
struct ast_device {
struct drm_device base;
- struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
+ enum ast_config_mode config_mode;
enum ast_chip chip;
+
uint32_t dram_bus_width;
uint32_t dram_type;
uint32_t mclk;
@@ -196,6 +214,8 @@ struct ast_device {
unsigned long vram_size;
unsigned long vram_fb_available;
+ struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
+
struct ast_plane primary_plane;
struct ast_plane cursor_plane;
struct drm_crtc crtc;
@@ -218,16 +238,11 @@ struct ast_device {
} astdp;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_bmc_connector bmc_connector;
} bmc;
} output;
bool support_wide_screen;
- enum {
- ast_use_p2a,
- ast_use_dt,
- ast_use_defaults
- } config_mode;
unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */
u8 *dp501_fw_addr;
@@ -239,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
return container_of(dev, struct ast_device, base);
}
-struct ast_device *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags);
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
static inline unsigned long __ast_gen(struct ast_device *ast)
{
@@ -261,55 +280,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
#define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6)
#define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7)
+static inline u8 __ast_read8(const void __iomem *addr, u32 reg)
+{
+ return ioread8(addr + reg);
+}
+
+static inline u32 __ast_read32(const void __iomem *addr, u32 reg)
+{
+ return ioread32(addr + reg);
+}
+
+static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val)
+{
+ iowrite8(val, addr + reg);
+}
+
+static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val)
+{
+ iowrite32(val, addr + reg);
+}
+
+static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index)
+{
+ __ast_write8(addr, reg, index);
+ return __ast_read8(addr, reg + 1);
+}
+
+static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask)
+{
+ u8 val = __ast_read8_i(addr, reg, index);
+
+ return val & read_mask;
+}
+
+static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
+{
+ __ast_write8(addr, reg, index);
+ __ast_write8(addr, reg + 1, val);
+}
+
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+ u8 val)
+{
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+
+ tmp |= val;
+ __ast_write8_i(addr, reg, index, tmp);
+}
+
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
{
- return ioread32(ast->regs + reg);
+ return __ast_read32(ast->regs, reg);
}
static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
{
- iowrite32(val, ast->regs + reg);
+ __ast_write32(ast->regs, reg, val);
}
static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
{
- return ioread8(ast->ioregs + reg);
+ return __ast_read8(ast->ioregs, reg);
}
static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
{
- iowrite8(val, ast->ioregs + reg);
+ __ast_write8(ast->ioregs, reg, val);
}
static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index)
{
- ast_io_write8(ast, base, index);
- ++base;
- return ast_io_read8(ast, base);
+ return __ast_read8_i(ast->ioregs, base, index);
}
static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
u8 preserve_mask)
{
- u8 val = ast_get_index_reg(ast, base, index);
-
- return val & preserve_mask;
+ return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask);
}
static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val)
{
- ast_io_write8(ast, base, index);
- ++base;
- ast_io_write8(ast, base, val);
+ __ast_write8_i(ast->ioregs, base, index, val);
}
static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
u8 preserve_mask, u8 val)
{
- u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask);
-
- tmp |= val;
- ast_set_index_reg(ast, base, index, tmp);
+ __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
@@ -431,7 +489,7 @@ int ast_mm_init(struct ast_device *ast);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_device *ast);
+void ast_patch_ahb_2500(void __iomem *regs);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
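
The new __ast_*8_i() helpers above wrap the usual VGA index/data pairing: write the index to the base port, then access the data at base + 1. A small usage sketch against those helpers (the CRTC indices are illustrative):

static u8 example_read_scratch(void __iomem *ioregs)
{
	/* read CRTC register 0xd0 through the index/data pair at AST_IO_VGACRI */
	return __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);
}

static void example_set_bit(void __iomem *ioregs)
{
	/* read-modify-write: preserve all bits (mask 0xff), then OR in bit 4 */
	__ast_write8_i_masked(ioregs, AST_IO_VGACRI, 0xa3, 0xff, BIT(4));
}
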
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f4ab40e22cea..2f3ad5f949fc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,180 +35,6 @@
#include "ast_drv.h"
-static bool ast_is_vga_enabled(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
- u8 ch;
-
- ch = ast_io_read8(ast, AST_IO_VGAER);
-
- return !!(ch & 0x01);
-}
-
-static void ast_enable_vga(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
-
- ast_io_write8(ast, AST_IO_VGAER, 0x01);
- ast_io_write8(ast, AST_IO_VGAMR_W, 0x01);
-}
-
-/*
- * Run this function as part of the HW device cleanup; not
- * when the DRM device gets released.
- */
-static void ast_enable_mmio_release(void *data)
-{
- struct ast_device *ast = data;
-
- /* enable standard VGA decode */
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04);
-}
-
-static int ast_enable_mmio(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
-
- return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
-}
-
-static void ast_open_key(struct ast_device *ast)
-{
- ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8);
-}
-
-static int ast_device_config_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct device_node *np = dev->dev->of_node;
- uint32_t scu_rev = 0xffffffff;
- u32 data;
- u8 jregd0, jregd1;
-
- /*
- * Find configuration mode and read SCU revision
- */
-
- ast->config_mode = ast_use_defaults;
-
- /* Check if we have device-tree properties */
- if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
- /* We do, disable P2A access */
- ast->config_mode = ast_use_dt;
- scu_rev = data;
- } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
- /*
- * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
- * is disabled. We force using P2A if VGA only mode bit
- * is set D[7]
- */
- jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
-
- /*
- * We have a P2A bridge and it is enabled.
- */
-
- /* Patch AST2500/AST2510 */
- if ((pdev->revision & 0xf0) == 0x40) {
- if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(ast);
- }
-
- /* Double check that it's actually working */
- data = ast_read32(ast, 0xf004);
- if ((data != 0xffffffff) && (data != 0x00)) {
- ast->config_mode = ast_use_p2a;
-
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- scu_rev = ast_read32(ast, 0x1207c);
- }
- }
- }
-
- switch (ast->config_mode) {
- case ast_use_defaults:
- drm_info(dev, "Using default configuration\n");
- break;
- case ast_use_dt:
- drm_info(dev, "Using device-tree for configuration\n");
- break;
- case ast_use_p2a:
- drm_info(dev, "Using P2A bridge for configuration\n");
- break;
- }
-
- /*
- * Identify chipset
- */
-
- if (pdev->revision >= 0x50) {
- ast->chip = AST2600;
- drm_info(dev, "AST 2600 detected\n");
- } else if (pdev->revision >= 0x40) {
- switch (scu_rev & 0x300) {
- case 0x0100:
- ast->chip = AST2510;
- drm_info(dev, "AST 2510 detected\n");
- break;
- default:
- ast->chip = AST2500;
- drm_info(dev, "AST 2500 detected\n");
- }
- } else if (pdev->revision >= 0x30) {
- switch (scu_rev & 0x300) {
- case 0x0100:
- ast->chip = AST1400;
- drm_info(dev, "AST 1400 detected\n");
- break;
- default:
- ast->chip = AST2400;
- drm_info(dev, "AST 2400 detected\n");
- }
- } else if (pdev->revision >= 0x20) {
- switch (scu_rev & 0x300) {
- case 0x0000:
- ast->chip = AST1300;
- drm_info(dev, "AST 1300 detected\n");
- break;
- default:
- ast->chip = AST2300;
- drm_info(dev, "AST 2300 detected\n");
- break;
- }
- } else if (pdev->revision >= 0x10) {
- switch (scu_rev & 0x0300) {
- case 0x0200:
- ast->chip = AST1100;
- drm_info(dev, "AST 1100 detected\n");
- break;
- case 0x0100:
- ast->chip = AST2200;
- drm_info(dev, "AST 2200 detected\n");
- break;
- case 0x0000:
- ast->chip = AST2150;
- drm_info(dev, "AST 2150 detected\n");
- break;
- default:
- ast->chip = AST2100;
- drm_info(dev, "AST 2100 detected\n");
- break;
- }
- } else {
- ast->chip = AST2000;
- drm_info(dev, "AST 2000 detected\n");
- }
-
- return 0;
-}
-
static void ast_detect_widescreen(struct ast_device *ast)
{
u8 jreg;
@@ -424,69 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-struct ast_device *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags)
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
{
struct drm_device *dev;
struct ast_device *ast;
- bool need_post = false;
- int ret = 0;
+ int ret;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
- return ast;
+ return ERR_CAST(ast);
dev = &ast->base;
- pci_set_drvdata(pdev, dev);
-
- ret = drmm_mutex_init(dev, &ast->ioregs_lock);
- if (ret)
- return ERR_PTR(ret);
-
- ast->regs = pcim_iomap(pdev, 1, 0);
- if (!ast->regs)
- return ERR_PTR(-EIO);
-
- /*
- * After AST2500, MMIO is enabled by default, and it should be adopted
- * to be compatible with Arm.
- */
- if (pdev->revision >= 0x40) {
- ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
- } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
- drm_info(dev, "platform has no IO space, trying MMIO\n");
- ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
- }
-
- /* "map" IO regs if the above hasn't done so already */
- if (!ast->ioregs) {
- ast->ioregs = pcim_iomap(pdev, 2, 0);
- if (!ast->ioregs)
- return ERR_PTR(-EIO);
- }
-
- if (!ast_is_vga_enabled(dev)) {
- drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
- need_post = true;
- }
-
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail.
- */
- if (need_post)
- ast_enable_vga(dev);
-
- /* Enable extended register access */
- ast_open_key(ast);
- ret = ast_enable_mmio(ast);
- if (ret)
- return ERR_PTR(ret);
-
- ret = ast_device_config_init(ast);
- if (ret)
- return ERR_PTR(ret);
+ ast->chip = chip;
+ ast->config_mode = config_mode;
+ ast->regs = regs;
+ ast->ioregs = ioregs;
ast_detect_widescreen(ast);
ast_detect_tx_chip(ast, need_post);
@@ -517,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);
- return ast;
+ return dev;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index cb9614984285..a718646a66b8 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
@@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
@@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
succ = ast_astdp_read_edid(connector->dev, edid);
if (succ < 0)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
@@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
@@ -1767,6 +1767,30 @@ static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
+ struct drm_connector *physical_connector = bmc_connector->physical_connector;
+
+ /*
+ * Most user-space compositors cannot handle more than one connected
+ * connector per CRTC. Hence, we only mark the BMC as connected if the
+ * physical connector is disconnected. If the physical connector's status
+ * is connected or unknown, the BMC remains disconnected. This has no
+ * effect on the output of the BMC.
+ *
+ * FIXME: Remove this logic once user-space compositors can handle more
+ * than one connector per CRTC. The BMC should always be connected.
+ */
+
+ if (physical_connector && physical_connector->status == connector_status_disconnected)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
{
return drm_add_modes_noedid(connector, 4096, 4096);
@@ -1774,6 +1798,7 @@ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
.get_modes = ast_bmc_connector_helper_get_modes,
+ .detect_ctx = ast_bmc_connector_helper_detect_ctx,
};
static const struct drm_connector_funcs ast_bmc_connector_funcs = {
@@ -1784,12 +1809,33 @@ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_bmc_output_init(struct ast_device *ast)
+static int ast_bmc_connector_init(struct drm_device *dev,
+ struct ast_bmc_connector *bmc_connector,
+ struct drm_connector *physical_connector)
+{
+ struct drm_connector *connector = &bmc_connector->base;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
+
+ bmc_connector->physical_connector = physical_connector;
+
+ return 0;
+}
+
+static int ast_bmc_output_init(struct ast_device *ast,
+ struct drm_connector *physical_connector)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.bmc.encoder;
- struct drm_connector *connector = &ast->output.bmc.connector;
+ struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
+ struct drm_connector *connector = &bmc_connector->base;
int ret;
ret = drm_encoder_init(dev, encoder,
@@ -1799,13 +1845,10 @@ static int ast_bmc_output_init(struct ast_device *ast)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
+ ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
if (ret)
return ret;
- drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
-
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
@@ -1827,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
* display modes. Protect access to I/O registers by acquiring
* the I/O-register lock. Released in atomic_flush().
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail_rpm(state);
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
}
static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
@@ -1864,8 +1907,13 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
+ struct drm_connector *physical_connector = NULL;
int ret;
+ ret = drmm_mutex_init(dev, &ast->modeset_lock);
+ if (ret)
+ return ret;
+
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
@@ -1904,23 +1952,27 @@ int ast_mode_config_init(struct ast_device *ast)
ret = ast_vga_output_init(ast);
if (ret)
return ret;
+ physical_connector = &ast->output.vga.vga_connector.base;
}
if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
if (ret)
return ret;
+ physical_connector = &ast->output.sil164.sil164_connector.base;
}
if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
if (ret)
return ret;
+ physical_connector = &ast->output.dp501.connector;
}
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast);
if (ret)
return ret;
+ physical_connector = &ast->output.astdp.connector;
}
- ret = ast_bmc_output_init(ast);
+ ret = ast_bmc_output_init(ast, physical_connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 7a993a384314..22f548805dfb 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
}
-u32 ast_mindwm(struct ast_device *ast, u32 r)
+static u32 __ast_mindwm(void __iomem *regs, u32 r)
{
- uint32_t data;
+ u32 data;
- ast_write32(ast, 0xf004, r & 0xffff0000);
- ast_write32(ast, 0xf000, 0x1);
+ __ast_write32(regs, 0xf004, r & 0xffff0000);
+ __ast_write32(regs, 0xf000, 0x1);
do {
- data = ast_read32(ast, 0xf004) & 0xffff0000;
+ data = __ast_read32(regs, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
- return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+
+ return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
- uint32_t data;
- ast_write32(ast, 0xf004, r & 0xffff0000);
- ast_write32(ast, 0xf000, 0x1);
+ u32 data;
+
+ __ast_write32(regs, 0xf004, r & 0xffff0000);
+ __ast_write32(regs, 0xf000, 0x1);
+
do {
- data = ast_read32(ast, 0xf004) & 0xffff0000;
+ data = __ast_read32(regs, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
- ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+
+ __ast_write32(regs, 0x10000 + (r & 0x0000ffff), v);
+}
+
+u32 ast_mindwm(struct ast_device *ast, u32 r)
+{
+ return __ast_mindwm(ast->regs, r);
+}
+
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+{
+ __ast_moutdwm(ast->regs, r, v);
}
/*
@@ -1987,17 +2001,18 @@ static bool ast_dram_init_2500(struct ast_device *ast)
return true;
}
-void ast_patch_ahb_2500(struct ast_device *ast)
+void ast_patch_ahb_2500(void __iomem *regs)
{
- u32 data;
+ u32 data;
/* Clear bus lock condition */
- ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
- ast_moutdwm(ast, 0x1e600084, 0x00010000);
- ast_moutdwm(ast, 0x1e600088, 0x00000000);
- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
/*
* If "Fast reset" is enabled for the ARM-ICE debugger,
* then the WDT needs to be enabled, where
* WDT04 is the WDT#1 Reload reg,
* [1]:= 1:WDT will be cleared and disabled after timeout occurs
* [0]:= 1:WDT enable
*/
- ast_moutdwm(ast, 0x1E785004, 0x00000010);
- ast_moutdwm(ast, 0x1E785008, 0x00004755);
- ast_moutdwm(ast, 0x1E78500c, 0x00000033);
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
udelay(1000);
}
+
do {
- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1e6e2000);
- } while (data != 1);
- ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
}
void ast_post_chip_2500(struct drm_device *dev)
@@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev)
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
- ast_patch_ahb_2500(ast);
+ ast_patch_ahb_2500(ast->regs);
/* Disable watchdog */
ast_moutdwm(ast, 0x1E78502C, 0x00000000);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 555286ecf520..62dddbf3fe56 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -10,10 +10,17 @@
*/
#define AST_IO_MM_OFFSET (0x380)
+#define AST_IO_MM_LENGTH (128)
#define AST_IO_VGAARI_W (0x40)
+
#define AST_IO_VGAMR_W (0x42)
+#define AST_IO_VGAMR_R (0x4c)
+#define AST_IO_VGAMR_IOSEL BIT(0)
+
#define AST_IO_VGAER (0x43)
+#define AST_IO_VGAER_VGA_ENABLE BIT(0)
+
#define AST_IO_VGASRI (0x44)
#define AST_IO_VGADRR (0x47)
#define AST_IO_VGADWR (0x48)
@@ -21,14 +28,15 @@
#define AST_IO_VGAGRI (0x4E)
#define AST_IO_VGACRI (0x54)
+#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
+#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-#define AST_IO_VGAMR_R (0x4C)
-
/*
* Display Transmitter Type
*/
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ba82a1142adf..19d2dc05c397 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -12,6 +12,23 @@ config DRM_PANEL_BRIDGE
help
DRM bridge wrapper of DRM panels
+config DRM_AUX_BRIDGE
+ tristate
+ depends on DRM_BRIDGE && OF
+ select AUXILIARY_BUS
+ select DRM_PANEL_BRIDGE
+ help
+ Simple transparent bridge that is used by several non-DRM drivers to
+ build bridges chain.
+
+config DRM_AUX_HPD_BRIDGE
+ tristate
+ depends on DRM_BRIDGE && OF
+ select AUXILIARY_BUS
+ help
+ Simple bridge that terminates the bridge chain and provides HPD
+ support.
+
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 2b892b7ed59e..017b5832733b 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o
+obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8f740154707d..ef31033439bc 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1298,10 +1298,32 @@ static void anx7625_config(struct anx7625_data *ctx)
XTAL_FRQ_SEL, XTAL_FRQ_27M);
}
+static int anx7625_hpd_timer_config(struct anx7625_data *ctx)
+{
+ int ret;
+
+ /* Set irq detect window to 2ms */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT8_15,
+ (HPD_TIME >> 8) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT16_23,
+ (HPD_TIME >> 16) & 0xFF);
+
+ return ret;
+}
+
+static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2);
+}
+
static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
{
struct device *dev = ctx->dev;
- int ret;
+ int ret, val;
/* Reset main ocm */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
@@ -1315,6 +1337,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
else
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+
+ /*
+ * Make sure the HPD GPIO is already configured after OCM release before
+ * setting the HPD detect window register. Here we poll the status register
+ * for at most 40ms, then configure the HPD irq detect window register
+ */
+ readx_poll_timeout(anx7625_read_hpd_gpio_config_status,
+ ctx, val,
+ ((val & HPD_SOURCE) || (val < 0)),
+ 2000, 2000 * 20);
+
+ /* Set HPD irq detect window to 2ms */
+ anx7625_hpd_timer_config(ctx);
}
static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
@@ -1437,20 +1472,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
- int ret;
-
- /* Set irq detect window to 2ms */
- ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT8_15,
- (HPD_TIME >> 8) & 0xFF);
- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT16_23,
- (HPD_TIME >> 16) & 0xFF);
- if (ret < 0)
- return ret;
-
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
@@ -1464,9 +1485,6 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
if (ctx->pdata.intp_irq)
return 0;
- /* Delay 200ms for FW HPD de-bounce */
- msleep(200);
-
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
ctx, val,
((val & HPD_STATUS) || (val < 0)),
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 5af819611ebc..66ebee7f3d83 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -259,6 +259,10 @@
#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in 0: no RX in */
#define AP_DISABLE_PD BIT(6)
#define AP_DISABLE_DISPLAY BIT(7)
+
+#define GPIO_CTRL_2 0x49
+#define HPD_SOURCE BIT(6)
+
/***************************************************************/
/* Register definition of device address 0x84 */
#define MIPI_PHY_CONTROL_3 0x03
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
new file mode 100644
index 000000000000..49d7c2ab1ecc
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_bridge_ida);
+
+static void drm_aux_bridge_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&drm_aux_bridge_ida, adev->id);
+
+ kfree(adev);
+}
+
+static void drm_aux_bridge_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_aux_bridge_register - Create a simple bridge device to link the chain
+ * @parent: device instance providing this bridge
+ *
+ * Creates a simple DRM bridge that doesn't implement any drm_bridge
+ * operations. Such bridges merely fill a place in the bridge chain linking
+ * surrounding DRM bridges.
+ *
+ * Return: zero on success, negative error code on failure
+ */
+int drm_aux_bridge_register(struct device *parent)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(adev);
+ return ret;
+ }
+
+ adev->id = ret;
+ adev->name = "aux_bridge";
+ adev->dev.parent = parent;
+ adev->dev.of_node = parent->of_node;
+ adev->dev.release = drm_aux_bridge_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ ida_free(&drm_aux_bridge_ida, adev->id);
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev);
+}
+EXPORT_SYMBOL_GPL(drm_aux_bridge_register);
+
+struct drm_aux_bridge_data {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct device *dev;
+};
+
+static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct drm_aux_bridge_data *data;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ data = container_of(bridge, struct drm_aux_bridge_data, bridge);
+
+ return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs drm_aux_bridge_funcs = {
+ .attach = drm_aux_bridge_attach,
+};
+
+static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct drm_aux_bridge_data *data;
+
+ data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &auxdev->dev;
+ data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
+ if (IS_ERR(data->next_bridge))
+ return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
+ "failed to acquire drm_bridge\n");
+
+ data->bridge.funcs = &drm_aux_bridge_funcs;
+ data->bridge.of_node = data->dev->of_node;
+
+ return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_bridge_table[] = {
+ { .name = KBUILD_MODNAME ".aux_bridge" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table);
+
+static struct auxiliary_driver drm_aux_bridge_drv = {
+ .name = "aux_bridge",
+ .id_table = drm_aux_bridge_table,
+ .probe = drm_aux_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM transparent bridge");
+MODULE_LICENSE("GPL");
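
A minimal usage sketch for the transparent aux bridge, assuming a hypothetical platform driver (example_encoder_probe is made up for illustration) whose OF node exposes a downstream bridge that devm_drm_of_get_bridge() can resolve; the auxiliary device is torn down automatically through the devm action installed by drm_aux_bridge_register():

#include <linux/platform_device.h>
#include <drm/bridge/aux-bridge.h>

static int example_encoder_probe(struct platform_device *pdev)
{
	int ret;

	/* Fill the gap in the bridge chain below this device. */
	ret = drm_aux_bridge_register(&pdev->dev);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to register aux bridge\n");

	return 0;
}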
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
new file mode 100644
index 000000000000..1999a053d59b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_hpd_bridge_ida);
+
+struct drm_aux_hpd_bridge_data {
+ struct drm_bridge bridge;
+ struct device *dev;
+};
+
+static void drm_aux_hpd_bridge_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+
+ of_node_put(adev->dev.platform_data);
+
+ kfree(adev);
+}
+
+static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Creates a simple DRM bridge with the type set to
+ * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
+ * able to send the HPD events.
+ *
+ * Return: device instance that will handle created bridge or an error code
+ * encoded into the pointer.
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ adev->id = ret;
+ adev->name = "dp_hpd_bridge";
+ adev->dev.parent = parent;
+ adev->dev.of_node = parent->of_node;
+ adev->dev.release = drm_aux_hpd_bridge_release;
+ adev->dev.platform_data = np;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &adev->dev;
+}
+EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register);
+
+/**
+ * drm_aux_hpd_bridge_notify - notify hot plug detection events
+ * @dev: device created for the HPD bridge
+ * @status: output connection status
+ *
+ * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug
+ * detection events for bridges created via drm_dp_hpd_bridge_register().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev);
+
+ if (!data)
+ return;
+
+ drm_bridge_hpd_notify(&data->bridge, status);
+}
+EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
+
+static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = {
+ .attach = drm_aux_hpd_bridge_attach,
+};
+
+static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct drm_aux_hpd_bridge_data *data;
+
+ data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &auxdev->dev;
+ data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
+ data->bridge.of_node = dev_get_platdata(data->dev);
+ data->bridge.ops = DRM_BRIDGE_OP_HPD;
+ data->bridge.type = id->driver_data;
+
+ auxiliary_set_drvdata(auxdev, data);
+
+ return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = {
+ { .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table);
+
+static struct auxiliary_driver drm_aux_hpd_bridge_drv = {
+ .name = "aux_hpd_bridge",
+ .id_table = drm_aux_hpd_bridge_table,
+ .probe = drm_aux_hpd_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_hpd_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM HPD bridge");
+MODULE_LICENSE("GPL");
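
A usage sketch for the HPD bridge, assuming a hypothetical controller (the example_dp_ctrl names are made up) that owns the physical HPD signal: it registers the bridge once and forwards connect/disconnect events from a sleepable context:

#include <linux/err.h>
#include <linux/of.h>
#include <drm/drm_connector.h>
#include <drm/bridge/aux-bridge.h>

struct example_dp_ctrl {
	struct device *hpd_bridge;
};

static int example_dp_ctrl_init(struct example_dp_ctrl *ctrl,
				struct device *parent,
				struct device_node *dp_node)
{
	ctrl->hpd_bridge = drm_dp_hpd_bridge_register(parent, dp_node);

	return PTR_ERR_OR_ZERO(ctrl->hpd_bridge);
}

static void example_dp_ctrl_hpd_event(struct example_dp_ctrl *ctrl,
				      bool connected)
{
	/* Must be called from a context that can sleep. */
	drm_aux_hpd_bridge_notify(ctrl->hpd_bridge,
				  connected ? connector_status_connected :
					      connector_status_disconnected);
}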
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 6af565ac307a..7d470527455b 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2596,11 +2596,10 @@ clk_disable:
return ret;
}
-static int cdns_mhdp_remove(struct platform_device *pdev)
+static void cdns_mhdp_remove(struct platform_device *pdev)
{
struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
unsigned long timeout = msecs_to_jiffies(100);
- bool stop_fw = false;
int ret;
drm_bridge_remove(&mhdp->bridge);
@@ -2608,18 +2607,19 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
ret = wait_event_timeout(mhdp->fw_load_wq,
mhdp->hw_state == MHDP_HW_READY,
timeout);
- if (ret == 0)
- dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
- __func__);
- else
- stop_fw = true;
-
spin_lock(&mhdp->start_lock);
mhdp->hw_state = MHDP_HW_STOPPED;
spin_unlock(&mhdp->start_lock);
- if (stop_fw)
+ if (ret == 0) {
+ dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
+ __func__);
+ } else {
ret = cdns_mhdp_set_firmware_active(mhdp, false);
+ if (ret)
+ dev_err(mhdp->dev, "Failed to stop firmware (%pe)\n",
+ ERR_PTR(ret));
+ }
phy_exit(mhdp->phy);
@@ -2634,8 +2634,6 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
clk_disable_unprepare(mhdp->clk);
-
- return ret;
}
static const struct of_device_id mhdp_ids[] = {
@@ -2658,7 +2656,7 @@ static struct platform_driver mhdp_driver = {
.of_match_table = mhdp_ids,
},
.probe = cdns_mhdp_probe,
- .remove = cdns_mhdp_remove,
+ .remove_new = cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 946212a95598..5e3b8edcf794 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
- int ret, tries = 3;
+ int ret = -EINVAL;
+ int tries = 3;
u32 i;
for (i = 0; i < tries; i++) {
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index 3ff30ce80c5b..2347f8dd632f 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -226,8 +226,8 @@ dphy_pll_get_configure_from_opts(struct imx93_dsi *dsi,
unsigned long fout;
unsigned long best_fout = 0;
unsigned int fvco_div;
- unsigned int min_n, max_n, n, best_n;
- unsigned long m, best_m;
+ unsigned int min_n, max_n, n, best_n = UINT_MAX;
+ unsigned long m, best_m = 0;
unsigned long min_delta = ULONG_MAX;
unsigned long delta;
u64 tmp;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 03532efb893b..273157428c82 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -43,6 +43,8 @@ struct lt8912 {
struct videomode mode;
+ struct regulator_bulk_data supplies[7];
+
u8 data_lanes;
bool is_power_on;
};
@@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt)
static int lt8912_hard_power_on(struct lt8912 *lt)
{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies);
+ if (ret)
+ return ret;
+
gpiod_set_value_cansleep(lt->gp_reset, 0);
msleep(20);
@@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt)
{
gpiod_set_value_cansleep(lt->gp_reset, 1);
msleep(20);
+
+ regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies);
+
lt->is_power_on = false;
}
@@ -634,6 +645,48 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.get_edid = lt8912_bridge_get_edid,
};
+static int lt8912_bridge_resume(struct device *dev)
+{
+ struct lt8912 *lt = dev_get_drvdata(dev);
+ int ret;
+
+ ret = lt8912_hard_power_on(lt);
+ if (ret)
+ return ret;
+
+ ret = lt8912_soft_power_on(lt);
+ if (ret)
+ return ret;
+
+ return lt8912_video_on(lt);
+}
+
+static int lt8912_bridge_suspend(struct device *dev)
+{
+ struct lt8912 *lt = dev_get_drvdata(dev);
+
+ lt8912_hard_power_off(lt);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume);
+
+static int lt8912_get_regulators(struct lt8912 *lt)
+{
+ unsigned int i;
+ const char * const supply_names[] = {
+ "vdd", "vccmipirx", "vccsysclk", "vcclvdstx",
+ "vcchdmitx", "vcclvdspll", "vcchdmipll"
+ };
+
+ for (i = 0; i < ARRAY_SIZE(lt->supplies); i++)
+ lt->supplies[i].supply = supply_names[i];
+
+ return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies),
+ lt->supplies);
+}
+
static int lt8912_parse_dt(struct lt8912 *lt)
{
struct gpio_desc *gp_reset;
@@ -685,6 +738,10 @@ static int lt8912_parse_dt(struct lt8912 *lt)
goto err_free_host_node;
}
+ ret = lt8912_get_regulators(lt);
+ if (ret)
+ goto err_free_host_node;
+
of_node_put(port_node);
return 0;
@@ -770,6 +827,7 @@ static struct i2c_driver lt8912_i2c_driver = {
.driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
+ .pm = pm_sleep_ptr(&lt8912_bridge_pm_ops),
},
.probe = lt8912_probe,
.remove = lt8912_remove,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d81920227a8a..7c0076e49953 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
int ret;
ret = i2c_master_send(ptn_bridge->client, &addr, 1);
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
return ret;
}
ret = i2c_master_recv(ptn_bridge->client, buf, len);
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
return ret;
}
@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
buf[1] = val;
ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index ef2e373606ba..615cc8f950d7 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client)
} else {
if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
dev_err(dev, "failed to parse HPD number\n");
- return ret;
+ return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index c45c07840f64..5b8e1dfc458d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1413,11 +1413,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (!pdata->pwm_enabled) {
- ret = pm_runtime_get_sync(pdata->dev);
- if (ret < 0) {
- pm_runtime_put_sync(pdata->dev);
+ ret = pm_runtime_resume_and_get(pdata->dev);
+ if (ret < 0)
return ret;
- }
}
if (state->enabled) {
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index e0e015243a60..f9fb35683a27 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -179,13 +179,11 @@ static int tpd12s015_probe(struct platform_device *pdev)
return 0;
}
-static int __exit tpd12s015_remove(struct platform_device *pdev)
+static void tpd12s015_remove(struct platform_device *pdev)
{
struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
drm_bridge_remove(&tpd->bridge);
-
- return 0;
}
static const struct of_device_id tpd12s015_of_match[] = {
@@ -197,7 +195,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
static struct platform_driver tpd12s015_driver = {
.probe = tpd12s015_probe,
- .remove = __exit_p(tpd12s015_remove),
+ .remove_new = tpd12s015_remove,
.driver = {
.name = "tpd12s015",
.of_match_table = tpd12s015_of_match,
diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt
index d8856d1581fd..e9994c9db799 100644
--- a/drivers/gpu/drm/ci/xfails/requirements.txt
+++ b/drivers/gpu/drm/ci/xfails/requirements.txt
@@ -5,7 +5,7 @@ termcolor==2.3.0
certifi==2023.7.22
charset-normalizer==3.2.0
idna==3.4
-pip==23.2.1
+pip==23.3
python-gitlab==3.15.0
requests==2.31.0
requests-toolbelt==1.0.0
@@ -13,5 +13,5 @@ ruamel.yaml==0.17.32
ruamel.yaml.clib==0.2.7
setuptools==68.0.0
tenacity==8.2.3
-urllib3==2.0.4
-wheel==0.41.1 \ No newline at end of file
+urllib3==2.0.7
+wheel==0.41.1
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index f3680f4e6970..d72b6f9a352c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2245,6 +2245,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
+ /* Synaptics DP1.4 MST hubs require DSC for some modes on which they apply HBLANK expansion. */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
@@ -2327,6 +2329,33 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
EXPORT_SYMBOL(drm_dp_read_desc);
/**
+ * drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
+ * @dsc_dpcd: DSC capabilities from DPCD
+ *
+ * Returns the bpp precision supported by the DP sink.
+ */
+u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
+
+ switch (bpp_increment_dpcd) {
+ case DP_DSC_BITS_PER_PIXEL_1_16:
+ return 16;
+ case DP_DSC_BITS_PER_PIXEL_1_8:
+ return 8;
+ case DP_DSC_BITS_PER_PIXEL_1_4:
+ return 4;
+ case DP_DSC_BITS_PER_PIXEL_1_2:
+ return 2;
+ case DP_DSC_BITS_PER_PIXEL_1_1:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr);
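
A short usage sketch (hypothetical helper, not part of this series) that turns the sink's bpp increment into a step in .4 binary fixed point, the unit the new bandwidth helpers work in: a return value of 16 (1/16 bpp granularity) maps to a step of 1 in x16 units, 8 maps to 2, and so on:

#include <drm/display/drm_dp_helper.h>

static int example_dsc_bpp_step_x16(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
	u8 incr = drm_dp_dsc_sink_bpp_incr(dsc_dpcd);

	if (!incr)
		return 0;	/* unknown or unsupported precision */

	return 16 / incr;
}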
+
+/**
* drm_dp_dsc_sink_max_slice_count() - Get the max slice count
* supported by the DSC sink.
* @dsc_dpcd: DSC capabilities from DPCD
@@ -3898,3 +3927,135 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
+
+/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
+static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
+ int symbol_size, bool is_mst)
+{
+ int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
+ int align = is_mst ? 4 / lane_count : 1;
+
+ return ALIGN(cycles, align);
+}
+
+static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
+{
+ int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
+ int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
+ bpp_x16, symbol_size, is_mst);
+ int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+
+ return slice_count * (slice_data_cycles + slice_eoc_cycles);
+}
+
+/**
+ * drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
+ * @lane_count: DP link lane count
+ * @hactive: pixel count of the active period in one scanline of the stream
+ * @dsc_slice_count: DSC slice count if @flags/DRM_DP_BW_OVERHEAD_DSC is set
+ * @bpp_x16: bits per pixel in .4 binary fixed point
+ * @flags: DRM_DP_BW_OVERHEAD_x flags
+ *
+ * Calculate the BW allocation overhead of a DP link stream, depending
+ * on the link's
+ * - @lane_count
+ * - SST/MST mode (@flags / %DRM_DP_BW_OVERHEAD_MST)
+ * - symbol size (@flags / %DRM_DP_BW_OVERHEAD_UHBR)
+ * - FEC mode (@flags / %DRM_DP_BW_OVERHEAD_FEC)
+ * - SSC/REF_CLK mode (@flags / %DRM_DP_BW_OVERHEAD_SSC_REF_CLK)
+ * as well as the stream's
+ * - @hactive timing
+ * - @bpp_x16 color depth
+ * - compression mode (@flags / %DRM_DP_BW_OVERHEAD_DSC).
+ * Note that this overhead doesn't account for the 8b/10b, 128b/132b
+ * channel coding efficiency; for that see
+ * drm_dp_bw_channel_coding_efficiency().
+ *
+ * Returns the overhead as 100% + overhead% in 1ppm units.
+ */
+int drm_dp_bw_overhead(int lane_count, int hactive,
+ int dsc_slice_count,
+ int bpp_x16, unsigned long flags)
+{
+ int symbol_size = flags & DRM_DP_BW_OVERHEAD_UHBR ? 32 : 8;
+ bool is_mst = flags & DRM_DP_BW_OVERHEAD_MST;
+ u32 overhead = 1000000;
+ int symbol_cycles;
+
+ /*
+ * DP Standard v2.1 2.6.4.1
+ * SSC downspread and ref clock variation margin:
+ * 5300ppm + 300ppm ~ 0.6%
+ */
+ if (flags & DRM_DP_BW_OVERHEAD_SSC_REF_CLK)
+ overhead += 6000;
+
+ /*
+ * DP Standard v2.1 2.6.4.1.1, 3.5.1.5.4:
+ * FEC symbol insertions for 8b/10b channel coding:
+ * After each 250 data symbols on 2-4 lanes:
+ * 250 LL + 5 FEC_PARITY_PH + 1 CD_ADJ (256 byte FEC block)
+ * After each 2 x 250 data symbols on 1 lane:
+ * 2 * 250 LL + 11 FEC_PARITY_PH + 1 CD_ADJ (512 byte FEC block)
+ * After 256 (2-4 lanes) or 128 (1 lane) FEC blocks:
+ * 256 * 256 bytes + 1 FEC_PM
+ * or
+ * 128 * 512 bytes + 1 FEC_PM
+ * (256 * 6 + 1) / (256 * 250) = 2.4015625 %
+ */
+ if (flags & DRM_DP_BW_OVERHEAD_FEC)
+ overhead += 24016;
+
+ /*
+ * DP Standard v2.1 2.7.9, 5.9.7
+ * The FEC overhead for UHBR is accounted for in its 96.71% channel
+ * coding efficiency.
+ */
+ WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
+ (flags & DRM_DP_BW_OVERHEAD_FEC));
+
+ if (flags & DRM_DP_BW_OVERHEAD_DSC)
+ symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
+ else
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ bpp_x16, symbol_size,
+ is_mst);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
+ overhead * 16),
+ hactive * bpp_x16);
+}
+EXPORT_SYMBOL(drm_dp_bw_overhead);
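
A usage sketch (hypothetical helper and values, not part of this series) scaling a stream's raw data rate by the returned overhead; bpp is passed in .4 fixed point, so an uncompressed 24 bpp stream uses 24 << 4, and FEC plus SSC are assumed enabled on an 8b/10b link:

#include <linux/math.h>
#include <linux/math64.h>
#include <drm/display/drm_dp_helper.h>

static u64 example_stream_rate_with_overhead(int pixel_clock_khz,
					     int lane_count, int hactive,
					     int bpp_x16)
{
	int overhead = drm_dp_bw_overhead(lane_count, hactive, 0, bpp_x16,
					  DRM_DP_BW_OVERHEAD_SSC_REF_CLK |
					  DRM_DP_BW_OVERHEAD_FEC);
	/* Raw stream rate in kbps: pixel clock (kHz) * bpp, bpp carried as x16. */
	u64 rate_kbps = div_u64(mul_u32_u32(pixel_clock_khz, bpp_x16), 16);

	/* The overhead is 100% + overhead% in 1ppm units. */
	return DIV_ROUND_UP_ULL(rate_kbps * overhead, 1000000);
}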
+
+/**
+ * drm_dp_bw_channel_coding_efficiency - Get a DP link's channel coding efficiency
+ * @is_uhbr: Whether the link has a 128b/132b channel coding
+ *
+ * Return the channel coding efficiency of the given DP link type, which is
+ * either 8b/10b or 128b/132b (aka UHBR). The corresponding overhead includes
+ * the 8b -> 10b, 128b -> 132b pixel data to link symbol conversion overhead
+ * and for 128b/132b any link or PHY level control symbol insertion overhead
+ * (LLCP, FEC, PHY sync, see DP Standard v2.1 3.5.2.18). For 8b/10b the
+ * corresponding FEC overhead is BW allocation specific, included in the value
+ * returned by drm_dp_bw_overhead().
+ *
+ * Returns the efficiency in the 100%/coding-overhead% ratio in
+ * 1ppm units.
+ */
+int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
+{
+ if (is_uhbr)
+ return 967100;
+ else
+ /*
+ * Note that on 8b/10b MST the efficiency is only
+ * 78.75% due to the 1 out of 64 MTPH packet overhead,
+ * not accounted for here.
+ */
+ return 800000;
+}
+EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
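
And a companion sketch for the coding efficiency (again a hypothetical helper): with 8b/10b (is_uhbr == false) a 21.6 Gbps raw HBR2 x4 link leaves 0.8 * 21.6 = 17.28 Gbps for data, while a 128b/132b link keeps 96.71% of its raw rate:

#include <linux/math64.h>
#include <drm/display/drm_dp_helper.h>

static u64 example_effective_link_bw(u64 raw_link_bw_kbps, bool is_uhbr)
{
	/* Efficiency is returned in 1ppm units, e.g. 800000 == 80%. */
	return div_u64(raw_link_bw_kbps *
		       drm_dp_bw_channel_coding_efficiency(is_uhbr),
		       1000000);
}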
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 0e0d0e76de06..8ca01a6bf645 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -43,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -3578,16 +3579,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
* value is in units of PBNs/(timeslots/1 MTP). This value can be used to
* convert the number of PBNs required for a given stream to the number of
* timeslots this stream requires in each MTP.
+ *
+ * Returns the BW / timeslot value in 20.12 fixed point format.
*/
-int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count)
{
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
+ fixed20_12 ret;
+
if (link_rate == 0 || link_lane_count == 0)
drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
link_rate, link_lane_count);
- /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
- return link_rate * link_lane_count / 54000;
+ /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
+ ch_coding_efficiency),
+ (1000000ULL * 8 * 5400) >> 12);
+
+ return ret;
}
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
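
With pbn_div now a 20.12 fixed-point value, callers promote the PBN to the same format before dividing, mirroring the req_slots computation in the hunk below; a hypothetical helper sketch:

#include <linux/math.h>
#include <drm/drm_fixed.h>

static int example_required_time_slots(fixed20_12 pbn_div, int pbn)
{
	/* dfixed_const() shifts pbn into 20.12 format to match pbn_div.full. */
	return DIV_ROUND_UP(dfixed_const(pbn), pbn_div.full);
}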
@@ -4335,7 +4346,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
}
}
- req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
+ req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full);
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
port->connector->base.id, port->connector->name,
@@ -4718,35 +4729,36 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
/**
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
- * @clock: dot clock for the mode
- * @bpp: bpp for the mode.
- * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
+ * @clock: dot clock
+ * @bpp: bpp as .4 binary fixed point
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
+int drm_dp_calc_pbn_mode(int clock, int bpp)
{
/*
- * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
- * peak_kbps *= (1006/1000)
- * peak_kbps *= (64/54)
- * peak_kbps *= 8 convert to bytes
- *
- * If the bpp is in units of 1/16, further divide by 16. Put this
- * factor in the numerator rather than the denominator to avoid
- * integer overflow
+ * peak_kbps = clock * bpp / 16
+ * peak_kbps *= SSC overhead / 1000000
+ * peak_kbps /= 8 convert to Kbytes
+ * peak_kBps *= (64/54) / 1000 convert to PBN
*/
+ /*
+ * TODO: Use the actual link and mode parameters to calculate
+ * the overhead. For now it's assumed that these are
+ * 4 link lanes, 4096 hactive pixels, which don't add any
+ * significant data padding overhead and that there is no DSC
+ * or FEC overhead.
+ */
+ int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp,
+ DRM_DP_BW_OVERHEAD_MST |
+ DRM_DP_BW_OVERHEAD_SSC_REF_CLK);
- if (dsc)
- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
- 8 * 54 * 1000 * 1000);
-
- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
- 8 * 54 * 1000 * 1000);
+ return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4),
+ 1000000ULL * 8 * 54 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
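
A caller-side sketch for the new signature (hypothetical helper, not from this series): bpp now arrives in .4 binary fixed point, so an uncompressed 24 bpp mode passes 24 << 4 and a DSC stream passes its fractional compressed bpp directly in x16 units:

#include <drm/drm_modes.h>
#include <drm/display/drm_dp_mst_helper.h>

static int example_mode_pbn(const struct drm_display_mode *mode,
			    int bpp, bool dsc, int dsc_bpp_x16)
{
	int bpp_x16 = dsc ? dsc_bpp_x16 : bpp << 4;

	return drm_dp_calc_pbn_mode(mode->clock, bpp_x16);
}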
@@ -4871,7 +4883,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
state = to_drm_dp_mst_topology_state(mgr->base.state);
seq_printf(m, "\n*** Atomic state info ***\n");
seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
- state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
+ state->payload_mask, mgr->max_payloads, state->start_slot,
+ dfixed_trunc(state->pbn_div));
seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n");
for (i = 0; i < mgr->max_payloads; i++) {
@@ -5136,13 +5149,67 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
return false;
}
+static bool
+drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent)
+{
+ if (!mgr->mst_primary)
+ return false;
+
+ port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
+ port);
+ if (!port)
+ return false;
+
+ if (!parent)
+ return true;
+
+ parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
+ parent);
+ if (!parent)
+ return false;
+
+ if (!parent->mstb)
+ return false;
+
+ return drm_dp_mst_port_downstream_of_branch(port, parent->mstb);
+}
+
+/**
+ * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
+ * @mgr: MST topology manager
+ * @port: the port being looked up
+ * @parent: the parent port
+ *
+ * The function returns %true if @port is downstream of @parent. If @parent is
+ * %NULL - denoting the root port - the function returns %true if @port is in
+ * @mgr's topology.
+ */
+bool
+drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent)
+{
+ bool ret;
+
+ mutex_lock(&mgr->lock);
+ ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);
+ mutex_unlock(&mgr->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent);
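
A small sketch (hypothetical, not from this series) of the lookup: restrict an operation to payloads that sit below a given branch port, with a NULL parent meaning anywhere in the manager's topology:

#include <drm/display/drm_dp_mst_helper.h>

static bool example_payload_below(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_atomic_payload *payload,
				  struct drm_dp_mst_port *parent)
{
	/* parent == NULL: check only that the payload's port is in the topology. */
	return drm_dp_mst_port_downstream_of_parent(mgr, payload->port, parent);
}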
+
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
- struct drm_dp_mst_topology_state *state);
+ struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port **failing_port);
static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_topology_state *state)
+ struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port **failing_port)
{
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_port *port;
@@ -5171,7 +5238,7 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
list_for_each_entry(port, &mstb->ports, next) {
- ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+ ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port);
if (ret < 0)
return ret;
@@ -5183,7 +5250,8 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
- struct drm_dp_mst_topology_state *state)
+ struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port **failing_port)
{
struct drm_dp_mst_atomic_payload *payload;
int pbn_used = 0;
@@ -5204,13 +5272,15 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
port->parent, port);
+ *failing_port = port;
return -EINVAL;
}
pbn_used = payload->pbn;
} else {
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
- state);
+ state,
+ failing_port);
if (pbn_used <= 0)
return pbn_used;
}
@@ -5219,6 +5289,7 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
port->parent, port, pbn_used, port->full_pbn);
+ *failing_port = port;
return -ENOSPC;
}
@@ -5271,10 +5342,10 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr
}
if (!payload_count)
- mst_state->pbn_div = 0;
+ mst_state->pbn_div.full = dfixed_const(0);
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
- mgr, mst_state, mst_state->pbn_div, avail_slots,
+ mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
mst_state->total_avail_slots - avail_slots);
return 0;
@@ -5397,19 +5468,81 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
/**
+ * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
+ * @state: The global atomic state
+ * @mgr: Manager to check
+ * @mst_state: The MST atomic state for @mgr
+ * @failing_port: Returns the port with a BW limitation
+ *
+ * Checks the given MST manager's topology state for an atomic update to ensure
+ * that it's valid. This includes checking whether there's enough bandwidth to
+ * support the new timeslot allocations in the atomic update.
+ *
+ * Any atomic drivers supporting DP MST must make sure to call this or
+ * the drm_dp_mst_atomic_check() function after checking the rest of their state
+ * in their &drm_mode_config_funcs.atomic_check() callback.
+ *
+ * See also:
+ * drm_dp_mst_atomic_check()
+ * drm_dp_atomic_find_time_slots()
+ * drm_dp_atomic_release_time_slots()
+ *
+ * Returns:
+ * - 0 if the new state is valid
+ * - %-ENOSPC, if the new state is invalid, because of BW limitation
+ * @failing_port is set to:
+ * - The non-root port where a BW limit check failed
+ * with all the ports downstream of @failing_port passing
+ * the BW limit check.
+ * The returned port pointer is valid until at least
+ * one payload downstream of it exists.
+ * - %NULL if the BW limit check failed at the root port
+ * with all the ports downstream of the root port passing
+ * the BW limit check.
+ * - %-EINVAL, if the new state is invalid, because the root port has
+ * too many payloads.
+ */
+int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_port **failing_port)
+{
+ int ret;
+
+ *failing_port = NULL;
+
+ if (!mgr->mst_state)
+ return 0;
+
+ mutex_lock(&mgr->lock);
+ ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+ mst_state,
+ failing_port);
+ mutex_unlock(&mgr->lock);
+
+ if (ret < 0)
+ return ret;
+
+ return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
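
A driver-side sketch (hypothetical, not from this series) of why the per-manager variant is useful: the caller can react to the specific port that failed the bandwidth check, for instance by retrying the streams below it with DSC or a lower bpp:

#include <drm/drm_print.h>
#include <drm/display/drm_dp_mst_helper.h>

static int example_mst_atomic_check(struct drm_atomic_state *state,
				    struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_mst_port *failing_port;
	int ret;

	ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &failing_port);
	if (ret == -ENOSPC && failing_port)
		drm_dbg_atomic(mgr->dev,
			       "BW limited at port %p, a retry with DSC could help\n",
			       failing_port);

	return ret;
}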
+
+/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
* @state: Pointer to the new &struct drm_dp_mst_topology_state
*
* Checks the given topology state for an atomic update to ensure that it's
- * valid. This includes checking whether there's enough bandwidth to support
- * the new timeslot allocations in the atomic update.
+ * valid, calling drm_dp_mst_atomic_check_mgr() for all MST managers in the
+ * atomic state. This includes checking whether there's enough bandwidth to
+ * support the new timeslot allocations in the atomic update.
*
* Any atomic drivers supporting DP MST must make sure to call this after
* checking the rest of their state in their
* &drm_mode_config_funcs.atomic_check() callback.
*
* See also:
+ * drm_dp_mst_atomic_check_mgr()
* drm_dp_atomic_find_time_slots()
* drm_dp_atomic_release_time_slots()
*
@@ -5424,21 +5557,11 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
int i, ret = 0;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- if (!mgr->mst_state)
- continue;
+ struct drm_dp_mst_port *tmp_port;
- ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
+ ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
if (ret)
break;
-
- mutex_lock(&mgr->lock);
- ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
- mst_state);
- mutex_unlock(&mgr->lock);
- if (ret < 0)
- break;
- else
- ret = 0;
}
return ret;
@@ -5894,6 +6017,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *immediate_upstream_port;
+ struct drm_dp_aux *immediate_upstream_aux;
struct drm_dp_mst_port *fec_port;
struct drm_dp_desc desc = {};
u8 endpoint_fec;
@@ -5958,21 +6082,25 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* - Port is on primary branch device
* - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
*/
- if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+ if (immediate_upstream_port)
+ immediate_upstream_aux = &immediate_upstream_port->aux;
+ else
+ immediate_upstream_aux = port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
- port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
- port->parent == port->mgr->mst_primary) {
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
+ if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
return NULL;
- if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
+ if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+ ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
- != DP_DWN_STRM_PORT_TYPE_ANALOG))
- return port->mgr->aux;
+ != DP_DWN_STRM_PORT_TYPE_ANALOG)))
+ return immediate_upstream_aux;
}
/*
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
deleted file mode 100644
index a4ad6fd13abc..000000000000
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-/*
- * Get AGP information.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
- struct agp_kern_info *kern;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- kern = &dev->agp->agp_info;
- info->agp_version_major = kern->version.major;
- info->agp_version_minor = kern->version.minor;
- info->mode = kern->mode;
- info->aperture_base = kern->aper_base;
- info->aperture_size = kern->aper_size * 1024 * 1024;
- info->memory_allowed = kern->max_memory << PAGE_SHIFT;
- info->memory_used = kern->current_memory << PAGE_SHIFT;
- info->id_vendor = kern->device->vendor;
- info->id_device = kern->device->device;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_info);
-
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_info *info = data;
- int err;
-
- err = drm_legacy_agp_info(dev, info);
- if (err)
- return err;
-
- return 0;
-}
-
-/*
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire(struct drm_device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- if (!dev->agp)
- return -ENODEV;
- if (dev->agp->acquired)
- return -EBUSY;
- dev->agp->bridge = agp_backend_acquire(pdev);
- if (!dev->agp->bridge)
- return -ENODEV;
- dev->agp->acquired = 1;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_acquire);
-
-/*
- * Acquire the AGP device (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev);
-}
-
-/*
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_legacy_agp_release(struct drm_device *dev)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- agp_backend_release(dev->agp->bridge);
- dev->agp->acquired = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_release);
-
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_legacy_agp_release(dev);
-}
-
-/*
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- dev->agp->mode = mode.mode;
- agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->enabled = 1;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_enable);
-
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_mode *mode = data;
-
- return drm_legacy_agp_enable(dev, *mode);
-}
-
-/*
- * Allocate AGP memory.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
- */
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
- struct agp_memory *memory;
- unsigned long pages;
- u32 type;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
- type = (u32) request->type;
- memory = agp_allocate_memory(dev->agp->bridge, pages, type);
- if (!memory) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->handle = (unsigned long)memory->key + 1;
- entry->memory = memory;
- entry->bound = 0;
- entry->pages = pages;
- list_add(&entry->head, &dev->agp->memory);
-
- request->handle = entry->handle;
- request->physical = memory->physical;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_alloc);
-
-
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_legacy_agp_alloc(dev, request);
-}
-
-/*
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
- unsigned long handle)
-{
- struct drm_agp_mem *entry;
-
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == handle)
- return entry;
- }
- return NULL;
-}
-
-/*
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks-up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int ret;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry || !entry->bound)
- return -EINVAL;
- ret = agp_unbind_memory(entry->memory);
- if (ret == 0)
- entry->bound = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_legacy_agp_unbind);
-
-
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_legacy_agp_unbind(dev, request);
-}
-
-/*
- * Bind AGP memory into the GATT (ioctl)
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks-up the AGP memory entry and passes
- * it to bind_agp() function.
- */
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int retcode;
- int page;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry || entry->bound)
- return -EINVAL;
- page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
- retcode = agp_bind_memory(entry->memory, page);
- if (retcode)
- return retcode;
- entry->bound = dev->agp->base + (page << PAGE_SHIFT);
- DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
- dev->agp->base, entry->bound);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_bind);
-
-
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_legacy_agp_bind(dev, request);
-}
-
-/*
- * Free AGP memory (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory is currently bound, unbind it via
- * unbind_agp(). Frees it via free_agp() as well as the entry itself
- * and unlinks from the doubly linked list it's inserted in.
- */
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry)
- return -EINVAL;
- if (entry->bound)
- agp_unbind_memory(entry->memory);
-
- list_del(&entry->head);
-
- agp_free_memory(entry->memory);
- kfree(entry);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_free);
-
-
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_legacy_agp_free(dev, request);
-}
-
-/*
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the drm_agp_t structure which is made available by the agpgart module
- * via the inter_module_* functions. Creates and initializes a drm_agp_head
- * structure.
- *
- * Note that final cleanup of the kmalloced structure is directly done in
- * drm_pci_agp_destroy.
- */
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct drm_agp_head *head = NULL;
-
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return NULL;
- head->bridge = agp_find_bridge(pdev);
- if (!head->bridge) {
- head->bridge = agp_backend_acquire(pdev);
- if (!head->bridge) {
- kfree(head);
- return NULL;
- }
- agp_copy_info(head->bridge, &head->agp_info);
- agp_backend_release(head->bridge);
- } else {
- agp_copy_info(head->bridge, &head->agp_info);
- }
- if (head->agp_info.chipset == NOT_SUPPORTED) {
- kfree(head);
- return NULL;
- }
- INIT_LIST_HEAD(&head->memory);
- head->cant_use_aperture = head->agp_info.cant_use_aperture;
- head->page_mask = head->agp_info.page_mask;
- head->base = head->agp_info.aper_base;
- return head;
-}
-/* Only exported for i810.ko */
-EXPORT_SYMBOL(drm_legacy_agp_init);
-
-/**
- * drm_legacy_agp_clear - Clear AGP resource list
- * @dev: DRM device
- *
- * Iterate over all AGP resources and remove them. But keep the AGP head
- * intact so it can still be used. It is safe to call this if AGP is disabled or
- * was already removed.
- *
- * Cleanup is only done for drivers who have DRIVER_LEGACY set.
- */
-void drm_legacy_agp_clear(struct drm_device *dev)
-{
- struct drm_agp_mem *entry, *tempe;
-
- if (!dev->agp)
- return;
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- agp_unbind_memory(entry->memory);
- agp_free_memory(entry->memory);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_legacy_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f1a503aafe5a..c31fc0b48c31 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1773,6 +1773,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct drm_private_obj *obj;
if (!drm_drv_uses_atomic_modeset(dev))
return;
@@ -1801,6 +1802,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
if (take_locks)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_connector_list_iter_end(&conn_iter);
+
+ list_for_each_entry(obj, &config->privobj_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&obj->lock, NULL);
+ drm_atomic_private_obj_print_state(p, obj->state);
+ if (take_locks)
+ drm_modeset_unlock(&obj->lock);
+ }
}
/**
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2444fc33dd7c..c98a766ca3bd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -795,9 +795,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
- * @encoder: encoder state to check
- * @conn_state: connector state to check
+ * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
+ * @connector: corresponding connector
+ * @state: the driver state object
*
* Checks if the writeback connector state is valid, and returns an error if it
* isn't.
@@ -806,9 +806,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* Zero for success or -errno
*/
int
-drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
- struct drm_connector_state *conn_state)
+drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
struct drm_writeback_job *wb_job = conn_state->writeback_job;
struct drm_property_blob *pixel_format_blob;
struct drm_framebuffer *fb;
@@ -827,11 +829,11 @@ drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
if (fb->format->format == formats[i])
return 0;
- drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
+ drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
return -EINVAL;
}
-EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
+EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
@@ -2382,10 +2384,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
- * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
+ * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
* @old_state: atomic state object with old state structures
*
- * This function waits for all preceeding commits that touch the same CRTC as
+ * This function waits for all preceding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
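
The first hunk above reworks the writeback check helper: it is now named drm_atomic_helper_check_wb_connector_state(), takes the connector plus the full atomic state, and looks up the new connector state itself via drm_atomic_get_new_connector_state(). A minimal sketch of a caller, assuming the helper is invoked from a writeback connector's .atomic_check; the function name is illustrative:

static int example_wb_conn_atomic_check(struct drm_connector *connector,
                                        struct drm_atomic_state *state)
{
        /* Validates the queued writeback job's framebuffer format against
         * the connector's advertised pixel formats; returns 0 or -EINVAL. */
        return drm_atomic_helper_check_wb_connector_state(connector, state);
}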
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 784e63d70a42..54975de44a0e 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
plane_state->normalized_zpos = val;
}
}
+
+ if (plane->hotspot_x_property) {
+ if (!drm_object_property_get_default_value(&plane->base,
+ plane->hotspot_x_property,
+ &val))
+ plane_state->hotspot_x = val;
+ }
+
+ if (plane->hotspot_y_property) {
+ if (!drm_object_property_get_default_value(&plane->base,
+ plane->hotspot_y_property,
+ &val))
+ plane_state->hotspot_y = val;
+ }
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
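
The hunk above makes __drm_atomic_helper_plane_state_reset() seed hotspot_x and hotspot_y from the hotspot properties' default values whenever a plane exposes them. A minimal sketch of how a paravirtualized driver might consume those fields in its plane update hook; example_hw_set_cursor_hotspot() is a hypothetical device call, not a DRM API:

static void example_cursor_atomic_update(struct drm_plane *plane,
                                         struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);

        /* hotspot_x/hotspot_y carry either the userspace-set values or
         * the property defaults applied by the reset helper above. */
        example_hw_set_cursor_hotspot(new_state->hotspot_x,
                                      new_state->hotspot_y);
}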
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 98d3b10c08ae..aee4a65d4959 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -593,6 +593,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
+ } else if (property == plane->hotspot_x_property) {
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->hotspot_x = val;
+ } else if (property == plane->hotspot_y_property) {
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->hotspot_y = val;
} else {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -653,6 +669,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->scaling_filter;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
+ } else if (property == plane->hotspot_x_property) {
+ *val = state->hotspot_x;
+ } else if (property == plane->hotspot_y_property) {
+ *val = state->hotspot_y;
} else {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -1006,13 +1026,28 @@ out:
return ret;
}
+static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value,
+ struct drm_property *prop)
+{
+ if (ret != 0 || old_val != prop_value) {
+ drm_dbg_atomic(prop->dev,
+ "[PROP:%d:%s] No prop can be changed during async flip\n",
+ prop->base.id, prop->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
- uint64_t prop_value)
+ u64 prop_value,
+ bool async_flip)
{
struct drm_mode_object *ref;
+ u64 old_val;
int ret;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
@@ -1029,6 +1064,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip) {
+ ret = drm_atomic_connector_get_property(connector, connector_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
ret = drm_atomic_connector_set_property(connector,
connector_state, file_priv,
prop, prop_value);
@@ -1044,6 +1086,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip) {
+ ret = drm_atomic_crtc_get_property(crtc, crtc_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
ret = drm_atomic_crtc_set_property(crtc,
crtc_state, prop, prop_value);
break;
@@ -1051,6 +1100,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
+ struct drm_mode_config *config = &plane->dev->mode_config;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -1058,6 +1108,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip && prop != config->prop_fb_id) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
+ if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ drm_dbg_atomic(prop->dev,
+ "[OBJECT:%d] Only primary planes can be changed during async flip\n",
+ obj->id);
+ ret = -EINVAL;
+ break;
+ }
+
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
@@ -1323,6 +1388,18 @@ static void complete_signaling(struct drm_device *dev,
kfree(fence_state);
}
+static void
+set_async_flip(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ crtc_state->async_flip = true;
+ }
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1337,6 +1414,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_out_fence_state *fence_state;
int ret = 0;
unsigned int i, j, num_fences;
+ bool async_flip = false;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1363,9 +1441,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
}
if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
- drm_dbg_atomic(dev,
- "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
- return -EINVAL;
+ if (!dev->mode_config.async_page_flip) {
+ drm_dbg_atomic(dev,
+ "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n");
+ return -EINVAL;
+ }
+
+ async_flip = true;
}
/* can't test and expect an event at the same time. */
@@ -1450,8 +1532,8 @@ retry:
goto out;
}
- ret = drm_atomic_set_property(state, file_priv,
- obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj,
+ prop, prop_value, async_flip);
if (ret) {
drm_mode_object_put(obj);
goto out;
@@ -1468,6 +1550,9 @@ retry:
if (ret)
goto out;
+ if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ set_async_flip(state);
+
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
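
Taken together, the drm_atomic_uapi.c hunks let the atomic ioctl accept DRM_MODE_PAGE_FLIP_ASYNC when the driver sets mode_config.async_page_flip: only FB_ID changes on primary planes are allowed (every other property must keep its current value, enforced by drm_atomic_check_prop_changes()), and set_async_flip() marks each CRTC state in the commit as async. A hedged userspace sketch using libdrm; plane_id and fb_prop_id are assumed to have been looked up beforehand:

#include <errno.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int example_async_flip(int fd, uint32_t plane_id, uint32_t fb_prop_id,
                       uint32_t new_fb_id)
{
        drmModeAtomicReqPtr req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -ENOMEM;

        /* Only the primary plane's FB_ID may change in an async commit. */
        drmModeAtomicAddProperty(req, plane_id, fb_prop_id, new_fb_id);
        ret = drmModeAtomicCommit(fd, req,
                                  DRM_MODE_PAGE_FLIP_ASYNC |
                                  DRM_MODE_ATOMIC_NONBLOCK, NULL);
        drmModeAtomicFree(req);
        return ret;
}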
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 2ed2585ded37..252c105d614f 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -37,13 +37,12 @@
#include <drm/drm_print.h>
#include "drm_internal.h"
-#include "drm_legacy.h"
/**
* DOC: master and authentication
*
* &struct drm_master is used to track groups of clients with open
- * primary/legacy device nodes. For every &struct drm_file which has had at
+ * primary device nodes. For every &struct drm_file which has had at
* least once successfully became the device master (either through the
* SET_MASTER IOCTL, or implicitly through opening the primary device node when
* no one else is the current master that time) there exists one &drm_master.
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return NULL;
kref_init(&master->refcount);
- drm_master_legacy_init(master);
idr_init_base(&master->magic_map, 1);
master->dev = dev;
@@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master_locked(file_priv))
goto out;
- drm_legacy_lock_master_cleanup(dev, master);
-
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
out:
@@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- drm_legacy_master_rmmaps(dev, master);
-
idr_destroy(&master->magic_map);
idr_destroy(&master->leases);
idr_destroy(&master->lessee_idr);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 30d66bee0ec6..cee3188adf3d 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -1347,50 +1347,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
-#ifdef CONFIG_DEBUG_FS
-static int drm_bridge_chains_info(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_encoder *encoder;
- unsigned int bridge_idx = 0;
-
- list_for_each_entry(encoder, &config->encoder_list, head) {
- struct drm_bridge *bridge;
-
- drm_printf(&p, "encoder[%u]\n", encoder->base.id);
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
- bridge_idx, bridge->type, bridge->ops);
-
-#ifdef CONFIG_OF
- if (bridge->of_node)
- drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
-#endif
-
- drm_printf(&p, "\n");
-
- bridge_idx++;
- }
- }
-
- return 0;
-}
-
-static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
- { "bridge_chains", drm_bridge_chains_info, 0 },
-};
-
-void drm_bridge_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_add_files(dev, drm_bridge_debugfs_list,
- ARRAY_SIZE(drm_bridge_debugfs_list));
-}
-#endif
-
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
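
The removed hunk drops the bridge_chains debugfs file and drm_bridge_debugfs_init() from drm_bridge.c; the chain iterator it relied on is unaffected. A minimal sketch of walking an encoder's bridge chain with drm_for_each_bridge_in_chain(), the same loop the removed helper used; the function name is illustrative:

static void example_print_bridge_chain(struct drm_encoder *encoder,
                                       struct drm_printer *p)
{
        struct drm_bridge *bridge;
        unsigned int i = 0;

        drm_for_each_bridge_in_chain(encoder, bridge)
                drm_printf(p, "bridge[%u] type: %u, ops: %#x\n",
                           i++, bridge->type, bridge->ops);
}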
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 8239ad43aed5..3acd67021ec6 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector)
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- if (bridge_connector->bridge_hpd) {
- struct drm_bridge *hpd = bridge_connector->bridge_hpd;
-
- drm_bridge_hpd_disable(hpd);
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
deleted file mode 100644
index 86700560fea2..000000000000
--- a/drivers/gpu/drm/drm_bufs.c
+++ /dev/null
@@ -1,1627 +0,0 @@
-/*
- * Legacy: Generic DRM Buffer Management
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/log2.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/nospec.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include <asm/shmparam.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- struct drm_local_map *map)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- /*
- * Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we only compare the
- * lower 32 bits of the map offset for maps of type
- * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that if a driver have more than one resource
- * of each type, the lower 32 bits are different.
- */
- if (!entry->map ||
- map->type != entry->map->type ||
- entry->master != dev->master)
- continue;
- switch (map->type) {
- case _DRM_SHM:
- if (map->flags != _DRM_CONTAINS_LOCK)
- break;
- return entry;
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- if ((entry->map->offset & 0xffffffff) ==
- (map->offset & 0xffffffff))
- return entry;
- break;
- default: /* Make gcc happy */
- break;
- }
- if (entry->map->offset == map->offset)
- return entry;
- }
-
- return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
- unsigned long user_token, int hashed_handle, int shm)
-{
- int use_hashed_handle, shift;
- unsigned long add;
-
-#if (BITS_PER_LONG == 64)
- use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
- use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
- if (!use_hashed_handle) {
- int ret;
-
- hash->key = user_token >> PAGE_SHIFT;
- ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
- return ret;
- }
-
- shift = 0;
- add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
- if (shm && (SHMLBA > PAGE_SIZE)) {
- int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
- /* For shared memory, we have to preserve the SHMLBA
- * bits of the eventual vma->vm_pgoff value during
- * mmap(). Otherwise we run into cache aliasing problems
- * on some platforms. On these platforms, the pgoff of
- * a mmap() request is used to pick a suitable virtual
- * address for the mmap() region such that it will not
- * cause cache aliasing problems.
- *
- * Therefore, make sure the SHMLBA relevant bits of the
- * hash value we use are equal to those in the original
- * kernel virtual address.
- */
- shift = bits;
- add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
- }
-
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- shift, add);
-}
-
-/*
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags,
- struct drm_map_list **maplist)
-{
- struct drm_local_map *map;
- struct drm_map_list *list;
- unsigned long user_token;
- int ret;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- map->offset = offset;
- map->size = size;
- map->flags = flags;
- map->type = type;
-
- /* Only allow shared memory to be removable since we only keep enough
- * book keeping information about shared memory to allow for removal
- * when processes fork.
- */
- if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
- kfree(map);
- return -EINVAL;
- }
- DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
- (unsigned long long)map->offset, map->size, map->type);
-
- /* page-align _DRM_SHM maps. They are allocated here so there is no security
- * hole created by that and it works around various broken drivers that use
- * a non-aligned quantity to map the SAREA. --BenH
- */
- if (map->type == _DRM_SHM)
- map->size = PAGE_ALIGN(map->size);
-
- if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
- kfree(map);
- return -EINVAL;
- }
- map->mtrr = -1;
- map->handle = NULL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
- if (map->offset + (map->size-1) < map->offset ||
- map->offset < virt_to_phys(high_memory)) {
- kfree(map);
- return -EINVAL;
- }
-#endif
- /* Some drivers preinitialize some maps, without the X Server
- * needing to be aware of it. Therefore, we just return success
- * when the server tries to create a duplicate map.
- */
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size,
- list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
-
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr =
- arch_phys_wc_add(map->offset, map->size);
- }
- if (map->type == _DRM_REGISTERS) {
- if (map->flags & _DRM_WRITE_COMBINING)
- map->handle = ioremap_wc(map->offset,
- map->size);
- else
- map->handle = ioremap(map->offset, map->size);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- }
-
- break;
- case _DRM_SHM:
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size, list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
- map->handle = vmalloc_user(map->size);
- DRM_DEBUG("%lu %d %p\n",
- map->size, order_base_2(map->size), map->handle);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- map->offset = (unsigned long)map->handle;
- if (map->flags & _DRM_CONTAINS_LOCK) {
- /* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->master->lock.hw_lock != NULL) {
- vfree(map->handle);
- kfree(map);
- return -EBUSY;
- }
- dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
- }
- break;
- case _DRM_AGP: {
- struct drm_agp_mem *entry;
- int valid = 0;
-
- if (!dev->agp) {
- kfree(map);
- return -EINVAL;
- }
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* In some cases (i810 driver), user space may have already
- * added the AGP base itself, because dev->agp->base previously
- * only got set during AGP enable. So, only add the base
- * address if the map's offset isn't already within the
- * aperture.
- */
- if (map->offset < dev->agp->base ||
- map->offset > dev->agp->base +
- dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
- map->offset += dev->agp->base;
- }
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
-
- /* This assumes the DRM is in total control of AGP space.
- * It's not always the case as AGP can be in the control
- * of user space (i.e. i810 driver). So this loop will get
- * skipped and we double check that dev->agp->memory is
- * actually set as well as being invalid before EPERM'ing
- */
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if ((map->offset >= entry->bound) &&
- (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- kfree(map);
- return -EPERM;
- }
- DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
- (unsigned long long)map->offset, map->size);
-
- break;
- }
- case _DRM_SCATTER_GATHER:
- if (!dev->sg) {
- kfree(map);
- return -EINVAL;
- }
- map->offset += (unsigned long)dev->sg->virtual;
- break;
- case _DRM_CONSISTENT:
- /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
- * As we're limiting the address to 2^32-1 (or less),
- * casting it down to 32 bits is no problem, but we
- * need to point to a 64bit variable first.
- */
- map->handle = dma_alloc_coherent(dev->dev,
- map->size,
- &map->offset,
- GFP_KERNEL);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- break;
- default:
- kfree(map);
- return -EINVAL;
- }
-
- list = kzalloc(sizeof(*list), GFP_KERNEL);
- if (!list) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- return -EINVAL;
- }
- list->map = map;
-
- mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist);
-
- /* Assign a 32-bit handle */
- /* We do it here so that dev->struct_mutex protects the increment */
- user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
- map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0,
- (map->type == _DRM_SHM));
- if (ret) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- kfree(list);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- list->user_token = list->hash.key << PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
-
- if (!(map->flags & _DRM_DRIVER))
- list->master = dev->master;
- *maplist = list;
- return 0;
-}
-
-int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_ptr)
-{
- struct drm_map_list *list;
- int rc;
-
- rc = drm_addmap_core(dev, offset, size, type, flags, &list);
- if (!rc)
- *map_ptr = list->map;
- return rc;
-}
-EXPORT_SYMBOL(drm_legacy_addmap);
-
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
- unsigned int token)
-{
- struct drm_map_list *_entry;
-
- list_for_each_entry(_entry, &dev->maplist, head)
- if (_entry->user_token == token)
- return _entry->map;
- return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_findmap);
-
-/*
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *maplist;
- int err;
-
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
- return -EPERM;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- err = drm_addmap_core(dev, map->offset, map->size, map->type,
- map->flags, &maplist);
-
- if (err)
- return err;
-
- /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
- map->handle = (void *)(unsigned long)maplist->user_token;
-
- /*
- * It appears that there are no users of this value whatsoever --
- * drmAddMap just discards it. Let's not encourage its use.
- * (Keeping drm_addmap_core's returned mtrr value would be wrong --
- * it's not a real mtrr index anymore.)
- */
- map->mtrr = -1;
-
- return 0;
-}
-
-/*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- idx = map->offset;
- if (idx < 0)
- return -EINVAL;
-
- i = 0;
- mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*
- * Remove a map private from list and deallocate resources if the mapping
- * isn't in use.
- *
- * Searches the map on drm_device::maplist, removes it from the list, see if
- * it's being used, and free any associated resource (such as MTRR's) if it's not
- * being on use.
- *
- * \sa drm_legacy_addmap
- */
-int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
- struct drm_map_list *r_list = NULL, *list_t;
- int found = 0;
- struct drm_master *master;
-
- /* Find the list entry for the map and remove it */
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (r_list->map == map) {
- master = r_list->master;
- list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
- r_list->user_token >> PAGE_SHIFT);
- kfree(r_list);
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- iounmap(map->handle);
- fallthrough;
- case _DRM_FRAME_BUFFER:
- arch_phys_wc_del(map->mtrr);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- if (master) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL; /* SHM removed */
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dma_free_coherent(dev->dev,
- map->size,
- map->handle,
- map->offset);
- break;
- }
- kfree(map);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_rmmap_locked);
-
-void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- drm_legacy_rmmap_locked(dev, map);
- mutex_unlock(&dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_legacy_rmmap);
-
-void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_map_list *r_list, *list_temp;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_legacy_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-void drm_legacy_rmmaps(struct drm_device *dev)
-{
- struct drm_map_list *r_list, *list_temp;
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_legacy_rmmap(dev, r_list->map);
-}
-
-/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly. Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about. This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->user_token == (unsigned long)request->handle &&
- r_list->map->flags & _DRM_REMOVABLE) {
- map = r_list->map;
- break;
- }
- }
-
- /* List has wrapped around to the head pointer, or it's empty we didn't
- * find anything.
- */
- if (list_empty(&dev->maplist) || !map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- /* Register and framebuffer maps are permanent */
- if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- ret = drm_legacy_rmmap_locked(dev, map);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/*
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device *dev,
- struct drm_buf_entry *entry)
-{
- drm_dma_handle_t *dmah;
- int i;
-
- if (entry->seg_count) {
- for (i = 0; i < entry->seg_count; i++) {
- if (entry->seglist[i]) {
- dmah = entry->seglist[i];
- dma_free_coherent(dev->dev,
- dmah->size,
- dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
- }
- }
- kfree(entry->seglist);
-
- entry->seg_count = 0;
- }
-
- if (entry->buf_count) {
- for (i = 0; i < entry->buf_count; i++) {
- kfree(entry->buflist[i].dev_private);
- }
- kfree(entry->buflist);
-
- entry->buf_count = 0;
- }
-}
-
-#if IS_ENABLED(CONFIG_AGP)
-/*
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_legacy_addbufs_agp(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_agp_mem *agp_entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i, valid;
- struct drm_buf **temp_buflist;
-
- if (!dma)
- return -EINVAL;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = dev->agp->base + request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lx\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- /* Make sure buffers are located in AGP memory that we own */
- valid = 0;
- list_for_each_entry(agp_entry, &dev->agp->memory, head) {
- if ((agp_offset >= agp_entry->bound) &&
- (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- DRM_DEBUG("zone invalid\n");
- return -EINVAL;
- }
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_AGP;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_agp);
-#endif /* CONFIG_AGP */
-
-int drm_legacy_addbufs_pci(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- int count;
- int order;
- int size;
- int total;
- int page_order;
- struct drm_buf_entry *entry;
- drm_dma_handle_t *dmah;
- struct drm_buf *buf;
- int alignment;
- unsigned long offset;
- int i;
- int byte_count;
- int page_count;
- unsigned long *temp_pagelist;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
- request->count, request->size, size, order);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
- if (!entry->seglist) {
- kfree(entry->buflist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- /* Keep the original pagelist until we know all the allocations
- * have succeeded
- */
- temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
- sizeof(*dma->pagelist),
- GFP_KERNEL);
- if (!temp_pagelist) {
- kfree(entry->buflist);
- kfree(entry->seglist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memcpy(temp_pagelist,
- dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
- DRM_DEBUG("pagelist: %d entries\n",
- dma->page_count + (count << page_order));
-
- entry->buf_size = size;
- entry->page_order = page_order;
- byte_count = 0;
- page_count = 0;
-
- while (entry->buf_count < count) {
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- dmah->size = total;
- dmah->vaddr = dma_alloc_coherent(dev->dev,
- dmah->size,
- &dmah->busaddr,
- GFP_KERNEL);
- if (!dmah->vaddr) {
- kfree(dmah);
-
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- entry->seglist[entry->seg_count++] = dmah;
- for (i = 0; i < (1 << page_order); i++) {
- DRM_DEBUG("page %d @ 0x%08lx\n",
- dma->page_count + page_count,
- (unsigned long)dmah->vaddr + PAGE_SIZE * i);
- temp_pagelist[dma->page_count + page_count++]
- = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
- }
- for (offset = 0;
- offset + size <= total && entry->buf_count < count;
- offset += alignment, ++entry->buf_count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
- buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(dmah->vaddr + offset);
- buf->bus_address = dmah->busaddr + offset;
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size,
- GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n",
- entry->buf_count, buf->address);
- }
- byte_count += PAGE_SIZE << page_order;
- }
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- /* No allocations failed, so now we can replace the original pagelist
- * with the new one.
- */
- if (dma->page_count) {
- kfree(dma->pagelist);
- }
- dma->pagelist = temp_pagelist;
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += entry->seg_count << page_order;
- dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- if (request->flags & _DRM_PCI_BUFFER_RO)
- dma->flags = _DRM_DMA_USE_PCI_RO;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_pci);
-
-static int drm_legacy_addbufs_sg(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset
- + (unsigned long)dev->sg->virtual);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_SG;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-/*
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According with the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_legacy_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_desc *request = data;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
-#if IS_ENABLED(CONFIG_AGP)
- if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_legacy_addbufs_agp(dev, request);
- else
-#endif
- if (request->flags & _DRM_SG_BUFFER)
- ret = drm_legacy_addbufs_sg(dev, request);
- else if (request->flags & _DRM_FB_BUFFER)
- ret = -EINVAL;
- else
- ret = drm_legacy_addbufs_pci(dev, request);
-
- return ret;
-}
-
-/*
- * Get information about the buffer mappings.
- *
- * This was originally mean for debugging purposes, or by a sophisticated
- * client library to determine how best to use the available buffers (e.g.,
- * large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::buf_lock
- * lock, preventing of allocating more buffers after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int __drm_legacy_infobufs(struct drm_device *dev,
- void *data, int *p,
- int (*f)(void *, int, struct drm_buf_entry *))
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int count;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->buf_lock);
-
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count)
- ++count;
- }
-
- DRM_DEBUG("count = %d\n", count);
-
- if (*p >= count) {
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- struct drm_buf_entry *from = &dma->bufs[i];
-
- if (from->buf_count) {
- if (f(data, count, from) < 0)
- return -EFAULT;
- DRM_DEBUG("%d %d %d %d %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].buf_size,
- dma->bufs[i].low_mark,
- dma->bufs[i].high_mark);
- ++count;
- }
- }
- }
- *p = count;
-
- return 0;
-}
-
-static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
-{
- struct drm_buf_info *request = data;
- struct drm_buf_desc __user *to = &request->list[count];
- struct drm_buf_desc v = {.count = from->buf_count,
- .size = from->buf_size,
- .low_mark = from->low_mark,
- .high_mark = from->high_mark};
-
- if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
- return -EFAULT;
- return 0;
-}
-
-int drm_legacy_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_info *request = data;
-
- return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
-}
-
-/*
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order is bounded between the admissible orders and
- * updates the respective drm_device_dma::bufs entry low and high water mark.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_legacy_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_desc *request = data;
- int order;
- struct drm_buf_entry *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d, %d, %d\n",
- request->size, request->low_mark, request->high_mark);
- order = order_base_2(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- entry = &dma->bufs[order];
-
- if (request->low_mark < 0 || request->low_mark > entry->buf_count)
- return -EINVAL;
- if (request->high_mark < 0 || request->high_mark > entry->buf_count)
- return -EINVAL;
-
- entry->low_mark = request->low_mark;
- entry->high_mark = request->high_mark;
-
- return 0;
-}
-
-/*
- * Unreserve the buffers in list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_legacy_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_free *request = data;
- int i;
- int idx;
- struct drm_buf *buf;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d\n", request->count);
- for (i = 0; i < request->count; i++) {
- if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
- return -EFAULT;
- if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- return -EINVAL;
- }
- idx = array_index_nospec(idx, dma->buf_count);
- buf = dma->buflist[idx];
- if (buf->file_priv != file_priv) {
- DRM_ERROR("Process %d freeing buffer not owned\n",
- task_pid_nr(current));
- return -EINVAL;
- }
- drm_legacy_free_buffer(dev, buf);
- }
-
- return 0;
-}
-
-/*
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
- void __user **v,
- int (*f)(void *, int, unsigned long,
- struct drm_buf *),
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int retcode = 0;
- unsigned long virtual;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->buf_lock);
-
- if (*p >= dma->buf_count) {
- if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
- || (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))) {
- struct drm_local_map *map = dev->agp_buffer_map;
- unsigned long token = dev->agp_buffer_token;
-
- if (!map) {
- retcode = -EINVAL;
- goto done;
- }
- virtual = vm_mmap(file_priv->filp, 0, map->size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- token);
- } else {
- virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, 0);
- }
- if (virtual > -1024UL) {
- /* Real error */
- retcode = (signed long)virtual;
- goto done;
- }
- *v = (void __user *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (f(data, i, virtual, dma->buflist[i]) < 0) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- *p = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
-
- return retcode;
-}
-
-static int map_one_buf(void *data, int idx, unsigned long virtual,
- struct drm_buf *buf)
-{
- struct drm_buf_map *request = data;
- unsigned long address = virtual + buf->offset; /* *** */
-
- if (copy_to_user(&request->list[idx].idx, &buf->idx,
- sizeof(request->list[0].idx)))
- return -EFAULT;
- if (copy_to_user(&request->list[idx].total, &buf->total,
- sizeof(request->list[0].total)))
- return -EFAULT;
- if (clear_user(&request->list[idx].used, sizeof(int)))
- return -EFAULT;
- if (copy_to_user(&request->list[idx].address, &address,
- sizeof(address)))
- return -EFAULT;
- return 0;
-}
-
-int drm_legacy_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_map *request = data;
-
- return __drm_legacy_mapbufs(dev, data, &request->count,
- &request->virtual, map_one_buf,
- file_priv);
-}
-
-int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (dev->driver->dma_ioctl)
- return dev->driver->dma_ioctl(dev, data, file_priv);
- else
- return -EINVAL;
-}
-
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_getsarea);
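
The deleted drm_bufs.c implemented legacy (pre-KMS) buffer management; its AGP, PCI and scatter-gather addbufs paths all shared the same size/order/alignment arithmetic. A standalone, hedged sketch of that arithmetic; the PAGE_SHIFT/PAGE_SIZE values and the sample request size are assumptions for illustration only:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Smallest order such that (1 << order) >= n, as order_base_2() computes. */
static int order_base_2(unsigned long n)
{
        int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long request_size = 20000;     /* drm_buf_desc.size */
        int page_align = 1;                     /* _DRM_PAGE_ALIGN set */
        int order = order_base_2(request_size);
        unsigned long size = 1UL << order;
        unsigned long alignment = page_align ? PAGE_ALIGN(size) : size;
        int page_order = order > PAGE_SHIFT ? order - PAGE_SHIFT : 0;

        printf("order=%d size=%lu alignment=%lu seg_total=%lu\n",
               order, size, alignment, PAGE_SIZE << page_order);
        return 0;
}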
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index c3027115d055..9403b3f576f7 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -5,7 +5,6 @@
#include <linux/iosys-map.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -84,16 +83,13 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
return -EOPNOTSUPP;
- if (funcs && !try_module_get(funcs->owner))
- return -ENODEV;
-
client->dev = dev;
client->name = name;
client->funcs = funcs;
ret = drm_client_modeset_create(client);
if (ret)
- goto err_put_module;
+ return ret;
ret = drm_client_open(client);
if (ret)
@@ -105,10 +101,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
err_free:
drm_client_modeset_free(client);
-err_put_module:
- if (funcs)
- module_put(funcs->owner);
-
return ret;
}
EXPORT_SYMBOL(drm_client_init);
@@ -177,8 +169,6 @@ void drm_client_release(struct drm_client_dev *client)
drm_client_modeset_free(client);
drm_client_close(client);
drm_dev_put(dev);
- if (client->funcs)
- module_put(client->funcs->owner);
}
EXPORT_SYMBOL(drm_client_release);
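
The drm_client.c hunks drop the try_module_get()/module_put() handling of the client funcs owner from drm_client_init() and drm_client_release(). A minimal sketch of registering an in-kernel client after this change, assuming a driver-owned struct drm_client_dev; the names example_client_funcs and example_client_register are illustrative:

static const struct drm_client_funcs example_client_funcs = {
        /* optional .unregister / .restore / .hotplug hooks go here */
};

static int example_client_register(struct drm_device *dev,
                                   struct drm_client_dev *client)
{
        int ret;

        ret = drm_client_init(dev, client, "example-client",
                              &example_client_funcs);
        if (ret)
                return ret;

        drm_client_register(client);
        return 0;
}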
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index c3725086f413..b0516505f7ae 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1198,6 +1198,12 @@ static const u32 dp_colorspaces =
* drm_connector_set_path_property(), in the case of DP MST with the
* path property the MST manager created. Userspace cannot change this
* property.
+ *
+ * In the case of DP MST, the property has the format
+ * ``mst:<parent>-<ports>`` where ``<parent>`` is the KMS object ID of the
+ * parent connector and ``<ports>`` is a hyphen-separated list of DP MST
+ * port numbers. Note, KMS object IDs are not guaranteed to be stable
+ * across reboots.
* TILE:
* Connector tile group property to indicate how a set of DRM connector
* compose together into one logical screen. This is used by both high-res
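
The drm_connector.c hunk documents the PATH property format for DP MST connectors, ``mst:<parent>-<ports>``. A hedged userspace sketch (libdrm) that reads and prints that blob for a given connector; connector_id is assumed to be known and error handling is kept minimal:

#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

void example_print_path(int fd, uint32_t connector_id)
{
        drmModeObjectPropertiesPtr props =
                drmModeObjectGetProperties(fd, connector_id,
                                           DRM_MODE_OBJECT_CONNECTOR);
        uint32_t i;

        for (i = 0; props && i < props->count_props; i++) {
                drmModePropertyPtr prop =
                        drmModeGetProperty(fd, props->props[i]);

                if (prop && !strcmp(prop->name, "PATH")) {
                        drmModePropertyBlobPtr blob =
                                drmModeGetPropertyBlob(fd, props->prop_values[i]);

                        /* e.g. "mst:32-1" for port 1 below parent object 32
                         * (illustrative value; object IDs are not stable). */
                        if (blob)
                                printf("PATH: %.*s\n", (int)blob->length,
                                       (char *)blob->data);
                        drmModeFreePropertyBlob(blob);
                }
                drmModeFreeProperty(prop);
        }
        drmModeFreeObjectProperties(props);
}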
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
deleted file mode 100644
index a0fc779e5e1e..000000000000
--- a/drivers/gpu/drm/drm_context.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Legacy: Generic DRM Contexts
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-struct drm_ctx_list {
- struct list_head head;
- drm_context_t handle;
- struct drm_file *tag;
-};
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/*
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
- * lock.
- */
-void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- idr_remove(&dev->ctx_idr, ctx_handle);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new idr from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
- GFP_KERNEL);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/*
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr
- */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- idr_init(&dev->ctx_idr);
-}
-
-/*
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Free all idr members using drm_ctx_sarea_free helper function
- * while holding the drm_device::struct_mutex lock.
- */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- idr_destroy(&dev->ctx_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
- * @dev: DRM device to operate on
- * @file: Open file to flush contexts for
- *
- * This iterates over all contexts on @dev and drops them if they're owned by
- * @file. Note that after this call returns, new contexts might be added if
- * the file is still alive.
- */
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_ctx_list *pos, *tmp;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->ctxlist_mutex);
-
- list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
- if (pos->tag == file &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, pos->handle);
-
- drm_legacy_ctxbitmap_free(dev, pos->handle);
- list_del(&pos->head);
- kfree(pos);
- }
- }
-
- mutex_unlock(&dev->ctxlist_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/*
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Gets the map from drm_device::ctx_idr with the handle specified and
- * returns its handle.
- */
-int drm_legacy_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map;
- struct drm_map_list *_entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
-
- map = idr_find(&dev->ctx_idr, request->ctx_id);
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- request->handle = NULL;
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == map) {
- request->handle =
- (void *)(unsigned long)_entry->user_token;
- break;
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- if (request->handle == NULL)
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches the mapping specified in \p arg and update the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_legacy_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list = NULL;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map
- && r_list->user_token == (unsigned long) request->handle)
- goto found;
- }
- bad:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
-
- found:
- map = r_list->map;
- if (!map)
- goto bad;
-
- if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
- goto bad;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/*
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
- if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("Reentering -- FIXME\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context) {
- clear_bit(0, &dev->context_flag);
- return 0;
- }
-
- return 0;
-}
-
-/*
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
- struct drm_file *file_priv, int new)
-{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
-
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
- DRM_ERROR("Lock isn't held after context switch\n");
- }
-
- /* If a context switch is ever initiated
- when the kernel holds the lock, release
- that lock here.
- */
- clear_bit(0, &dev->context_flag);
-
- return 0;
-}
-
-/*
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_res *res = data;
- struct drm_ctx ctx;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (res->count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
- return -EFAULT;
- }
- }
- res->count = DRM_RESERVED_CONTEXTS;
-
- return 0;
-}
-
-/*
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_legacy_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_list *ctx_entry;
- struct drm_ctx *ctx = data;
- int tmp_handle;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- tmp_handle = drm_legacy_ctxbitmap_next(dev);
- if (tmp_handle == DRM_KERNEL_CONTEXT) {
- /* Skip kernel's context and get a new one. */
- tmp_handle = drm_legacy_ctxbitmap_next(dev);
- }
- DRM_DEBUG("%d\n", tmp_handle);
- if (tmp_handle < 0) {
- DRM_DEBUG("Not enough free contexts.\n");
- /* Should this return -EBUSY instead? */
- return tmp_handle;
- }
-
- ctx->handle = tmp_handle;
-
- ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
- if (!ctx_entry) {
- DRM_DEBUG("out of memory\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx->handle;
- ctx_entry->tag = file_priv;
-
- mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist);
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_getctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- /* This is 0, because we don't handle any context flags */
- ctx->flags = 0;
-
- return 0;
-}
-
-/*
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_legacy_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/*
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_legacy_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, file_priv, ctx->handle);
-
- return 0;
-}
-
-/*
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_legacy_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx->handle);
- drm_legacy_ctxbitmap_free(dev, ctx->handle);
- }
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx->handle) {
- list_del(&pos->head);
- kfree(pos);
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*@}*/
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a209659a996c..2dafc39a27cb 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode);
* @state: atomic state object
*
* Provides a default CRTC-state check handler for CRTCs that only have
- * one primary plane attached to it.
- *
- * This is often the case for the CRTC of simple framebuffers. See also
- * drm_plane_helper_atomic_check() for the respective plane-state check
- * helper function.
+ * one primary plane attached to it. This is often the case for the CRTC
+ * of simple framebuffers.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 8556c3b3ff88..a514d5207e41 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -222,6 +222,8 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_mode_closefb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb2_ioctl(struct drm_device *dev,
@@ -251,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
- uint64_t prop_value);
+ u64 prop_value, bool async_flip);
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index d8b2955e88fd..afb02aae707b 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
- if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+ if (!iter->clips || state->ignore_damage_clips ||
+ !drm_rect_equals(&state->src, &old_state->src)) {
iter->clips = NULL;
iter->num_clips = 0;
iter->full_update = true;
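
The new ignore_damage_clips flag is checked here when the damage iterator is set up. A minimal sketch of how a plane driver might opt out of user-space damage clips follows; the callback and driver names are hypothetical, only the drm_plane_state field comes from this series:

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

/* Hypothetical plane atomic_check, for illustration only. */
static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/*
	 * With ignore_damage_clips set, drm_atomic_helper_damage_iter_init()
	 * discards the user-supplied clip list and reports a single
	 * full-plane rectangle instead.
	 */
	new_state->ignore_damage_clips = true;

	return 0;
}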
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f291fb4b359f..f4715a67e340 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_framebuffer_debugfs_init(dev);
drm_client_debugfs_init(dev);
}
- if (drm_drv_uses_atomic_modeset(dev)) {
+ if (drm_drv_uses_atomic_modeset(dev))
drm_atomic_debugfs_init(dev);
- drm_bridge_debugfs_init(dev);
- }
}
int drm_debugfs_register(struct drm_minor *minor, int minor_id,
@@ -589,4 +587,65 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
+static int bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
+ drm_printf(&p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+#ifdef CONFIG_OF
+ if (bridge->of_node)
+ drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
+#endif
+ drm_printf(&p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(&p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(&p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(&p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(&p, " modes");
+ drm_puts(&p, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bridges);
+
+void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+ struct drm_minor *minor = encoder->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index);
+ if (!name)
+ return;
+
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+
+ encoder->debugfs_entry = root;
+
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder,
+ &bridges_fops);
+
+ if (encoder->funcs && encoder->funcs->debugfs_init)
+ encoder->funcs->debugfs_init(encoder, root);
+}
+
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+ debugfs_remove_recursive(encoder->debugfs_entry);
+ encoder->debugfs_entry = NULL;
+}
+
#endif /* CONFIG_DEBUG_FS */
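
The encoder directory created above is also handed to the driver through the debugfs_init hook in struct drm_encoder_funcs. A hedged sketch of a driver-side implementation; the "state" file and the show routine are made up, only the callback signature matches the call site above:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <drm/drm_encoder.h>

/* Hypothetical driver code, for illustration only. */
static int foo_encoder_state_show(struct seq_file *m, void *data)
{
	struct drm_encoder *encoder = m->private;

	seq_printf(m, "possible_crtcs: 0x%08x\n", encoder->possible_crtcs);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_encoder_state);

/* Called with the "encoder-<index>" directory created above as @root. */
static void foo_encoder_debugfs_init(struct drm_encoder *encoder,
				     struct dentry *root)
{
	debugfs_create_file("state", 0444, root, encoder,
			    &foo_encoder_state_fops);
}

static const struct drm_encoder_funcs foo_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
	.debugfs_init = foo_encoder_debugfs_init,
};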
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
deleted file mode 100644
index eb6b741a6f99..000000000000
--- a/drivers/gpu/drm/drm_dma.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-/**
- * drm_legacy_dma_setup() - Initialize the DMA data.
- *
- * @dev: DRM device.
- * Return: zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_legacy_dma_setup(struct drm_device *dev)
-{
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- !drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
- if (!dev->dma)
- return -ENOMEM;
-
- for (i = 0; i <= DRM_MAX_ORDER; i++)
- memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
-
- return 0;
-}
-
-/**
- * drm_legacy_dma_takedown() - Cleanup the DMA resources.
- *
- * @dev: DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_legacy_dma_takedown(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_dma_handle_t *dmah;
- int i, j;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- !drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- if (!dma)
- return;
-
- /* Clear dma buffers */
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].seg_count) {
- DRM_DEBUG("order %d: buf_count = %d,"
- " seg_count = %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].seg_count);
- for (j = 0; j < dma->bufs[i].seg_count; j++) {
- if (dma->bufs[i].seglist[j]) {
- dmah = dma->bufs[i].seglist[j];
- dma_free_coherent(dev->dev,
- dmah->size,
- dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
- }
- }
- kfree(dma->bufs[i].seglist);
- }
- if (dma->bufs[i].buf_count) {
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- kfree(dma->bufs[i].buflist[j].dev_private);
- }
- kfree(dma->bufs[i].buflist);
- }
- }
-
- kfree(dma->buflist);
- kfree(dma->pagelist);
- kfree(dev->dma);
- dev->dma = NULL;
-}
-
-/**
- * drm_legacy_free_buffer() - Free a buffer.
- *
- * @dev: DRM device.
- * @buf: buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
- if (!buf)
- return;
-
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
- buf->used = 0;
-}
-
-/**
- * drm_legacy_reclaim_buffers() - Reclaim the buffers.
- *
- * @dev: DRM device.
- * @file_priv: DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->file_priv == file_priv) {
- switch (dma->buflist[i]->list) {
- case DRM_LIST_NONE:
- drm_legacy_free_buffer(dev, dma->buflist[i]);
- break;
- case DRM_LIST_WAIT:
- dma->buflist[i]->list = DRM_LIST_RECLAIM;
- break;
- default:
- /* Buffer already on hardware. */
- break;
- }
- }
- }
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 535f16e7882e..243cacb3575c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -48,7 +48,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
@@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode)
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
put_device(dev->dev);
@@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
}
static int drm_dev_init(struct drm_device *dev,
@@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev,
return -EINVAL;
}
- drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
@@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- ret = drm_legacy_create_map_hash(dev);
- if (ret)
- goto err;
-
- drm_legacy_ctxbitmap_init(dev);
-
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
if (ret) {
@@ -949,8 +938,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_register_all(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_modeset_register_all(dev);
+ if (ret)
+ goto err_unload;
+ }
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -960,6 +952,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock;
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
err_minors:
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
@@ -990,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register);
*/
void drm_dev_unregister(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_lastclose(dev);
-
dev->registered = false;
drm_client_dev_unregister(dev);
@@ -1003,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
- drm_legacy_pci_agp_destroy(dev);
- drm_legacy_rmmaps(dev);
-
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 39db08f803ea..cb4031d5dcbb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -41,10 +41,12 @@
#include <drm/drm_displayid.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
static int oui(u8 first, u8 second, u8 third)
{
@@ -5510,6 +5512,27 @@ static void clear_eld(struct drm_connector *connector)
}
/*
+ * Get 3-byte SAD buffer from struct cea_sad.
+ */
+void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad)
+{
+ sad[0] = cta_sad->format << 3 | cta_sad->channels;
+ sad[1] = cta_sad->freq;
+ sad[2] = cta_sad->byte2;
+}
+
+/*
+ * Set struct cea_sad from 3-byte SAD buffer.
+ */
+void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad)
+{
+ cta_sad->format = (sad[0] & 0x78) >> 3;
+ cta_sad->channels = sad[0] & 0x07;
+ cta_sad->freq = sad[1] & 0x7f;
+ cta_sad->byte2 = sad[2];
+}
+
+/*
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @drm_edid: EDID to parse
@@ -5593,7 +5616,7 @@ static void drm_edid_to_eld(struct drm_connector *connector,
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
- struct cea_sad **sads)
+ struct cea_sad **psads)
{
const struct cea_db *db;
struct cea_db_iter iter;
@@ -5602,20 +5625,16 @@ static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_tag(db) == CTA_DB_AUDIO) {
- int j;
+ struct cea_sad *sads;
+ int i;
count = cea_db_payload_len(db) / 3; /* SAD is 3B */
- *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
- if (!*sads)
+ sads = kcalloc(count, sizeof(*sads), GFP_KERNEL);
+ *psads = sads;
+ if (!sads)
return -ENOMEM;
- for (j = 0; j < count; j++) {
- const u8 *sad = &db->data[j * 3];
-
- (*sads)[j].format = (sad[0] & 0x78) >> 3;
- (*sads)[j].channels = sad[0] & 0x7;
- (*sads)[j].freq = sad[1] & 0x7F;
- (*sads)[j].byte2 = sad[2];
- }
+ for (i = 0; i < count; i++)
+ drm_edid_cta_sad_set(&sads[i], &db->data[i * 3]);
break;
}
}
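
drm_edid_cta_sad_get() and drm_edid_cta_sad_set() are DRM-internal helpers (hence the drm_internal.h include added above). A small sketch of the round trip they implement, using made-up SAD bytes:

#include <drm/drm_edid.h>

#include "drm_internal.h"

/*
 * Illustration only: pack and unpack one 3-byte CTA-861 Short Audio
 * Descriptor with the helpers added above.
 */
static void example_sad_roundtrip(void)
{
	/* Hypothetical SAD: LPCM, 2 channels, 32/44.1/48 kHz, 16/20/24 bit. */
	const u8 sad_in[3] = { 0x09, 0x07, 0x07 };
	u8 sad_out[3];
	struct cea_sad cta_sad;

	drm_edid_cta_sad_set(&cta_sad, sad_in);   /* bytes -> struct */
	drm_edid_cta_sad_get(&cta_sad, sad_out);  /* struct -> bytes */
	/* sad_out now matches sad_in for the fields the struct carries. */
}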
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 5d9ef267ebb3..60fcb80bce61 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -23,22 +23,6 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
-/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
-int __drm_set_edid_firmware_path(const char *path)
-{
- scnprintf(edid_firmware, sizeof(edid_firmware), "%s", path);
-
- return 0;
-}
-EXPORT_SYMBOL(__drm_set_edid_firmware_path);
-
-/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
-int __drm_get_edid_firmware_path(char *buf, size_t bufsize)
-{
- return scnprintf(buf, bufsize, "%s", edid_firmware);
-}
-EXPORT_SYMBOL(__drm_get_edid_firmware_path);
-
#define GENERIC_EDIDS 6
static const char * const generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c
new file mode 100644
index 000000000000..5177991aa272
--- /dev/null
+++ b/drivers/gpu/drm/drm_eld.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
+
+#include "drm_internal.h"
+
+/**
+ * drm_eld_sad_get - get SAD from ELD to struct cea_sad
+ * @eld: ELD buffer
+ * @sad_index: SAD index
+ * @cta_sad: destination struct cea_sad
+ *
+ * Return: 0 on success, or negative on errors
+ */
+int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad)
+{
+ const u8 *sad;
+
+ if (sad_index >= drm_eld_sad_count(eld))
+ return -EINVAL;
+
+ sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index);
+
+ drm_edid_cta_sad_set(cta_sad, sad);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_eld_sad_get);
+
+/**
+ * drm_eld_sad_set - set SAD to ELD from struct cea_sad
+ * @eld: ELD buffer
+ * @sad_index: SAD index
+ * @cta_sad: source struct cea_sad
+ *
+ * Return: 0 on success, or negative on errors
+ */
+int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad)
+{
+ u8 *sad;
+
+ if (sad_index >= drm_eld_sad_count(eld))
+ return -EINVAL;
+
+ sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index);
+
+ drm_edid_cta_sad_get(cta_sad, sad);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_eld_sad_set);
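
Both helpers are exported, so audio-facing drivers can convert between the packed ELD representation and struct cea_sad. A hedged usage sketch; the function below is illustrative, only drm_eld_sad_count() and drm_eld_sad_get() belong to the ELD API:

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_print.h>

/* Illustration only: walk every SAD stored in connector->eld. */
static void example_dump_eld_sads(struct drm_connector *connector)
{
	const u8 *eld = connector->eld;
	int i, count = drm_eld_sad_count(eld);

	for (i = 0; i < count; i++) {
		struct cea_sad cta_sad;

		if (drm_eld_sad_get(eld, i, &cta_sad))
			continue;

		/* cta_sad.channels stores the raw field, i.e. channels - 1. */
		drm_dbg_kms(connector->dev, "SAD %d: format %u, %u channels\n",
			    i, cta_sad.format, cta_sad.channels + 1);
	}
}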
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 1143bc7f3252..8f2bc6a28482 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -30,6 +30,7 @@
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
@@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_encoder(encoder, dev) {
+ drm_debugfs_encoder_add(encoder);
+
if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
@@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev)
drm_for_each_encoder(encoder, dev) {
if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
+ drm_debugfs_encoder_remove(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 446458aca8e9..8c87287c3e16 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -47,7 +47,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -55,14 +54,6 @@ DEFINE_MUTEX(drm_global_mutex);
bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
/*
- * Legacy drivers rely on all kinds of BKL locking semantics, don't
- * bother. They also still need BKL locking for their ioctls, so better
- * safe than sorry.
- */
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- return true;
-
- /*
* The deprecated ->load callback must be called after the driver is
* already registered. This means such drivers rely on the BKL to make
* sure an open can't proceed until the driver is actually fully set up.
@@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* drm_send_event() as the main starting points.
*
* The memory mapping implementation will vary depending on how the driver
- * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
- * function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ * manages memory. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
@@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
-#ifdef CONFIG_DRM_LEGACY
- if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
- dev->driver->preclose)
- dev->driver->preclose(dev, file);
-#endif
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_lock_release(dev, file->filp);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- drm_legacy_reclaim_buffers(dev, file);
-
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
- drm_legacy_ctxbitmap_flush(dev, file);
-
if (drm_is_primary_client(file))
drm_master_release(file);
@@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
-#ifdef CONFIG_DRM_LEGACY
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
-
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = list_entry(pci_root_buses.next,
- struct pci_bus, node);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-#endif
-
return 0;
}
@@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp)
struct drm_device *dev;
struct drm_minor *minor;
int retcode;
- int need_setup = 0;
minor = drm_minor_acquire(iminor(inode));
if (IS_ERR(minor))
@@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp)
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
- if (!atomic_fetch_inc(&dev->open_count))
- need_setup = 1;
+ atomic_fetch_inc(&dev->open_count);
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
@@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
- if (need_setup) {
- retcode = drm_legacy_setup(dev);
- if (retcode) {
- drm_close_helper(filp);
- goto err_undo;
- }
- }
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
@@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
drm_dbg_core(dev, "driver lastclose completed\n");
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_dev_reinit(dev);
-
drm_client_dev_restore(dev);
}
@@ -913,7 +853,7 @@ static void print_size(struct drm_printer *p, const char *stat,
unsigned u;
for (u = 0; u < ARRAY_SIZE(units) - 1; u++) {
- if (sz < SZ_1K)
+ if (sz == 0 || !IS_ALIGNED(sz, SZ_1K))
break;
sz = div_u64(sz, SZ_1K);
}
@@ -958,7 +898,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
{
struct drm_gem_object *obj;
struct drm_memory_stats status = {};
- enum drm_gem_object_status supported_status;
+ enum drm_gem_object_status supported_status = 0;
int id;
spin_lock(&file->table_lock);
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 060b753881a2..8c6090a90d56 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -27,14 +27,12 @@
#include <drm/drm_print.h>
#include <drm/drm_util.h>
-/**
- * drm_flip_work_allocate_task - allocate a flip-work task
- * @data: data associated to the task
- * @flags: allocator flags
- *
- * Allocate a drm_flip_task object and attach private data to it.
- */
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
+static struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
{
struct drm_flip_task *task;
@@ -44,18 +42,8 @@ struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
return task;
}
-EXPORT_SYMBOL(drm_flip_work_allocate_task);
-/**
- * drm_flip_work_queue_task - queue a specific task
- * @work: the flip-work
- * @task: the task to handle
- *
- * Queues task, that will later be run (passed back to drm_flip_func_t
- * func) on a work queue after drm_flip_work_commit() is called.
- */
-void drm_flip_work_queue_task(struct drm_flip_work *work,
- struct drm_flip_task *task)
+static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
{
unsigned long flags;
@@ -63,7 +51,6 @@ void drm_flip_work_queue_task(struct drm_flip_work *work,
list_add_tail(&task->node, &work->queued);
spin_unlock_irqrestore(&work->lock, flags);
}
-EXPORT_SYMBOL(drm_flip_work_queue_task);
/**
* drm_flip_work_queue - queue work
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index f93a4efcee90..b1be458ed4dd 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -20,6 +20,97 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+/**
+ * drm_format_conv_state_init - Initialize format-conversion state
+ * @state: The state to initialize
+ *
+ * Clears all fields in struct drm_format_conv_state. The state will
+ * be empty with no preallocated resources.
+ */
+void drm_format_conv_state_init(struct drm_format_conv_state *state)
+{
+ state->tmp.mem = NULL;
+ state->tmp.size = 0;
+ state->tmp.preallocated = false;
+}
+EXPORT_SYMBOL(drm_format_conv_state_init);
+
+/**
+ * drm_format_conv_state_copy - Copy format-conversion state
+ * @state: Destination state
+ * @old_state: Source state
+ *
+ * Copies format-conversion state from @old_state to @state; except for
+ * temporary storage.
+ */
+void drm_format_conv_state_copy(struct drm_format_conv_state *state,
+ const struct drm_format_conv_state *old_state)
+{
+ /*
+ * So far, there's only temporary storage here, which we don't
+ * duplicate. Just clear the fields.
+ */
+ state->tmp.mem = NULL;
+ state->tmp.size = 0;
+ state->tmp.preallocated = false;
+}
+EXPORT_SYMBOL(drm_format_conv_state_copy);
+
+/**
+ * drm_format_conv_state_reserve - Allocates storage for format conversion
+ * @state: The format-conversion state
+ * @new_size: The minimum allocation size
+ * @flags: Flags for kmalloc()
+ *
+ * Allocates at least @new_size bytes and returns a pointer to the memory
+ * range. After calling this function, previously returned memory blocks
+ * are invalid. It's best to collect all memory requirements of a format
+ * conversion and call this function once to allocate the range.
+ *
+ * Returns:
+ * A pointer to the allocated memory range, or NULL otherwise.
+ */
+void *drm_format_conv_state_reserve(struct drm_format_conv_state *state,
+ size_t new_size, gfp_t flags)
+{
+ void *mem;
+
+ if (new_size <= state->tmp.size)
+ goto out;
+ else if (state->tmp.preallocated)
+ return NULL;
+
+ mem = krealloc(state->tmp.mem, new_size, flags);
+ if (!mem)
+ return NULL;
+
+ state->tmp.mem = mem;
+ state->tmp.size = new_size;
+
+out:
+ return state->tmp.mem;
+}
+EXPORT_SYMBOL(drm_format_conv_state_reserve);
+
+/**
+ * drm_format_conv_state_release - Releases the format-conversion storage
+ * @state: The format-conversion state
+ *
+ * Releases the memory range referenced by the format-conversion state.
+ * After this call, all pointers to the memory are invalid. Prefer
+ * drm_format_conv_state_init() for cleaning up and unloading a driver.
+ */
+void drm_format_conv_state_release(struct drm_format_conv_state *state)
+{
+ if (state->tmp.preallocated)
+ return;
+
+ kfree(state->tmp.mem);
+ state->tmp.mem = NULL;
+ state->tmp.size = 0;
+}
+EXPORT_SYMBOL(drm_format_conv_state_release);
+
static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp)
{
return clip->y1 * pitch + clip->x1 * cpp;
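
Together these four functions replace the per-call kmalloc()/kfree() of scratch buffers in the helpers below. A hedged sketch of the basic call pattern; buffer setup is omitted and the header is assumed, the function signatures are the ones introduced in this hunk:

#include <linux/iosys-map.h>

#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

/* Illustration only: one-shot conversion using a local conversion state. */
static void example_convert(struct iosys_map *dst, const unsigned int *dst_pitch,
			    const struct iosys_map *src,
			    const struct drm_framebuffer *fb,
			    const struct drm_rect *clip)
{
	struct drm_format_conv_state state;

	drm_format_conv_state_init(&state);

	/*
	 * Temporary line buffers are now drawn from @state instead of
	 * being kmalloc'ed and kfree'd on every call.
	 */
	drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, &state, false);

	/*
	 * Long-lived users would keep @state around across updates and
	 * release it only on teardown.
	 */
	drm_format_conv_state_release(&state);
}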
@@ -45,6 +136,7 @@ EXPORT_SYMBOL(drm_fb_clip_offset);
static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
+ struct drm_format_conv_state *state,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
@@ -60,7 +152,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p
* one line at a time.
*/
if (!vaddr_cached_hint) {
- stmp = kmalloc(sbuf_len, GFP_KERNEL);
+ stmp = drm_format_conv_state_reserve(state, sbuf_len, GFP_KERNEL);
if (!stmp)
return -ENOMEM;
}
@@ -79,8 +171,6 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p
dst += dst_pitch;
}
- kfree(stmp);
-
return 0;
}
@@ -88,6 +178,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p
static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
+ struct drm_format_conv_state *state,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
@@ -101,9 +192,9 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign
void *dbuf;
if (vaddr_cached_hint) {
- dbuf = kmalloc(dbuf_len, GFP_KERNEL);
+ dbuf = drm_format_conv_state_reserve(state, dbuf_len, GFP_KERNEL);
} else {
- dbuf = kmalloc(stmp_off + sbuf_len, GFP_KERNEL);
+ dbuf = drm_format_conv_state_reserve(state, stmp_off + sbuf_len, GFP_KERNEL);
stmp = dbuf + stmp_off;
}
if (!dbuf)
@@ -124,8 +215,6 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign
dst += dst_pitch;
}
- kfree(dbuf);
-
return 0;
}
@@ -134,6 +223,7 @@ static int drm_fb_xfrm(struct iosys_map *dst,
const unsigned int *dst_pitch, const u8 *dst_pixsize,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
+ struct drm_format_conv_state *state,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
@@ -146,10 +236,12 @@ static int drm_fb_xfrm(struct iosys_map *dst,
/* TODO: handle src in I/O memory here */
if (dst[0].is_iomem)
return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0],
- src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
+ src[0].vaddr, fb, clip, vaddr_cached_hint, state,
+ xfrm_line);
else
return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0],
- src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
+ src[0].vaddr, fb, clip, vaddr_cached_hint, state,
+ xfrm_line);
}
/**
@@ -235,6 +327,7 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @cached: Source buffer is mapped cached (eg. not write-combined)
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and swaps per-pixel
* bytes during the process. Destination and framebuffer formats must match. The
@@ -249,7 +342,8 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels
*/
void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool cached)
+ const struct drm_rect *clip, bool cached,
+ struct drm_format_conv_state *state)
{
const struct drm_format_info *format = fb->format;
u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8);
@@ -268,7 +362,7 @@ void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
return;
}
- drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line);
+ drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, state, swab_line);
}
EXPORT_SYMBOL(drm_fb_swab);
@@ -295,6 +389,7 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -309,13 +404,13 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne
*/
void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
1,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_rgb332_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
@@ -364,6 +459,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
* @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
@@ -379,7 +475,8 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab)
+ const struct drm_rect *clip, struct drm_format_conv_state *state,
+ bool swab)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
@@ -392,7 +489,7 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi
else
xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
@@ -421,6 +518,7 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -436,13 +534,13 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xrgb1555_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
@@ -473,6 +571,7 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -488,13 +587,13 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_argb1555_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
@@ -525,6 +624,7 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -540,13 +640,13 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_rgba5551_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
@@ -575,6 +675,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -590,13 +691,13 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
*/
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
3,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_rgb888_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
@@ -623,6 +724,7 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. The parameters @dst, @dst_pitch and @src refer
@@ -638,13 +740,13 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_argb8888_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
@@ -669,13 +771,14 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src,
const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
@@ -699,13 +802,14 @@ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsig
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src,
const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
@@ -735,6 +839,7 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -750,13 +855,14 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un
*/
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xrgb2101010_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
@@ -788,6 +894,7 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -803,13 +910,14 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un
*/
void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_argb2101010_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
@@ -839,6 +947,7 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -858,13 +967,13 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned
*/
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
1,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_gray8_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
@@ -878,6 +987,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
* @src: The framebuffer memory to copy from
* @fb: The framebuffer to copy from
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory. If the
* formats of the display and the framebuffer mismatch, the blit function
@@ -896,7 +1006,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
*/
int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
uint32_t fb_format = fb->format->format;
@@ -904,44 +1014,44 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
} else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
} else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
} else if (fb_format == DRM_FORMAT_XRGB8888) {
if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB1555) {
- drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB1555) {
- drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_RGBA5551) {
- drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB8888) {
- drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XBGR8888) {
- drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ABGR8888) {
- drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB2101010) {
- drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB2101010) {
- drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_BGRX8888) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
}
}
@@ -978,6 +1088,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -1002,7 +1113,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int
*/
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
0, 0, 0, 0
@@ -1042,7 +1153,7 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
* Allocate a buffer to be used for both copying from the cma
* memory and to store the intermediate grayscale line pixels.
*/
- src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL);
+ src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL);
if (!src32)
return;
@@ -1056,8 +1167,6 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
vaddr += fb->pitches[0];
mono += dst_pitch_0;
}
-
- kfree(src32);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
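
A hedged sketch of how an atomic plane update built on the shadow-plane helpers would feed the per-plane conversion state into drm_fb_blit(); dst, dst_format and damage are hypothetical driver-provided values, the rest comes from this patch and the shadow-plane state.

	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);

	/* fmtcnv_state lives in the shadow-plane state, so the temporary buffer
	 * is reused across commits and released together with the plane state.
	 */
	drm_fb_blit(&dst, NULL, dst_format,
		    &shadow_plane_state->data[0], plane_state->fb, &damage,
		    &shadow_plane_state->fmtcnv_state);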
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index d3ba0698b84b..3cc0ffc28e86 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -394,6 +394,31 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
}
}
+static int drm_mode_closefb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fbl;
+ bool found = false;
+
+ mutex_lock(&file_priv->fbs_lock);
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = true;
+
+ if (!found) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -ENOENT;
+ }
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* Drop the reference that was stored in the fbs list */
+ drm_framebuffer_put(fb);
+
+ return 0;
+}
+
/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: drm device
@@ -410,9 +435,8 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
struct drm_file *file_priv)
{
- struct drm_framebuffer *fb = NULL;
- struct drm_framebuffer *fbl = NULL;
- int found = 0;
+ struct drm_framebuffer *fb;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -421,24 +445,13 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
if (!fb)
return -ENOENT;
- mutex_lock(&file_priv->fbs_lock);
- list_for_each_entry(fbl, &file_priv->fbs, filp_head)
- if (fb == fbl)
- found = 1;
- if (!found) {
- mutex_unlock(&file_priv->fbs_lock);
- goto fail_unref;
+ ret = drm_mode_closefb(fb, file_priv);
+ if (ret != 0) {
+ drm_framebuffer_put(fb);
+ return ret;
}
- list_del_init(&fb->filp_head);
- mutex_unlock(&file_priv->fbs_lock);
-
- /* drop the reference we picked up in framebuffer lookup */
- drm_framebuffer_put(fb);
-
/*
- * we now own the reference that was stored in the fbs list
- *
* drm_framebuffer_remove may fail with -EINTR on pending signals,
* so run this in a separate stack as there's no way to correctly
* handle this after the fb is already removed from the lookup table.
@@ -457,10 +470,6 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
drm_framebuffer_put(fb);
return 0;
-
-fail_unref:
- drm_framebuffer_put(fb);
- return -ENOENT;
}
int drm_mode_rmfb_ioctl(struct drm_device *dev,
@@ -471,6 +480,28 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
return drm_mode_rmfb(dev, *fb_id, file_priv);
}
+int drm_mode_closefb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_closefb *r = data;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EOPNOTSUPP;
+
+ if (r->pad)
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ ret = drm_mode_closefb(fb, file_priv);
+ drm_framebuffer_put(fb);
+ return ret;
+}
+
/**
* drm_mode_getfb - get FB info
* @dev: drm device for the ioctl
@@ -552,7 +583,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
unsigned int i;
- int ret;
+ int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
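
For context, the new ioctl is exercised from userspace roughly as follows; DRM_IOCTL_MODE_CLOSEFB and the drmIoctl() wrapper are assumed from the corresponding uAPI/libdrm additions and are not part of this hunk.

	struct drm_mode_closefb closefb = {
		.fb_id = fb_id,	/* framebuffer reference to drop */
		.pad = 0,	/* must be zero, see the r->pad check above */
	};

	/* Unlike RMFB, CLOSEFB only removes the framebuffer from the file's list
	 * and drops that reference; it does not force-disable active planes.
	 */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CLOSEFB, &closefb))
		err(1, "DRM_IOCTL_MODE_CLOSEFB");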
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 5d4b9cd077f7..e440f458b663 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -218,7 +218,14 @@ void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
struct drm_shadow_plane_state *new_shadow_plane_state)
{
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(plane_state);
+
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
+
+ drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
+ &new_shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
@@ -266,6 +273,7 @@ EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
*/
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
+ drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);
@@ -302,6 +310,7 @@ void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
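
A driver that subclasses struct drm_shadow_plane_state keeps calling the __drm_gem_*_shadow_plane_state() helpers from its own plane funcs and gets the conversion state handled along the way; struct hypothetical_plane_state below is a placeholder for such a subclass.

	static struct drm_plane_state *
	hypothetical_plane_duplicate_state(struct drm_plane *plane)
	{
		struct hypothetical_plane_state *new;

		if (!plane->state)
			return NULL;

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		/* Duplicates the base state and sets up new->shadow.fmtcnv_state. */
		__drm_gem_duplicate_shadow_plane_state(plane, &new->shadow);

		return &new->shadow.base;
	}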
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 08c088319652..9c0922c1a5a2 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -61,6 +61,42 @@
* contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
* entries from within dma-fence signalling critical sections it is enough to
* pre-allocate the &drm_gpuva structures.
+ *
+ * &drm_gem_objects which are private to a single VM can share a common
+ * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
+ * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in
+ * the following called 'resv object', which serves as the container of the
+ * GPUVM's shared &dma_resv. This resv object can be a driver specific
+ * &drm_gem_object, such as the &drm_gem_object containing the root page table,
+ * but it can also be a 'dummy' object, which can be allocated with
+ * drm_gpuvm_resv_object_alloc().
+ *
+ * In order to connect a struct drm_gpuva to its backing &drm_gem_object, each
+ * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
+ * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
+ *
+ * A &drm_gpuvm_bo is an abstraction that represents a combination of a
+ * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
+ * This is ensured by the API through drm_gpuvm_bo_obtain() and
+ * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
+ * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
+ * particular combination. If none exists, a new instance is created and linked
+ * to the &drm_gem_object.
+ *
+ * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
+ * as entries for the &drm_gpuvm's lists of external and evicted objects. Those
+ * lists are maintained in order to accelerate locking of dma-resv locks and
+ * validation of evicted objects bound in a &drm_gpuvm. For instance, all
+ * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling
+ * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in
+ * order to validate all evicted &drm_gem_objects. It is also possible to lock
+ * additional &drm_gem_objects by providing the corresponding parameters to
+ * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making
+ * use of helper functions such as drm_gpuvm_prepare_range() or
+ * drm_gpuvm_prepare_objects().
+ *
+ * Every bound &drm_gem_object is treated as an external object when its &dma_resv
+ * structure is different from the &drm_gpuvm's common &dma_resv structure.
*/
/**
@@ -386,21 +422,42 @@
/**
* DOC: Locking
*
- * Generally, the GPU VA manager does not take care of locking itself, it is
- * the drivers responsibility to take care about locking. Drivers might want to
- * protect the following operations: inserting, removing and iterating
- * &drm_gpuva objects as well as generating all kinds of operations, such as
- * split / merge or prefetch.
- *
- * The GPU VA manager also does not take care of the locking of the backing
- * &drm_gem_object buffers GPU VA lists by itself; drivers are responsible to
- * enforce mutual exclusion using either the GEMs dma_resv lock or alternatively
- * a driver specific external lock. For the latter see also
- * drm_gem_gpuva_set_lock().
- *
- * However, the GPU VA manager contains lockdep checks to ensure callers of its
- * API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is
- * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
+ * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
+ * locking itself; it is the driver's responsibility to take care of locking.
+ * Drivers might want to protect the following operations: inserting, removing
+ * and iterating &drm_gpuva objects as well as generating all kinds of
+ * operations, such as split / merge or prefetch.
+ *
+ * DRM GPUVM also does not take care of the locking of the backing
+ * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
+ * itself; drivers are responsible for enforcing mutual exclusion using either the
+ * GEMs dma_resv lock or alternatively a driver specific external lock. For the
+ * latter see also drm_gem_gpuva_set_lock().
+ *
+ * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
+ * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
+ * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
+ * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
+ *
+ * The latter is required since on creation and destruction of a &drm_gpuvm_bo
+ * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva list.
+ * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
+ * &drm_gem_object must be able to observe previous creations and destructions
+ * of &drm_gpuvm_bos in order to keep instances unique.
+ *
+ * The &drm_gpuvm's lists for keeping track of external and evicted objects are
+ * protected against concurrent insertion / removal and iteration internally.
+ *
+ * However, drivers still need to protect concurrent calls to functions
+ * iterating those lists, namely drm_gpuvm_prepare_objects() and
+ * drm_gpuvm_validate().
+ *
+ * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
+ * that the corresponding &dma_resv locks are held in order to protect the
+ * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
+ * the corresponding lockdep checks are enabled. This is an optimization for
+ * drivers which are capable of taking the corresponding &dma_resv locks and
+ * hence do not require internal locking.
*/
/**
@@ -430,6 +487,7 @@
* {
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
+ * struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
* ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
@@ -437,6 +495,10 @@
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
+ * vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
+ * if (IS_ERR(vm_bo))
+ * return PTR_ERR(vm_bo);
+ *
* drm_gpuva_for_each_op(op, ops) {
* struct drm_gpuva *va;
*
@@ -449,7 +511,7 @@
*
* driver_vm_map();
* drm_gpuva_map(gpuvm, va, &op->map);
- * drm_gpuva_link(va);
+ * drm_gpuva_link(va, vm_bo);
*
* break;
* case DRM_GPUVA_OP_REMAP: {
@@ -476,11 +538,11 @@
* driver_vm_remap();
* drm_gpuva_remap(prev, next, &op->remap);
*
- * drm_gpuva_unlink(va);
* if (prev)
- * drm_gpuva_link(prev);
+ * drm_gpuva_link(prev, va->vm_bo);
* if (next)
- * drm_gpuva_link(next);
+ * drm_gpuva_link(next, va->vm_bo);
+ * drm_gpuva_unlink(va);
*
* break;
* }
@@ -496,6 +558,7 @@
* break;
* }
* }
+ * drm_gpuvm_bo_put(vm_bo);
* driver_unlock_va_space();
*
* return 0;
@@ -505,6 +568,7 @@
*
* struct driver_context {
* struct drm_gpuvm *gpuvm;
+ * struct drm_gpuvm_bo *vm_bo;
* struct drm_gpuva *new_va;
* struct drm_gpuva *prev_va;
* struct drm_gpuva *next_va;
@@ -525,6 +589,7 @@
* struct drm_gem_object *obj, u64 offset)
* {
* struct driver_context ctx;
+ * struct drm_gpuvm_bo *vm_bo;
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op;
* int ret = 0;
@@ -534,16 +599,23 @@
* ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
* ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
* ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
- * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
+ * ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
+ * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !vm_bo) {
* ret = -ENOMEM;
* goto out;
* }
*
+ * // Typically protected with a driver specific GEM gpuva lock
+ * // used in the fence signaling path for drm_gpuva_link() and
+ * // drm_gpuva_unlink(), hence pre-allocate.
+ * ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
+ *
* driver_lock_va_space();
* ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
* driver_unlock_va_space();
*
* out:
+ * drm_gpuvm_bo_put(ctx.vm_bo);
* kfree(ctx.new_va);
* kfree(ctx.prev_va);
* kfree(ctx.next_va);
@@ -556,7 +628,7 @@
*
* drm_gpuva_map(ctx->vm, ctx->new_va, &op->map);
*
- * drm_gpuva_link(ctx->new_va);
+ * drm_gpuva_link(ctx->new_va, ctx->vm_bo);
*
* // prevent the new GPUVA from being freed in
* // driver_mapping_create()
@@ -568,22 +640,23 @@
* int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
* {
* struct driver_context *ctx = __ctx;
+ * struct drm_gpuva *va = op->remap.unmap->va;
*
* drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
*
- * drm_gpuva_unlink(op->remap.unmap->va);
- * kfree(op->remap.unmap->va);
- *
* if (op->remap.prev) {
- * drm_gpuva_link(ctx->prev_va);
+ * drm_gpuva_link(ctx->prev_va, va->vm_bo);
* ctx->prev_va = NULL;
* }
*
* if (op->remap.next) {
- * drm_gpuva_link(ctx->next_va);
+ * drm_gpuva_link(ctx->next_va, va->vm_bo);
* ctx->next_va = NULL;
* }
*
+ * drm_gpuva_unlink(va);
+ * kfree(va);
+ *
* return 0;
* }
*
@@ -597,6 +670,201 @@
* }
*/
+/**
+ * get_next_vm_bo_from_list() - get the next vm_bo element
+ * @__gpuvm: the &drm_gpuvm
+ * @__list_name: the name of the list we're iterating on
+ * @__local_list: a pointer to the local list used to store already iterated items
+ * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
+ *
+ * This helper is here to provide lockless list iteration. Lockless as in, the
+ * iterator releases the lock immediately after picking the first element from
+ * the list, so list insertion and deletion can happen concurrently.
+ *
+ * Elements popped from the original list are kept in a local list, so removal
+ * and is_empty checks can still happen while we're iterating the list.
+ */
+#define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo) \
+ ({ \
+ struct drm_gpuvm_bo *__vm_bo = NULL; \
+ \
+ drm_gpuvm_bo_put(__prev_vm_bo); \
+ \
+ spin_lock(&(__gpuvm)->__list_name.lock); \
+ if (!(__gpuvm)->__list_name.local_list) \
+ (__gpuvm)->__list_name.local_list = __local_list; \
+ else \
+ drm_WARN_ON((__gpuvm)->drm, \
+ (__gpuvm)->__list_name.local_list != __local_list); \
+ \
+ while (!list_empty(&(__gpuvm)->__list_name.list)) { \
+ __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \
+ struct drm_gpuvm_bo, \
+ list.entry.__list_name); \
+ if (kref_get_unless_zero(&__vm_bo->kref)) { \
+ list_move_tail(&(__vm_bo)->list.entry.__list_name, \
+ __local_list); \
+ break; \
+ } else { \
+ list_del_init(&(__vm_bo)->list.entry.__list_name); \
+ __vm_bo = NULL; \
+ } \
+ } \
+ spin_unlock(&(__gpuvm)->__list_name.lock); \
+ \
+ __vm_bo; \
+ })
+
+/**
+ * for_each_vm_bo_in_list() - internal vm_bo list iterator
+ * @__gpuvm: the &drm_gpuvm
+ * @__list_name: the name of the list we're iterating on
+ * @__local_list: a pointer to the local list used to store already iterated items
+ * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
+ *
+ * This helper is here to provide lockless list iteration. Lockless as in, the
+ * iterator releases the lock immediately after picking the first element from the
+ * list, hence list insertion and deletion can happen concurrently.
+ *
+ * It is not allowed to re-assign the vm_bo pointer from inside this loop.
+ *
+ * Typical use:
+ *
+ * struct drm_gpuvm_bo *vm_bo;
+ * LIST_HEAD(my_local_list);
+ *
+ * ret = 0;
+ * for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
+ * ret = do_something_with_vm_bo(..., vm_bo);
+ * if (ret)
+ * break;
+ * }
+ * // Drop ref in case we break out of the loop.
+ * drm_gpuvm_bo_put(vm_bo);
+ * restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
+ *
+ *
+ * Only used for internal list iterations, not meant to be exposed to the outside
+ * world.
+ */
+#define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo) \
+ for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
+ __local_list, NULL); \
+ __vm_bo; \
+ __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
+ __local_list, __vm_bo))
+
+static void
+__restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
+ struct list_head *list, struct list_head **local_list)
+{
+ /* Merge back the two lists, moving local list elements to the
+ * head to preserve previous ordering, in case it matters.
+ */
+ spin_lock(lock);
+ if (*local_list) {
+ list_splice(*local_list, list);
+ *local_list = NULL;
+ }
+ spin_unlock(lock);
+}
+
+/**
+ * restore_vm_bo_list() - move vm_bo elements back to their original list
+ * @__gpuvm: the &drm_gpuvm
+ * @__list_name: the name of the list we're iterating on
+ *
+ * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
+ * to restore the original state and let new iterations take place.
+ */
+#define restore_vm_bo_list(__gpuvm, __list_name) \
+ __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \
+ &(__gpuvm)->__list_name.list, \
+ &(__gpuvm)->__list_name.local_list)
+
+static void
+cond_spin_lock(spinlock_t *lock, bool cond)
+{
+ if (cond)
+ spin_lock(lock);
+}
+
+static void
+cond_spin_unlock(spinlock_t *lock, bool cond)
+{
+ if (cond)
+ spin_unlock(lock);
+}
+
+static void
+__drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
+ struct list_head *entry, struct list_head *list)
+{
+ cond_spin_lock(lock, !!lock);
+ if (list_empty(entry))
+ list_add_tail(entry, list);
+ cond_spin_unlock(lock, !!lock);
+}
+
+/**
+ * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
+ * @__vm_bo: the &drm_gpuvm_bo
+ * @__list_name: the name of the list to insert into
+ * @__lock: whether to lock with the internal spinlock
+ *
+ * Inserts the given @__vm_bo into the list specified by @__list_name.
+ */
+#define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \
+ __drm_gpuvm_bo_list_add((__vm_bo)->vm, \
+ __lock ? &(__vm_bo)->vm->__list_name.lock : \
+ NULL, \
+ &(__vm_bo)->list.entry.__list_name, \
+ &(__vm_bo)->vm->__list_name.list)
+
+static void
+__drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
+ struct list_head *entry, bool init)
+{
+ cond_spin_lock(lock, !!lock);
+ if (init) {
+ if (!list_empty(entry))
+ list_del_init(entry);
+ } else {
+ list_del(entry);
+ }
+ cond_spin_unlock(lock, !!lock);
+}
+
+/**
+ * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
+ * @__vm_bo: the &drm_gpuvm_bo
+ * @__list_name: the name of the list to insert into
+ * @__lock: whether to lock with the internal spinlock
+ *
+ * Removes the given @__vm_bo from the list specified by @__list_name.
+ */
+#define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \
+ __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
+ __lock ? &(__vm_bo)->vm->__list_name.lock : \
+ NULL, \
+ &(__vm_bo)->list.entry.__list_name, \
+ true)
+
+/**
+ * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
+ * @__vm_bo: the &drm_gpuvm_bo
+ * @__list_name: the name of the list to insert into
+ * @__lock: whether to lock with the internal spinlock
+ *
+ * Removes the given @__vm_bo from the list specified by @__list_name.
+ */
+#define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \
+ __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
+ __lock ? &(__vm_bo)->vm->__list_name.lock : \
+ NULL, \
+ &(__vm_bo)->list.entry.__list_name, \
+ false)
+
#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
#define GPUVA_START(node) ((node)->va.addr)
@@ -618,8 +886,14 @@ drm_gpuvm_check_overflow(u64 addr, u64 range)
{
u64 end;
- return WARN(check_add_overflow(addr, range, &end),
- "GPUVA address limited to %zu bytes.\n", sizeof(end));
+ return check_add_overflow(addr, range, &end);
+}
+
+static bool
+drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
+{
+ return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
+ "GPUVA address limited to %zu bytes.\n", sizeof(addr));
}
static bool
@@ -643,7 +917,18 @@ drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
return krange && addr < kend && kstart < end;
}
-static bool
+/**
+ * drm_gpuvm_range_valid() - checks whether the given range is valid for the
+ * given &drm_gpuvm
+ * @gpuvm: the GPUVM to check the range for
+ * @addr: the base address
+ * @range: the range starting from the base address
+ *
+ * Checks whether the range is within the GPUVM's managed boundaries.
+ *
+ * Returns: true for a valid range, false otherwise
+ */
+bool
drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
u64 addr, u64 range)
{
@@ -651,11 +936,52 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
!drm_gpuvm_in_kernel_node(gpuvm, addr, range);
}
+EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
+
+static void
+drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
+{
+ drm_gem_object_release(obj);
+ kfree(obj);
+}
+
+static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
+ .free = drm_gpuvm_gem_object_free,
+};
+
+/**
+ * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
+ * @drm: the drivers &drm_device
+ *
+ * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
+ * order to serve as root GEM object providing the &drm_resv shared across
+ * &drm_gem_objects local to a single GPUVM.
+ *
+ * Returns: the &drm_gem_object on success, NULL on failure
+ */
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm)
+{
+ struct drm_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ obj->funcs = &drm_gpuvm_object_funcs;
+ drm_gem_private_object_init(drm, obj, 0);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
/**
* drm_gpuvm_init() - initialize a &drm_gpuvm
* @gpuvm: pointer to the &drm_gpuvm to initialize
* @name: the name of the GPU VA space
+ * @flags: the &drm_gpuvm_flags for this GPUVM
+ * @drm: the &drm_device this VM resides in
+ * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
* @start_offset: the start offset of the GPU VA space
* @range: the size of the GPU VA space
* @reserve_offset: the start of the kernel reserved GPU VA area
@@ -668,8 +994,10 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
* &name is expected to be managed by the surrounding driver structures.
*/
void
-drm_gpuvm_init(struct drm_gpuvm *gpuvm,
- const char *name,
+drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ enum drm_gpuvm_flags flags,
+ struct drm_device *drm,
+ struct drm_gem_object *r_obj,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
const struct drm_gpuvm_ops *ops)
@@ -677,45 +1005,713 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
gpuvm->rb.tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpuvm->rb.list);
- drm_gpuvm_check_overflow(start_offset, range);
- gpuvm->mm_start = start_offset;
- gpuvm->mm_range = range;
+ INIT_LIST_HEAD(&gpuvm->extobj.list);
+ spin_lock_init(&gpuvm->extobj.lock);
+
+ INIT_LIST_HEAD(&gpuvm->evict.list);
+ spin_lock_init(&gpuvm->evict.lock);
+
+ kref_init(&gpuvm->kref);
gpuvm->name = name ? name : "unknown";
+ gpuvm->flags = flags;
gpuvm->ops = ops;
+ gpuvm->drm = drm;
+ gpuvm->r_obj = r_obj;
- memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+ drm_gem_object_get(r_obj);
+
+ drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
+ gpuvm->mm_start = start_offset;
+ gpuvm->mm_range = range;
+ memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
if (reserve_range) {
gpuvm->kernel_alloc_node.va.addr = reserve_offset;
gpuvm->kernel_alloc_node.va.range = reserve_range;
- if (likely(!drm_gpuvm_check_overflow(reserve_offset,
- reserve_range)))
+ if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
+ reserve_range)))
__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
}
}
EXPORT_SYMBOL_GPL(drm_gpuvm_init);
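
For illustration only: the new init flow with a shared resv object and the now-mandatory vm_free callback might look as follows in a driver; all hypothetical_* names are placeholders.

	static void hypothetical_vm_free(struct drm_gpuvm *gpuvm)
	{
		struct hypothetical_vm *vm = container_of(gpuvm, struct hypothetical_vm, base);

		kfree(vm);
	}

	static const struct drm_gpuvm_ops hypothetical_vm_ops = {
		/* Required since the VM is reference counted and freed via vm_free. */
		.vm_free = hypothetical_vm_free,
	};

	int hypothetical_vm_init(struct drm_device *drm, struct hypothetical_vm *vm,
				 u64 start, u64 range)
	{
		struct drm_gem_object *r_obj;

		r_obj = drm_gpuvm_resv_object_alloc(drm);
		if (!r_obj)
			return -ENOMEM;

		drm_gpuvm_init(&vm->base, "hypothetical", 0, drm, r_obj,
			       start, range, 0, 0, &hypothetical_vm_ops);
		/* drm_gpuvm_init() took its own reference on the resv object. */
		drm_gem_object_put(r_obj);

		return 0;
	}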
+static void
+drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
+{
+ gpuvm->name = NULL;
+
+ if (gpuvm->kernel_alloc_node.va.range)
+ __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
+
+ drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
+ "GPUVA tree is not empty, potentially leaking memory.\n");
+
+ drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
+ "Extobj list should be empty.\n");
+ drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
+ "Evict list should be empty.\n");
+
+ drm_gem_object_put(gpuvm->r_obj);
+}
+
+static void
+drm_gpuvm_free(struct kref *kref)
+{
+ struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
+
+ drm_gpuvm_fini(gpuvm);
+
+ if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
+ return;
+
+ gpuvm->ops->vm_free(gpuvm);
+}
+
/**
- * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
- * @gpuvm: pointer to the &drm_gpuvm to clean up
+ * drm_gpuvm_put() - drop a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to release the reference of
+ *
+ * This releases a reference to @gpuvm.
*
- * Note that it is a bug to call this function on a manager that still
- * holds GPU VA mappings.
+ * This function may be called from atomic context.
*/
void
-drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
+drm_gpuvm_put(struct drm_gpuvm *gpuvm)
{
- gpuvm->name = NULL;
+ if (gpuvm)
+ kref_put(&gpuvm->kref, drm_gpuvm_free);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_put);
- if (gpuvm->kernel_alloc_node.va.range)
- __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
+static int
+exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences)
+{
+ return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
+ drm_exec_lock_obj(exec, obj);
+}
- WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
- "GPUVA tree is not empty, potentially leaking memory.");
+/**
+ * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec context
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
+ * @num_fences is zero drm_exec_lock_obj() is called instead.
+ *
+ * Using this function directly, it is the driver's responsibility to call
+ * drm_exec_init() and drm_exec_fini() accordingly.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
+
+static int
+__drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gpuvm_bo *vm_bo;
+ LIST_HEAD(extobjs);
+ int ret = 0;
+
+ for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
+ ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
+ if (ret)
+ break;
+ }
+ /* Drop ref in case we break out of the loop. */
+ drm_gpuvm_bo_put(vm_bo);
+ restore_vm_bo_list(gpuvm, extobj);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
+
+static int
+drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gpuvm_bo *vm_bo;
+ int ret = 0;
+
+ drm_gpuvm_resv_assert_held(gpuvm);
+ list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
+ ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
+ if (ret)
+ break;
+
+ if (vm_bo->evicted)
+ drm_gpuvm_bo_list_add(vm_bo, evict, false);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec locking context
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
+ * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
+ * is called instead.
+ *
+ * Using this function directly, it is the driver's responsibility to call
+ * drm_exec_init() and drm_exec_fini() accordingly.
+ *
+ * Note: This function is safe against concurrent insertion and removal of
+ * external objects; however, it is not safe against concurrent usage itself.
+ *
+ * Drivers need to make sure to protect this case with either an outer VM lock
+ * or by calling drm_gpuvm_prepare_vm() before this function within the
+ * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
+ * mutual exclusion.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ if (drm_gpuvm_resv_protected(gpuvm))
+ return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
+ num_fences);
+ else
+ return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
+
+/**
+ * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec locking context
+ * @addr: the start address within the VA space
+ * @range: the range to iterate within the VA space
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
+ * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
+ * instead.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 addr, u64 range, unsigned int num_fences)
+{
+ struct drm_gpuva *va;
+ u64 end = addr + range;
+ int ret;
+
+ drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
+ struct drm_gem_object *obj = va->gem.obj;
+
+ ret = exec_prepare_obj(exec, obj, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
+
+/**
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Acquires all dma-resv locks of all &drm_gem_objects the given
+ * &drm_gpuvm contains mappings of.
+ *
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
+ * being set, the driver receives the given @fn callback to lock additional
+ * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
+ * would call drm_exec_prepare_obj() from within this callback.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
+{
+ struct drm_gpuvm *gpuvm = vm_exec->vm;
+ struct drm_exec *exec = &vm_exec->exec;
+ unsigned int num_fences = vm_exec->num_fences;
+ int ret;
+
+ drm_exec_init(exec, vm_exec->flags);
+
+ drm_exec_until_all_locked(exec) {
+ ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+
+ ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+
+ if (vm_exec->extra.fn) {
+ ret = vm_exec->extra.fn(vm_exec);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ drm_exec_fini(exec);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
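
A hedged sketch of a submission path built on top of these helpers; gpuvm, job and hypothetical_run_job() are driver-specific placeholders, the rest comes from this patch and the drm_exec/dma_resv APIs.

	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	struct dma_fence *fence;
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	/* Re-validate evicted BOs while all dma-resv locks are held. */
	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
	if (ret)
		goto out_unlock;

	fence = hypothetical_run_job(job);	/* driver-specific job submission */
	drm_gpuvm_resv_add_fence(gpuvm, &vm_exec.exec, fence,
				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);

out_unlock:
	drm_exec_fini(&vm_exec.exec);
	return ret;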
+
+static int
+fn_lock_array(struct drm_gpuvm_exec *vm_exec)
+{
+ struct {
+ struct drm_gem_object **objs;
+ unsigned int num_objs;
+ } *args = vm_exec->extra.priv;
+
+ return drm_exec_prepare_array(&vm_exec->exec, args->objs,
+ args->num_objs, vm_exec->num_fences);
+}
+
+/**
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @objs: additional &drm_gem_objects to lock
+ * @num_objs: the number of additional &drm_gem_objects to lock
+ *
+ * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
+ * contains mappings of, plus the ones given through @objs.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+ struct drm_gem_object **objs,
+ unsigned int num_objs)
+{
+ struct {
+ struct drm_gem_object **objs;
+ unsigned int num_objs;
+ } args;
+
+ args.objs = objs;
+ args.num_objs = num_objs;
+
+ vm_exec->extra.fn = fn_lock_array;
+ vm_exec->extra.priv = &args;
+
+ return drm_gpuvm_exec_lock(vm_exec);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
+
+/**
+ * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @addr: the start address within the VA space
+ * @range: the range to iterate within the VA space
+ *
+ * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
+ * @addr + @range.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+ u64 addr, u64 range)
+{
+ struct drm_gpuvm *gpuvm = vm_exec->vm;
+ struct drm_exec *exec = &vm_exec->exec;
+ int ret;
+
+ drm_exec_init(exec, vm_exec->flags);
+
+ drm_exec_until_all_locked(exec) {
+ ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
+ vm_exec->num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+ }
+
+ return ret;
+
+err:
+ drm_exec_fini(exec);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
+
+static int
+__drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+ LIST_HEAD(evict);
+ int ret = 0;
+
+ for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
+ ret = ops->vm_bo_validate(vm_bo, exec);
+ if (ret)
+ break;
+ }
+ /* Drop ref in case we break out of the loop. */
+ drm_gpuvm_bo_put(vm_bo);
+ restore_vm_bo_list(gpuvm, evict);
+
+ return ret;
+}
+
+static int
+drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo, *next;
+ int ret = 0;
+
+ drm_gpuvm_resv_assert_held(gpuvm);
+
+ list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
+ list.entry.evict) {
+ ret = ops->vm_bo_validate(vm_bo, exec);
+ if (ret)
+ break;
+
+ dma_resv_assert_held(vm_bo->obj->resv);
+ if (!vm_bo->evicted)
+ drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_gpuvm_validate() - validate all BOs marked as evicted
+ * @gpuvm: the &drm_gpuvm to validate evicted BOs
+ * @exec: the &drm_exec instance used for locking the GPUVM
+ *
+ * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
+ * objects being mapped in the given &drm_gpuvm.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+
+ if (unlikely(!ops || !ops->vm_bo_validate))
+ return -EOPNOTSUPP;
+
+ if (drm_gpuvm_resv_protected(gpuvm))
+ return drm_gpuvm_validate_locked(gpuvm, exec);
+ else
+ return __drm_gpuvm_validate(gpuvm, exec);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
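
A minimal sketch of the &drm_gpuvm_ops::vm_bo_validate callback this relies on; hypothetical_bo_move_back() stands in for the driver's actual re-validation, and how the evicted state gets cleared (typically from the driver's move notifier via drm_gpuvm_bo_evict()) is driver-specific.

	static int hypothetical_vm_bo_validate(struct drm_gpuvm_bo *vm_bo,
					       struct drm_exec *exec)
	{
		/* The BO's dma-resv is already locked by the surrounding drm_exec loop. */
		return hypothetical_bo_move_back(vm_bo->obj);
	}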
+
+/**
+ * drm_gpuvm_resv_add_fence - add fence to private and all extobj
+ * dma-resv
+ * @gpuvm: the &drm_gpuvm to add a fence to
+ * @exec: the &drm_exec locking context
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ */
+void
+drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ dma_resv_assert_held(obj->resv);
+ dma_resv_add_fence(obj->resv, fence,
+ drm_gpuvm_is_extobj(gpuvm, obj) ?
+ extobj_usage : private_usage);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
+
+/**
+ * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * If provided by the driver, this function uses the &drm_gpuvm_ops
+ * vm_bo_alloc() callback to allocate.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+
+ if (ops && ops->vm_bo_alloc)
+ vm_bo = ops->vm_bo_alloc();
+ else
+ vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
+
+ if (unlikely(!vm_bo))
+ return NULL;
+
+ vm_bo->vm = drm_gpuvm_get(gpuvm);
+ vm_bo->obj = obj;
+ drm_gem_object_get(obj);
+
+ kref_init(&vm_bo->kref);
+ INIT_LIST_HEAD(&vm_bo->list.gpuva);
+ INIT_LIST_HEAD(&vm_bo->list.entry.gem);
+
+ INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
+ INIT_LIST_HEAD(&vm_bo->list.entry.evict);
+
+ return vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
+
+static void
+drm_gpuvm_bo_destroy(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gem_object *obj = vm_bo->obj;
+ bool lock = !drm_gpuvm_resv_protected(gpuvm);
+
+ if (!lock)
+ drm_gpuvm_resv_assert_held(gpuvm);
+
+ drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
+ drm_gpuvm_bo_list_del(vm_bo, evict, lock);
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_del(&vm_bo->list.entry.gem);
+
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+}
+
+/**
+ * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to release the reference of
+ *
+ * This releases a reference to @vm_bo.
+ *
+ * If the reference count drops to zero, the &gpuvm_bo is destroyed, which
+ * includes removing it from the GEM's gpuva list. Hence, if a call to this
+ * function can potentially let the reference count drop to zero, the caller must
+ * hold the dma-resv or driver specific GEM gpuva lock.
+ *
+ * This function may only be called from non-atomic context.
+ *
+ * Returns: true if vm_bo was destroyed, false otherwise.
+ */
+bool
+drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
+{
+ might_sleep();
+
+ if (vm_bo)
+ return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
+
+static struct drm_gpuvm_bo *
+__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ if (vm_bo->vm == gpuvm)
+ return vm_bo;
+
+ return NULL;
+}
+
+/**
+ * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
+ * &drm_gpuvm and &drm_gem_object
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the &drm_gpuvm_bo accordingly.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
+
+ return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
+
+/**
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
+ * given &drm_gpuvm and &drm_gem_object
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
+ * &drm_gpuvm_bo.
+ *
+ * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
+ if (vm_bo)
+ return vm_bo;
+
+ vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
+ if (!vm_bo)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
+
+ return vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
+
+/**
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
+ * for the given &drm_gpuvm and &drm_gem_object
+ * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
+ * count is decreased. If not found @__vm_bo is returned without further
+ * increase of the reference count.
+ *
+ * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
+ *
+ * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
+ * &drm_gpuvm_bo was found
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
+{
+ struct drm_gpuvm *gpuvm = __vm_bo->vm;
+ struct drm_gem_object *obj = __vm_bo->obj;
+ struct drm_gpuvm_bo *vm_bo;
+
+ vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
+ if (vm_bo) {
+ drm_gpuvm_bo_put(__vm_bo);
+ return vm_bo;
+ }
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
+
+ return __vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
+
+/**
+ * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
+ * extobj list
+ * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's the extobj list.
+ *
+ * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
+ * list already and the corresponding &drm_gem_object actually is an external
+ * object.
+ */
+void
+drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ bool lock = !drm_gpuvm_resv_protected(gpuvm);
+
+ if (!lock)
+ drm_gpuvm_resv_assert_held(gpuvm);
+
+ if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
+ drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
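
In a driver's map path the extobj bookkeeping would typically sit right after obtaining the vm_bo, as sketched below (illustrative only; locking as described in the comments above).

	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);	/* GEM gpuva lock held */
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	/* No-op for objects sharing the VM's common dma-resv. */
	drm_gpuvm_bo_extobj_add(vm_bo);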
+
+/**
+ * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvms
+ * evicted list
+ * @vm_bo: the &drm_gpuvm_bo to add or remove
+ * @evict: indicates whether the object is evicted
+ *
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
+ */
+void
+drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ struct drm_gem_object *obj = vm_bo->obj;
+ bool lock = !drm_gpuvm_resv_protected(gpuvm);
+
+ dma_resv_assert_held(obj->resv);
+ vm_bo->evicted = evict;
+
+ /* Can't add external objects to the evicted list directly if not using
+ * internal spinlocks, since in this case the evicted list is protected
+ * with the VM's common dma-resv lock.
+ */
+ if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
+ return;
+
+ if (evict)
+ drm_gpuvm_bo_list_add(vm_bo, evict, lock);
+ else
+ drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
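
A hedged sketch of the driver side: a move or eviction notifier, which already holds the object's dma-resv lock, walks the GEM's vm_bo list and updates the evicted state; the notifier itself is a hypothetical driver hook.

	static void hypothetical_gem_evict_notify(struct drm_gem_object *obj, bool evicted)
	{
		struct drm_gpuvm_bo *vm_bo;

		/* obj->resv is held here; this assumes it also serves as the gpuva lock. */
		drm_gem_for_each_gpuvm_bo(vm_bo, obj)
			drm_gpuvm_bo_evict(vm_bo, evicted);
	}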
static int
__drm_gpuva_insert(struct drm_gpuvm *gpuvm,
@@ -764,11 +1760,21 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm,
{
u64 addr = va->va.addr;
u64 range = va->va.range;
+ int ret;
if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
return -EINVAL;
- return __drm_gpuva_insert(gpuvm, va);
+ ret = __drm_gpuva_insert(gpuvm, va);
+ if (likely(!ret))
+ /* Take a reference of the GPUVM for the successfully inserted
+ * drm_gpuva. We can't take the reference in
+ * __drm_gpuva_insert() itself, since we don't want to increase
+ * the reference count for the GPUVM's kernel_alloc_node.
+ */
+ drm_gpuvm_get(gpuvm);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);
@@ -795,35 +1801,46 @@ drm_gpuva_remove(struct drm_gpuva *va)
struct drm_gpuvm *gpuvm = va->vm;
if (unlikely(va == &gpuvm->kernel_alloc_node)) {
- WARN(1, "Can't destroy kernel reserved node.\n");
+ drm_WARN(gpuvm->drm, 1,
+ "Can't destroy kernel reserved node.\n");
return;
}
__drm_gpuva_remove(va);
+ drm_gpuvm_put(va->vm);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);
/**
* drm_gpuva_link() - link a &drm_gpuva
* @va: the &drm_gpuva to link
+ * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
*
- * This adds the given &va to the GPU VA list of the &drm_gem_object it is
- * associated with.
+ * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
+ * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
+ *
+ * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
+ * reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * concurrent access using either the GEMs dma_resv lock or a driver specific
+ * lock set through drm_gem_gpuva_set_lock().
*/
void
-drm_gpuva_link(struct drm_gpuva *va)
+drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
{
struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm *gpuvm = va->vm;
if (unlikely(!obj))
return;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
- list_add_tail(&va->gem.entry, &obj->gpuva.list);
+ va->vm_bo = drm_gpuvm_bo_get(vm_bo);
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -834,20 +1851,31 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* This removes the given &va from the GPU VA list of the &drm_gem_object it is
* associated with.
*
+ * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
+ * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
+ * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
+ *
+ * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
+ * the latter is dropped.
+ *
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * concurrent access using either the GEMs dma_resv lock or a driver specific
+ * lock set through drm_gem_gpuva_set_lock().
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
{
struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm_bo *vm_bo = va->vm_bo;
if (unlikely(!obj))
return;
drm_gem_gpuva_assert_lock_held(obj);
-
list_del_init(&va->gem.entry);
+
+ va->vm_bo = NULL;
+ drm_gpuvm_bo_put(vm_bo);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
@@ -992,10 +2020,10 @@ drm_gpuva_remap(struct drm_gpuva *prev,
struct drm_gpuva *next,
struct drm_gpuva_op_remap *op)
{
- struct drm_gpuva *curr = op->unmap->va;
- struct drm_gpuvm *gpuvm = curr->vm;
+ struct drm_gpuva *va = op->unmap->va;
+ struct drm_gpuvm *gpuvm = va->vm;
- drm_gpuva_remove(curr);
+ drm_gpuva_remove(va);
if (op->prev) {
drm_gpuva_init_from_op(prev, op->prev);
@@ -1637,9 +2665,8 @@ err_free_ops:
EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
/**
- * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
- * @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @obj: the &drm_gem_object to unmap
+ * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
+ * @vm_bo: the &drm_gpuvm_bo abstraction
*
* This function creates a list of operations to perform unmapping for every
* GPUVA attached to a GEM.
@@ -1656,15 +2683,14 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
-drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj)
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
{
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *op;
struct drm_gpuva *va;
int ret;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
@@ -1672,8 +2698,8 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
INIT_LIST_HEAD(&ops->list);
- drm_gem_for_each_gpuva(va, obj) {
- op = gpuva_op_alloc(gpuvm);
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ op = gpuva_op_alloc(vm_bo->vm);
if (!op) {
ret = -ENOMEM;
goto err_free_ops;
@@ -1687,10 +2713,10 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
return ops;
err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
+ drm_gpuva_ops_free(vm_bo->vm, ops);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create);
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
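
For illustration, unmapping every GPUVA of a GEM within one VM, e.g. on GEM close, could be structured like this; hypothetical_unmap() stands in for the driver's page-table update, and the VA-space and GEM gpuva locks are assumed to be held as documented above.

	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
	if (!vm_bo)
		return;

	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
	if (IS_ERR(ops))
		goto out_put;

	drm_gpuva_for_each_op(op, ops) {
		struct drm_gpuva *va = op->unmap.va;

		hypothetical_unmap(va->va.addr, va->va.range);	/* driver-specific */
		drm_gpuva_remove(va);
		drm_gpuva_unlink(va);
		kfree(va);
	}
	drm_gpuva_ops_free(vm_bo->vm, ops);

out_put:
	drm_gpuvm_bo_put(vm_bo);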
/**
* drm_gpuva_ops_free() - free the given &drm_gpuva_ops
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
deleted file mode 100644
index 60afa1865559..000000000000
--- a/drivers/gpu/drm/drm_hashtab.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/hash.h>
-#include <linux/mm.h>
-#include <linux/rculist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
- unsigned int size = 1 << order;
-
- ht->order = order;
- ht->table = NULL;
- if (size <= PAGE_SIZE / sizeof(*ht->table))
- ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
- else
- ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head)
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = &entry->head;
- }
- if (parent) {
- hlist_add_behind_rcu(&item->head, parent);
- } else {
- hlist_add_head_rcu(&item->head, h_list);
- }
- return 0;
-}
-
-/*
- * Just insert an item and return any "bits" bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1UL << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = drm_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
- struct drm_hash_item **item)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key_rcu(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct drm_hash_item, head);
- return 0;
-}
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (list) {
- hlist_del_init_rcu(list);
- return 0;
- }
- return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- hlist_del_init_rcu(&item->head);
- return 0;
-}
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
- if (ht->table) {
- kvfree(ht->table);
- ht->table = NULL;
- }
-}
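
The file removed above implemented DRM's simple open hash table: buckets are hlist heads indexed by hash_long(key, order), and each bucket keeps its entries sorted by key so a lookup can stop as soon as it passes the key it wants. A minimal userspace sketch of the same bucket-plus-sorted-chain idea follows; it is plain C with no kernel hlist or RCU, and every name in it is illustrative rather than part of the DRM API.

```c
#include <stdio.h>
#include <stdlib.h>

struct item {
	unsigned long key;
	struct item *next;		/* sorted singly-linked bucket chain */
};

struct open_hash {
	unsigned int order;		/* table has 1 << order buckets */
	struct item **table;
};

/* Cheap stand-in for the kernel's hash_long(). */
static unsigned int hash_key(unsigned long key, unsigned int order)
{
	return (unsigned int)((key * 2654435761UL) & ((1UL << order) - 1));
}

static int ht_create(struct open_hash *ht, unsigned int order)
{
	ht->order = order;
	ht->table = calloc(1UL << order, sizeof(*ht->table));
	return ht->table ? 0 : -1;
}

/* Insert keeps each bucket sorted by key; duplicate keys are rejected. */
static int ht_insert(struct open_hash *ht, struct item *item)
{
	struct item **pos = &ht->table[hash_key(item->key, ht->order)];

	while (*pos && (*pos)->key < item->key)
		pos = &(*pos)->next;
	if (*pos && (*pos)->key == item->key)
		return -1;			/* already present */
	item->next = *pos;
	*pos = item;
	return 0;
}

static struct item *ht_find(struct open_hash *ht, unsigned long key)
{
	struct item *entry = ht->table[hash_key(key, ht->order)];

	/* Sorted bucket: stop once keys exceed the one we want. */
	for (; entry && entry->key <= key; entry = entry->next)
		if (entry->key == key)
			return entry;
	return NULL;
}

int main(void)
{
	struct open_hash ht;
	struct item a = { .key = 42 }, b = { .key = 7 };

	ht_create(&ht, 4);
	ht_insert(&ht, &a);
	ht_insert(&ht, &b);
	printf("found 42: %s\n", ht_find(&ht, 42) ? "yes" : "no");
	free(ht.table);
	return 0;
}
```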
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8462b657c375..8e4faf0a28e6 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -22,6 +22,7 @@
*/
#include <linux/kthread.h>
+#include <linux/types.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
@@ -31,6 +32,7 @@
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+struct cea_sad;
struct dentry;
struct dma_buf;
struct iosys_map;
@@ -115,17 +117,10 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/* drm_irq.c */
/* IOCTLS */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#endif
-
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -192,6 +187,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector);
void drm_debugfs_crtc_add(struct drm_crtc *crtc);
void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
+void drm_debugfs_encoder_add(struct drm_encoder *encoder);
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder);
#else
static inline void drm_debugfs_dev_fini(struct drm_device *dev)
{
@@ -229,6 +226,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
}
+static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+}
+
+static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+}
+
#endif
drm_ioctl_t drm_version;
@@ -267,3 +272,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
void drm_framebuffer_debugfs_init(struct drm_device *dev);
+
+/* drm_edid.c */
+void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
+void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 025dc558c94e..129e2b91dbfe 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -31,12 +31,12 @@
#include <linux/ratelimit.h>
#include <linux/export.h>
+#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -163,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EINVAL;
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_map32 {
- u32 offset; /* Requested physical address (0 for SAREA) */
- u32 size; /* Requested physical size (bytes) */
- enum drm_map_type type; /* Type of memory to map */
- enum drm_map_flags flags; /* Flags */
- u32 handle; /* User-space: "Handle" to pass to mmap() */
- int mtrr; /* MTRR slot used */
-} drm_map32_t;
-
-static int compat_drm_getmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map map;
- int err;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map.offset = m32.offset;
- err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
- if (err)
- return err;
-
- m32.offset = map.offset;
- m32.size = map.size;
- m32.type = map.type;
- m32.flags = map.flags;
- m32.handle = ptr_to_compat((void __user *)map.handle);
- m32.mtrr = map.mtrr;
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
- return 0;
-
-}
-
-static int compat_drm_addmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map map;
- int err;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map.offset = m32.offset;
- map.size = m32.size;
- map.type = m32.type;
- map.flags = m32.flags;
-
- err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- m32.offset = map.offset;
- m32.mtrr = map.mtrr;
- m32.handle = ptr_to_compat((void __user *)map.handle);
- if (map.handle != compat_ptr(m32.handle))
- pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
- map.handle, m32.type, m32.offset);
-
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_rmmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- struct drm_map map;
- u32 handle;
-
- if (get_user(handle, &argp->handle))
- return -EFAULT;
- map.handle = compat_ptr(handle);
- return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
-}
-#endif
-
typedef struct drm_client32 {
int idx; /* Which client desired? */
int auth; /* Is client authenticated? */
@@ -308,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_buf_desc32 {
- int count; /* Number of buffers of this size */
- int size; /* Size in bytes */
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
- int flags;
- u32 agp_start; /* Start address in the AGP aperture */
-} drm_buf_desc32_t;
-
-static int compat_drm_addbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc32_t desc32;
- struct drm_buf_desc desc;
- int err;
-
- if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- desc = (struct drm_buf_desc){
- desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
- desc32.flags, desc32.agp_start
- };
-
- err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- desc32 = (drm_buf_desc32_t){
- desc.count, desc.size, desc.low_mark, desc.high_mark,
- desc.flags, desc.agp_start
- };
- if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_markbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t b32;
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc buf;
-
- if (copy_from_user(&b32, argp, sizeof(b32)))
- return -EFAULT;
-
- buf.size = b32.size;
- buf.low_mark = b32.low_mark;
- buf.high_mark = b32.high_mark;
-
- return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_buf_info32 {
- int count; /**< Entries in list */
- u32 list;
-} drm_buf_info32_t;
-
-static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
-{
- drm_buf_info32_t *request = data;
- drm_buf_desc32_t __user *to = compat_ptr(request->list);
- drm_buf_desc32_t v = {.count = from->buf_count,
- .size = from->buf_size,
- .low_mark = from->low_mark,
- .high_mark = from->high_mark};
-
- if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
- return -EFAULT;
- return 0;
-}
-
-static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_buf_info32_t *request = data;
-
- return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
-}
-
-static int compat_drm_infobufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_info32_t req32;
- drm_buf_info32_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- if (req32.count < 0)
- req32.count = 0;
-
- err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(req32.count, &argp->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_pub32 {
- int idx; /**< Index into the master buffer list */
- int total; /**< Buffer size */
- int used; /**< Amount of buffer in use (for DMA) */
- u32 address; /**< Address of buffer */
-} drm_buf_pub32_t;
-
-typedef struct drm_buf_map32 {
- int count; /**< Length of the buffer list */
- u32 virtual; /**< Mmap'd area in user-virtual */
- u32 list; /**< Buffer information */
-} drm_buf_map32_t;
-
-static int map_one_buf32(void *data, int idx, unsigned long virtual,
- struct drm_buf *buf)
-{
- drm_buf_map32_t *request = data;
- drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
- drm_buf_pub32_t v;
-
- v.idx = buf->idx;
- v.total = buf->total;
- v.used = 0;
- v.address = virtual + buf->offset;
- if (copy_to_user(to, &v, sizeof(v)))
- return -EFAULT;
- return 0;
-}
-
-static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_buf_map32_t *request = data;
- void __user *v;
- int err = __drm_legacy_mapbufs(dev, data, &request->count,
- &v, map_one_buf32,
- file_priv);
- request->virtual = ptr_to_compat(v);
- return err;
-}
-
-static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_map32_t __user *argp = (void __user *)arg;
- drm_buf_map32_t req32;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
- if (req32.count < 0)
- return -EINVAL;
-
- err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(req32.count, &argp->count)
- || put_user(req32.virtual, &argp->virtual))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_free32 {
- int count;
- u32 list;
-} drm_buf_free32_t;
-
-static int compat_drm_freebufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_free32_t req32;
- struct drm_buf_free request;
- drm_buf_free32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.count = req32.count;
- request.list = compat_ptr(req32.list);
- return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
-}
-
-typedef struct drm_ctx_priv_map32 {
- unsigned int ctx_id; /**< Context requesting private mapping */
- u32 handle; /**< Handle of map */
-} drm_ctx_priv_map32_t;
-
-static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_priv_map32_t req32;
- struct drm_ctx_priv_map request;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.ctx_id = req32.ctx_id;
- request.handle = compat_ptr(req32.handle);
- return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_ctx_priv_map req;
- drm_ctx_priv_map32_t req32;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- req.ctx_id = req32.ctx_id;
- err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
- if (err)
- return err;
-
- req32.handle = ptr_to_compat((void __user *)req.handle);
- if (copy_to_user(argp, &req32, sizeof(req32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_ctx_res32 {
- int count;
- u32 contexts;
-} drm_ctx_res32_t;
-
-static int compat_drm_resctx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_res32_t __user *argp = (void __user *)arg;
- drm_ctx_res32_t res32;
- struct drm_ctx_res res;
- int err;
-
- if (copy_from_user(&res32, argp, sizeof(res32)))
- return -EFAULT;
-
- res.count = res32.count;
- res.contexts = compat_ptr(res32.contexts);
- err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
- if (err)
- return err;
-
- res32.count = res.count;
- if (copy_to_user(argp, &res32, sizeof(res32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_dma32 {
- int context; /**< Context handle */
- int send_count; /**< Number of buffers to send */
- u32 send_indices; /**< List of handles to buffers */
- u32 send_sizes; /**< Lengths of data to send */
- enum drm_dma_flags flags; /**< Flags */
- int request_count; /**< Number of buffers requested */
- int request_size; /**< Desired size for buffers */
- u32 request_indices; /**< Buffer information */
- u32 request_sizes;
- int granted_count; /**< Number of buffers granted */
-} drm_dma32_t;
-
-static int compat_drm_dma(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_dma32_t d32;
- drm_dma32_t __user *argp = (void __user *)arg;
- struct drm_dma d;
- int err;
-
- if (copy_from_user(&d32, argp, sizeof(d32)))
- return -EFAULT;
-
- d.context = d32.context;
- d.send_count = d32.send_count;
- d.send_indices = compat_ptr(d32.send_indices);
- d.send_sizes = compat_ptr(d32.send_sizes);
- d.flags = d32.flags;
- d.request_count = d32.request_count;
- d.request_indices = compat_ptr(d32.request_indices);
- d.request_sizes = compat_ptr(d32.request_sizes);
- err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(d.request_size, &argp->request_size)
- || put_user(d.granted_count, &argp->granted_count))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#if IS_ENABLED(CONFIG_AGP)
-typedef struct drm_agp_mode32 {
- u32 mode; /**< AGP mode */
-} drm_agp_mode32_t;
-
-static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_mode32_t __user *argp = (void __user *)arg;
- struct drm_agp_mode mode;
-
- if (get_user(mode.mode, &argp->mode))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_info32 {
- int agp_version_major;
- int agp_version_minor;
- u32 mode;
- u32 aperture_base; /* physical address */
- u32 aperture_size; /* bytes */
- u32 memory_allowed; /* bytes */
- u32 memory_used;
-
- /* PCI information */
- unsigned short id_vendor;
- unsigned short id_device;
-} drm_agp_info32_t;
-
-static int compat_drm_agp_info(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_info32_t __user *argp = (void __user *)arg;
- drm_agp_info32_t i32;
- struct drm_agp_info info;
- int err;
-
- err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH);
- if (err)
- return err;
-
- i32.agp_version_major = info.agp_version_major;
- i32.agp_version_minor = info.agp_version_minor;
- i32.mode = info.mode;
- i32.aperture_base = info.aperture_base;
- i32.aperture_size = info.aperture_size;
- i32.memory_allowed = info.memory_allowed;
- i32.memory_used = info.memory_used;
- i32.id_vendor = info.id_vendor;
- i32.id_device = info.id_device;
- if (copy_to_user(argp, &i32, sizeof(i32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_agp_buffer32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for binding / unbinding */
- u32 type; /**< Type of memory to allocate */
- u32 physical; /**< Physical used by i810 */
-} drm_agp_buffer32_t;
-
-static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- drm_agp_buffer32_t req32;
- struct drm_agp_buffer request;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.size = req32.size;
- request.type = req32.type;
- err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- req32.handle = request.handle;
- req32.physical = request.physical;
- if (copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int compat_drm_agp_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- struct drm_agp_buffer request;
-
- if (get_user(request.handle, &argp->handle))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_binding32 {
- u32 handle; /**< From drm_agp_buffer */
- u32 offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding32_t;
-
-static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- drm_agp_binding32_t req32;
- struct drm_agp_binding request;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.handle = req32.handle;
- request.offset = req32.offset;
- return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- struct drm_agp_binding request;
-
- if (get_user(request.handle, &argp->handle))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif /* CONFIG_AGP */
-
-typedef struct drm_scatter_gather32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather32_t;
-
-static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather request;
- int err;
-
- if (get_user(request.size, &argp->size))
- return -EFAULT;
-
- err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- /* XXX not sure about the handle conversion here... */
- if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_sg_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather request;
- unsigned long x;
-
- if (get_user(x, &argp->handle))
- return -EFAULT;
- request.handle = x << PAGE_SHIFT;
- return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
@@ -854,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
- err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0);
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -914,37 +333,9 @@ static struct {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
-#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
- DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
- DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
- DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
- DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
- DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
-#if IS_ENABLED(CONFIG_AGP)
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
-#endif
-#endif
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
- DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
-#endif
#if defined(CONFIG_X86)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
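
The compat handlers deleted above all follow one shape: copy a packed 32-bit struct in from userspace, widen its fields into the native struct (u32 handles become pointers via compat_ptr()), call the regular ioctl, then narrow the results back into the 32-bit layout. A standalone sketch of that widen/call/narrow pattern is below; the struct and function names (req32, do_alloc, compat_do_alloc) are invented for illustration and it deliberately avoids the real copy_from_user()/compat_ptr() kernel API.

```c
#include <stdint.h>
#include <stdio.h>

/* Layout a hypothetical 32-bit userspace client would pass in. */
struct req32 {
	uint32_t size;		/* bytes */
	uint32_t handle;	/* 32-bit "pointer" / handle */
};

/* Native layout the real handler works with. */
struct req {
	size_t size;
	void *handle;
};

/* Stand-in for the native ioctl handler. */
static int do_alloc(struct req *r)
{
	static char buffer[4096];

	if (r->size > sizeof(buffer))
		return -1;
	r->handle = buffer;
	return 0;
}

/* Compat wrapper: widen, call, narrow back into the 32-bit layout. */
static int compat_do_alloc(struct req32 *arg)
{
	struct req r = { .size = arg->size };
	int err = do_alloc(&r);

	if (err)
		return err;
	/* Narrowing a pointer to 32 bits is lossy on 64-bit hosts; the
	 * removed compat_drm_addmap() warned when truncation happened. */
	arg->handle = (uint32_t)(uintptr_t)r.handle;
	return 0;
}

int main(void)
{
	struct req32 arg = { .size = 128 };

	if (compat_do_alloc(&arg) == 0)
		printf("handle (truncated): 0x%x\n", (unsigned)arg.handle);
	return 0;
}
```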
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 77590b0f38fa..e368fc084c77 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -42,7 +42,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
/**
* DOC: getunique and setversion story
@@ -301,6 +300,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_CRTC_IN_VBLANK_EVENT:
req->value = 1;
break;
+ case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP:
+ req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) &&
+ dev->mode_config.async_page_flip;
+ break;
default:
return -EINVAL;
}
@@ -361,6 +364,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->writeback_connectors = req->value;
break;
+ case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT:
+ if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT))
+ return -EOPNOTSUPP;
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->supports_virtualized_cursor_plane = req->value;
+ break;
default:
return -EINVAL;
}
@@ -559,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
.name = #ioctl \
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
-#else
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
-#endif
-
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid,
- DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
@@ -586,63 +588,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if IS_ENABLED(CONFIG_AGP)
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -675,6 +629,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CLOSEFB, drm_mode_closefb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
@@ -774,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
{
struct drm_file *file_priv = file->private_data;
struct drm_device *dev = file_priv->minor->dev;
- int retcode;
+ int ret;
/* Update drm_file owner if fd was passed along. */
drm_file_update_pid(file_priv);
@@ -782,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
- retcode = drm_ioctl_permit(flags, file_priv);
- if (unlikely(retcode))
- return retcode;
-
- /* Enforce sane locking for modern driver ioctls. */
- if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
- (flags & DRM_UNLOCKED))
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
- }
- return retcode;
+ ret = drm_ioctl_permit(flags, file_priv);
+ if (unlikely(ret))
+ return ret;
+
+ return func(dev, kdata, file_priv);
}
EXPORT_SYMBOL(drm_ioctl_kernel);
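
With the legacy locking path gone, drm_ioctl_kernel() reduces to an unplugged check, a permission check against the ioctl's flags, and a direct call into the handler. The flag-gated dispatch it keeps is easy to show in isolation; the sketch below uses invented flag names and a toy caller struct, not the DRM definitions.

```c
#include <stdio.h>

#define NEED_AUTH	0x1	/* caller must be authenticated */
#define NEED_MASTER	0x2	/* caller must own the device */

struct caller {
	int authenticated;
	int is_master;
};

typedef int (*ioctl_fn)(struct caller *c, void *data);

/* Refuse the call unless the caller satisfies every flag the ioctl asks for. */
static int permit(unsigned int flags, const struct caller *c)
{
	if ((flags & NEED_AUTH) && !c->authenticated)
		return -1;
	if ((flags & NEED_MASTER) && !c->is_master)
		return -1;
	return 0;
}

static int dispatch(struct caller *c, ioctl_fn fn, void *data, unsigned int flags)
{
	int ret = permit(flags, c);

	if (ret)
		return ret;
	return fn(c, data);	/* no global lock: handlers do their own locking */
}

static int get_version(struct caller *c, void *data)
{
	(void)c;
	*(int *)data = 1;
	return 0;
}

int main(void)
{
	struct caller c = { .authenticated = 1, .is_master = 0 };
	int version = 0;

	if (dispatch(&c, get_version, &version, NEED_AUTH) == 0)
		printf("version %d\n", version);
	return 0;
}
```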
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
deleted file mode 100644
index d327638e15ee..000000000000
--- a/drivers/gpu/drm/drm_irq.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * drm_irq.c IRQ and vblank support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-
-#include <linux/export.h>
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/pci.h>
-#include <linux/vgaarb.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
-
-#include "drm_internal.h"
-
-static int drm_legacy_irq_install(struct drm_device *dev, int irq)
-{
- int ret;
- unsigned long sh_flags = 0;
-
- if (irq == 0)
- return -EINVAL;
-
- if (dev->irq_enabled)
- return -EBUSY;
- dev->irq_enabled = true;
-
- DRM_DEBUG("irq=%d\n", irq);
-
- /* Before installing handler */
- if (dev->driver->irq_preinstall)
- dev->driver->irq_preinstall(dev);
-
- /* PCI devices require shared interrupts. */
- if (dev_is_pci(dev->dev))
- sh_flags = IRQF_SHARED;
-
- ret = request_irq(irq, dev->driver->irq_handler,
- sh_flags, dev->driver->name, dev);
-
- if (ret < 0) {
- dev->irq_enabled = false;
- return ret;
- }
-
- /* After installing handler */
- if (dev->driver->irq_postinstall)
- ret = dev->driver->irq_postinstall(dev);
-
- if (ret < 0) {
- dev->irq_enabled = false;
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_unregister(to_pci_dev(dev->dev));
- free_irq(irq, dev);
- } else {
- dev->irq = irq;
- }
-
- return ret;
-}
-
-int drm_legacy_irq_uninstall(struct drm_device *dev)
-{
- unsigned long irqflags;
- bool irq_enabled;
- int i;
-
- irq_enabled = dev->irq_enabled;
- dev->irq_enabled = false;
-
- /*
- * Wake up any waiters so they don't hang. This is just to paper over
- * issues for UMS drivers which aren't in full control of their
- * vblank/irq handling. KMS drivers must ensure that vblanks are all
- * disabled when uninstalling the irq handler.
- */
- if (drm_dev_has_vblank(dev)) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[i];
-
- if (!vblank->enabled)
- continue;
-
- WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
-
- drm_vblank_disable_and_save(dev, i);
- wake_up(&vblank->queue);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- }
-
- if (!irq_enabled)
- return -EINVAL;
-
- DRM_DEBUG("irq=%d\n", dev->irq);
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_unregister(to_pci_dev(dev->dev));
-
- if (dev->driver->irq_uninstall)
- dev->driver->irq_uninstall(dev);
-
- free_irq(dev->irq, dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_irq_uninstall);
-
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_control *ctl = data;
- int ret = 0, irq;
- struct pci_dev *pdev;
-
- /* if we haven't irq we fallback for compatibility reasons -
- * this used to be a separate function in drm_dma.h
- */
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
- /* UMS was only ever supported on pci devices. */
- if (WARN_ON(!dev_is_pci(dev->dev)))
- return -EINVAL;
-
- switch (ctl->func) {
- case DRM_INST_HANDLER:
- pdev = to_pci_dev(dev->dev);
- irq = pdev->irq;
-
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != irq)
- return -EINVAL;
- mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_irq_install(dev, irq);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
- case DRM_UNINST_HANDLER:
- mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_irq_uninstall(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
- default:
- return -EINVAL;
- }
-}
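
The removed drm_irq.c is a clean example of the enable-flag-plus-rollback shape: mark the resource enabled before the fallible steps, then unwind the flag (and anything already claimed) on every error path. A kernel-free sketch of that pattern, with all names invented for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

struct device {
	bool irq_enabled;
	int irq;
};

/* Pretend hardware/OS calls; either can fail. */
static int request_line(int irq)		{ return irq > 0 ? 0 : -1; }
static int post_install(struct device *d)	{ (void)d; return 0; }
static void free_line(int irq)			{ (void)irq; }

static int irq_install(struct device *dev, int irq)
{
	int ret;

	if (irq == 0)
		return -1;
	if (dev->irq_enabled)
		return -1;		/* already installed: -EBUSY in the original */
	dev->irq_enabled = true;	/* claimed before the fallible steps */

	ret = request_line(irq);
	if (ret) {
		dev->irq_enabled = false;	/* unwind on the first failure */
		return ret;
	}

	ret = post_install(dev);
	if (ret) {
		dev->irq_enabled = false;	/* unwind and release what we took */
		free_line(irq);
		return ret;
	}

	dev->irq = irq;
	return 0;
}

int main(void)
{
	struct device dev = { 0 };
	int ret = irq_install(&dev, 5);

	printf("install: %d, enabled: %d\n", ret, (int)dev.irq_enabled);
	return 0;
}
```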
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 0bf0fc1abf54..0c7550c0462b 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -27,38 +27,6 @@
#include <linux/module.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_print.h>
-
-#include "drm_crtc_helper_internal.h"
-
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
-
-#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE)
-
-/* Backward compatibility for drm_kms_helper.edid_firmware */
-static int edid_firmware_set(const char *val, const struct kernel_param *kp)
-{
- DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
-
- return __drm_set_edid_firmware_path(val);
-}
-
-static int edid_firmware_get(char *buffer, const struct kernel_param *kp)
-{
- return __drm_get_edid_firmware_path(buffer, PAGE_SIZE);
-}
-
-static const struct kernel_param_ops edid_firmware_ops = {
- .set = edid_firmware_set,
- .get = edid_firmware_get,
-};
-
-module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644);
-__MODULE_PARM_TYPE(edid_firmware, "charp");
-MODULE_PARM_DESC(edid_firmware,
- "DEPRECATED. Use drm.edid_firmware module parameter instead.");
-
-#endif
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
deleted file mode 100644
index 70c9dba114a6..000000000000
--- a/drivers/gpu/drm/drm_legacy.h
+++ /dev/null
@@ -1,290 +0,0 @@
-#ifndef __DRM_LEGACY_H__
-#define __DRM_LEGACY_H__
-
-/*
- * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * This file contains legacy interfaces that modern drm drivers
- * should no longer be using. They cannot be removed as legacy
- * drivers use them, and removing them would be an API break.

- */
-#include <linux/list.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-
-struct agp_memory;
-struct drm_buf_desc;
-struct drm_device;
-struct drm_file;
-struct drm_hash_item;
-struct drm_open_hash;
-
-/*
- * Hash-table Support
- */
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-/* drm_hashtab.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-#endif
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
-
-/*
- * Generic DRM Contexts
- */
-
-#define DRM_KERNEL_CONTEXT 0
-#define DRM_RESERVED_CONTEXTS 1
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
-#else
-static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
-#endif
-
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-/*
- * Generic Buffer Management
- */
-
-#define DRM_MAP_HASH_OFFSET 0x10000000
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
- return drm_ht_create(&dev->map_hash, 12);
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
-{
- drm_ht_remove(&dev->map_hash);
-}
-#else
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
-#endif
-
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-int __drm_legacy_infobufs(struct drm_device *, void *, int *,
- int (*)(void *, int, struct drm_buf_entry *));
-int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
- void __user **,
- int (*)(void *, int, unsigned long, struct drm_buf *),
- struct drm_file *);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master);
-void drm_legacy_rmmaps(struct drm_device *dev);
-#else
-static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master) {}
-static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *d);
-#else
-static inline void drm_legacy_vma_flush(struct drm_device *d)
-{
- /* do nothing */
-}
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_mem {
- unsigned long handle;
- struct agp_memory *memory;
- unsigned long bound;
- int pages;
- struct list_head head;
-};
-
-/* drm_agpsupport.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-void drm_legacy_agp_clear(struct drm_device *dev);
-
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#else
-static inline void drm_legacy_agp_clear(struct drm_device *dev) {}
-#endif
-
-/* drm_lock.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
-#else
-static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
-#endif
-
-/* DMA support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_dma_setup(struct drm_device *dev);
-void drm_legacy_dma_takedown(struct drm_device *dev);
-#else
-static inline int drm_legacy_dma_setup(struct drm_device *dev)
-{
- return 0;
-}
-#endif
-
-void drm_legacy_free_buffer(struct drm_device *dev,
- struct drm_buf * buf);
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp);
-#else
-static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp) {}
-#endif
-
-/* Scatter Gather Support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_sg_cleanup(struct drm_device *dev);
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_init_members(struct drm_device *dev);
-void drm_legacy_destroy_members(struct drm_device *dev);
-void drm_legacy_dev_reinit(struct drm_device *dev);
-int drm_legacy_setup(struct drm_device * dev);
-#else
-static inline void drm_legacy_init_members(struct drm_device *dev) {}
-static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
-static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
-static inline int drm_legacy_setup(struct drm_device * dev) { return 0; }
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
-#else
-static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_master_legacy_init(struct drm_master *master);
-#else
-static inline void drm_master_legacy_init(struct drm_master *master) {}
-#endif
-
-/* drm_pci.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void drm_legacy_pci_agp_destroy(struct drm_device *dev);
-#else
-static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
-#endif
-
-#endif /* __DRM_LEGACY_H__ */
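
The header deleted above leans heavily on one idiom: guard the real declarations behind a config option and provide static inline no-op stubs otherwise, so callers never need their own #ifdefs and the compiler simply discards the calls when the feature is off. A self-contained sketch of the idiom, using a made-up option name rather than CONFIG_DRM_LEGACY:

```c
#include <stdio.h>

/* Flip this to 0 to compile the feature out entirely. */
#define CONFIG_WIDGET_LEGACY 1

struct device {
	int widgets;
};

#if CONFIG_WIDGET_LEGACY
/* Real implementations, only built when the option is on. */
static void widget_legacy_init(struct device *dev)
{
	dev->widgets = 4;
}

static void widget_legacy_cleanup(struct device *dev)
{
	dev->widgets = 0;
}
#else
/* No-op stubs: callers stay ifdef-free and the calls optimize away. */
static inline void widget_legacy_init(struct device *dev) { (void)dev; }
static inline void widget_legacy_cleanup(struct device *dev) { (void)dev; }
#endif

int main(void)
{
	struct device dev = { 0 };

	widget_legacy_init(&dev);
	printf("widgets: %d\n", dev.widgets);
	widget_legacy_cleanup(&dev);
	return 0;
}
```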
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
deleted file mode 100644
index d4c5434062d7..000000000000
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * \file drm_legacy_misc.c
- * Misc legacy support functions.
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-void drm_legacy_init_members(struct drm_device *dev)
-{
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- spin_lock_init(&dev->buf_lock);
- mutex_init(&dev->ctxlist_mutex);
-}
-
-void drm_legacy_destroy_members(struct drm_device *dev)
-{
- mutex_destroy(&dev->ctxlist_mutex);
-}
-
-int drm_legacy_setup(struct drm_device * dev)
-{
- int ret;
-
- if (dev->driver->firstopen &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- ret = drm_legacy_dma_setup(dev);
- if (ret < 0)
- return ret;
-
-
- DRM_DEBUG("\n");
- return 0;
-}
-
-void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- if (dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_legacy_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- DRM_DEBUG("lastclose completed\n");
-}
-
-void drm_master_legacy_init(struct drm_master *master)
-{
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
-}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
deleted file mode 100644
index 1efbd5389d89..000000000000
--- a/drivers/gpu/drm/drm_lock.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/sched/signal.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-
-/*
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-
-/*
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-
-/*
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take to lock.
- */
-int drm_legacy_lock(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- ++file_priv->lock_count;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock->context, task_pid_nr(current),
- master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
- lock->flags);
-
- add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters++;
- spin_unlock_bh(&master->lock.spinlock);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!master->lock.hw_lock) {
- /* Device has been unregistered */
- send_sig(SIGTERM, current, 0);
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- break; /* Got lock */
- }
-
- /* Contention */
- mutex_unlock(&drm_global_mutex);
- schedule();
- mutex_lock(&drm_global_mutex);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters--;
- spin_unlock_bh(&master->lock.spinlock);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&master->lock.lock_queue, &entry);
-
- DRM_DEBUG("%d %s\n", lock->context,
- ret ? "interrupted" : "has lock");
- if (ret) return ret;
-
- /* don't set the block all signals on the master process for now
- * really probably not the correct answer but lets us debug xkb
- * xserver for now */
- if (!drm_is_current_master(file_priv)) {
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = master->lock.hw_lock;
- }
-
- if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
- {
- if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG("%d waiting for DMA quiescent\n",
- lock->context);
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-/*
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- if (drm_legacy_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
-
- return 0;
-}
-
-/*
- * This function returns immediately and takes the hw lock
- * with the kernel context if it is free, otherwise it gets the highest priority when and if
- * it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
- * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
- * a deadlock, which is why the "idlelock" was invented).
- *
- * This should be sufficient to wait for GPU idle without
- * having to worry about starvation.
- */
-void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
-{
- int ret;
-
- spin_lock_bh(&lock_data->spinlock);
- lock_data->kernel_waiters++;
- if (!lock_data->idle_has_lock) {
-
- spin_unlock_bh(&lock_data->spinlock);
- ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock_bh(&lock_data->spinlock);
-
- if (ret == 1)
- lock_data->idle_has_lock = 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_take);
-
-void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
-{
- unsigned int old, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (--lock_data->kernel_waiters == 0) {
- if (lock_data->idle_has_lock) {
- do {
- old = *lock;
- prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
- } while (prev != old);
- wake_up_interruptible(&lock_data->lock_queue);
- lock_data->idle_has_lock = 0;
- }
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_release);
-
-static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
-
- return (file_priv->lock_count && master->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
- master->lock.file_priv == file_priv);
-}
-
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- /* if the master has gone away we can't do anything with the lock */
- if (!dev->master)
- return;
-
- if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_legacy_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-}
-
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
-}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
deleted file mode 100644
index d2e1dccd8113..000000000000
--- a/drivers/gpu/drm/drm_memory.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_cache.h>
-#include <drm/drm_device.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#ifdef HAVE_PAGE_AGP
-# include <asm/agp.h>
-#else
-# ifdef __powerpc__
-# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
-# else
-# define PAGE_AGP PAGE_KERNEL
-# endif
-#endif
-
-static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device *dev)
-{
- unsigned long i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- struct page **phys_page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- list_for_each_entry(agpmem, &dev->agp->memory, head)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- if (&agpmem->head == &dev->agp->memory)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
- if (!page_map)
- return NULL;
-
- phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
- for (i = 0; i < num_pages; ++i)
- page_map[i] = phys_page_map[i];
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
-#else /* CONFIG_AGP */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_AGP */
-
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap);
-
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap_wc);
-
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
- if (!map->handle || !map->size)
- return;
-
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- vunmap(map->handle);
- else
- iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_legacy_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index e90f0bf895b3..daac649aabdb 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -197,12 +197,14 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
* @fb: The source framebuffer
* @clip: Clipping rectangle of the area to be copied
* @swap: When true, swap MSB/LSB of 16-bit values
+ * @fmtcnv_state: Format-conversion state
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap)
+ struct drm_rect *clip, bool swap,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
@@ -215,12 +217,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach,
+ fmtcnv_state);
else
drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -252,7 +255,7 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
}
static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
@@ -270,7 +273,7 @@ static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state);
if (ret)
goto err_msg;
} else {
@@ -332,7 +335,8 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
drm_dev_exit(idx);
}
@@ -368,7 +372,8 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
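Note on the hunk above: mipi_dbi_fb_dirty() and mipi_dbi_buf_copy() now take a struct drm_format_conv_state so the XRGB8888-to-RGB565 conversion can reuse one temporary buffer across updates instead of allocating per damage flush; callers using shadow-plane helpers simply pass &shadow_plane_state->fmtcnv_state. Below is a minimal sketch, not part of this patch, of a driver-private flush path that manages its own conversion state; it assumes the companion format-helper change provides DRM_FORMAT_CONV_STATE_INIT and drm_format_conv_state_release(), and my_panel_flush() is a hypothetical name.

```c
#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static int my_panel_flush(void *dst, struct iosys_map *src,
			  struct drm_framebuffer *fb, struct drm_rect *clip)
{
	struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
	struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
	int ret = 0;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
		/* The helper may keep a temporary buffer inside fmtcnv_state. */
		drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip,
					  &fmtcnv_state, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* Release whatever temporary storage the conversion state holds. */
	drm_format_conv_state_release(&fmtcnv_state);
	return ret;
}
```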
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 14201f73aab1..843a6dbda93a 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -347,7 +347,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- mipi_dsi_detach(dsi);
+ if (dsi->attached)
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@@ -370,11 +371,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ int ret;
if (!ops || !ops->attach)
return -ENOSYS;
- return ops->attach(dsi->host, dsi);
+ ret = ops->attach(dsi->host, dsi);
+ if (ret)
+ return ret;
+
+ dsi->attached = true;
+
+ return 0;
}
EXPORT_SYMBOL(mipi_dsi_attach);
@@ -386,9 +394,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ if (WARN_ON(!dsi->attached))
+ return -EINVAL;
+
if (!ops || !ops->detach)
return -ENOSYS;
+ dsi->attached = false;
+
return ops->detach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_detach);
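Note on the hunk above: mipi_dsi_attach() now records dsi->attached, mipi_dsi_detach() warns and bails if the device was never attached, and host teardown only detaches devices that really are attached. The sketch below is a hypothetical panel driver excerpt, not from this patch, showing the attach/detach pairing that keeps this bookkeeping balanced.

```c
#include <linux/device.h>
#include <drm/drm_mipi_dsi.h>

static int my_panel_probe(struct mipi_dsi_device *dsi)
{
	int ret;

	dsi->lanes = 4;				/* hypothetical panel configuration */
	dsi->format = MIPI_DSI_FMT_RGB888;

	ret = mipi_dsi_attach(dsi);		/* marks dsi->attached on success */
	if (ret)
		return dev_err_probe(&dsi->dev, ret, "failed to attach to DSI host\n");

	return 0;
}

static void my_panel_remove(struct mipi_dsi_device *dsi)
{
	/* Balanced with the attach in probe; a second detach would now WARN. */
	mipi_dsi_detach(dsi);
}
```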
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ac0d2ce3f870..0e8355063eee 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -538,7 +538,7 @@ retry:
obj_to_connector(obj),
prop_value);
} else {
- ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false);
if (ret)
goto out;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index d5c15292ae93..3d92f66e550c 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -336,6 +336,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Legion Go 8APU1 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 39d35fc3a43b..c585f1e8803e 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -29,18 +29,12 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/drm_auth.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
-#include "drm_legacy.h"
-
-#ifdef CONFIG_DRM_LEGACY
-/* List of devices hanging off drivers with stealth attach. */
-static LIST_HEAD(legacy_dev_list);
-static DEFINE_MUTEX(legacy_dev_list_lock);
-#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
@@ -71,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
master->unique_len = strlen(master->unique);
return 0;
}
-
-#ifdef CONFIG_DRM_LEGACY
-
-static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != pdev->bus->number ||
- p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
- return -EINVAL;
-
- p->irq = pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
- return 0;
-}
-
-/**
- * drm_legacy_irq_by_busid - Get interrupt from bus ID
- * @dev: DRM device
- * @data: IOCTL parameter pointing to a drm_irq_busid structure
- * @file_priv: DRM file private.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- /* UMS was only ever support on PCI devices. */
- if (WARN_ON(!dev_is_pci(dev->dev)))
- return -EINVAL;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EOPNOTSUPP;
-
- return drm_legacy_pci_irq_by_busid(dev, p);
-}
-
-void drm_legacy_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-static void drm_legacy_pci_agp_init(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
- dev->agp = drm_legacy_agp_init(dev);
- if (dev->agp) {
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
- }
- }
-}
-
-static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = drm_dev_alloc(driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_free;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
- drm_legacy_pci_agp_init(dev);
-
- ret = drm_dev_register(dev, ent->driver_data);
- if (ret)
- goto err_agp;
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
- mutex_lock(&legacy_dev_list_lock);
- list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
- mutex_unlock(&legacy_dev_list_lock);
- }
-
- return 0;
-
-err_agp:
- drm_legacy_pci_agp_destroy(dev);
- pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
- return ret;
-}
-
-/**
- * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * This is only used by legacy dri1 drivers and deprecated.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- DRM_DEBUG("\n");
-
- if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
- return -EINVAL;
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
- pid = &pdriver->id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_legacy_get_pci_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_pci_init);
-
-/**
- * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
- * is deprecated and only used by dri1 drivers.
- */
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
-
- DRM_DEBUG("\n");
-
- if (!(driver->driver_features & DRIVER_LEGACY)) {
- WARN_ON(1);
- } else {
- mutex_lock(&legacy_dev_list_lock);
- list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
- legacy_dev_list) {
- if (dev->driver == driver) {
- list_del(&dev->legacy_dev_list);
- drm_put_dev(dev);
- }
- }
- mutex_unlock(&legacy_dev_list_lock);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_legacy_pci_exit);
-
-#endif
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 24e7998d1731..9e8e4c60983d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -230,6 +230,103 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
return 0;
}
+/**
+ * DOC: hotspot properties
+ *
+ * HOTSPOT_X: property to set mouse hotspot x offset.
+ * HOTSPOT_Y: property to set mouse hotspot y offset.
+ *
+ * When the plane is being used as a cursor image to display a mouse pointer,
+ * the "hotspot" is the offset within the cursor image where mouse events
+ * are expected to go.
+ *
+ * Positive values move the hotspot from the top-left corner of the cursor
+ * plane towards the right and bottom.
+ *
+ * Most display drivers do not need this information because the
+ * hotspot is not actually connected to anything visible on screen.
+ * However, this is necessary for display drivers like the para-virtualized
+ * drivers (eg qxl, vbox, virtio, vmwgfx), that are attached to a user console
+ * with a mouse pointer. Since these consoles are often being remoted over a
+ * network, they would otherwise have to wait to display the pointer movement to
+ * the user until a full network round-trip has occurred. New mouse events have
+ * to be sent from the user's console, over the network to the virtual input
+ * devices, forwarded to the desktop for processing, and then the cursor plane's
+ * position can be updated and sent back to the user's console over the network.
+ * Instead, with the hotspot information, the console can anticipate the new
+ * location, and draw the mouse cursor there before the confirmation comes in.
+ * To do that correctly, the user's console must be able to predict how the
+ * desktop will process mouse events, which normally requires the desktop's
+ * mouse topology information, ie where each CRTC sits in the mouse coordinate
+ * space. This is typically sent to the para-virtualized drivers using some
+ * driver-specific method, and the driver then forwards it to the console by
+ * way of the virtual display device or hypervisor.
+ *
+ * The assumption is generally made that there is only one cursor plane being
+ * used this way at a time, and that the desktop is feeding all mouse devices
+ * into the same global pointer. Para-virtualized drivers that require this
+ * should only be exposing a single cursor plane, or find some other way
+ * to coordinate with a userspace desktop that supports multiple pointers.
+ * If the hotspot properties are set, the cursor plane is therefore assumed to be
+ * used only for displaying a mouse cursor image, and the position of the combined
+ * cursor plane + offset can therefore be used for coordinating with input from a
+ * mouse device.
+ *
+ * The cursor will then be drawn either at the location of the plane in the CRTC
+ * console, or as a free-floating cursor plane on the user's console
+ * corresponding to their desktop mouse position.
+ *
+ * DRM clients which would like to work correctly on drivers which expose
+ * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT.
+ * Setting this property on drivers which do not special case
+ * cursor planes will return EOPNOTSUPP, which can be used by userspace to
+ * gauge requirements of the hardware/drivers they're running on. Advertising
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be
+ * correctly setting the hotspot properties.
+ */
+
+/**
+ * drm_plane_create_hotspot_properties - creates the mouse hotspot
+ * properties and attaches them to the given cursor plane
+ *
+ * @plane: drm cursor plane
+ *
+ * This function enables the mouse hotspot property on a given
+ * cursor plane. Look at the documentation for hotspot properties
+ * to get a better understanding for what they're used for.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int drm_plane_create_hotspot_properties(struct drm_plane *plane)
+{
+ struct drm_property *prop_x;
+ struct drm_property *prop_y;
+
+ drm_WARN_ON(plane->dev,
+ !drm_core_check_feature(plane->dev,
+ DRIVER_CURSOR_HOTSPOT));
+
+ prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X",
+ INT_MIN, INT_MAX);
+ if (IS_ERR(prop_x))
+ return PTR_ERR(prop_x);
+
+ prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y",
+ INT_MIN, INT_MAX);
+ if (IS_ERR(prop_y)) {
+ drm_property_destroy(plane->dev, prop_x);
+ return PTR_ERR(prop_y);
+ }
+
+ drm_object_attach_property(&plane->base, prop_x, 0);
+ drm_object_attach_property(&plane->base, prop_y, 0);
+ plane->hotspot_x_property = prop_x;
+ plane->hotspot_y_property = prop_y;
+
+ return 0;
+}
+
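Note on the hunk above: the HOTSPOT_X/HOTSPOT_Y properties are only attached to cursor planes of drivers that set DRIVER_CURSOR_HOTSPOT, and the kernel-doc above expects an atomic client to opt in before using them. The following is a userspace-side sketch, not part of this kernel patch, of how such a client might advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT and program the hotspot; it assumes libdrm plus a uapi header that already defines the new client cap, and find_prop_id() is a hypothetical helper.

```c
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static uint32_t find_prop_id(int fd, uint32_t plane_id, const char *name)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	uint32_t id = 0;

	if (!props)
		return 0;

	for (uint32_t i = 0; i < props->count_props && !id; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, name))
			id = prop->prop_id;
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return id;
}

static int set_cursor_hotspot(int fd, uint32_t plane_id, uint32_t hot_x, uint32_t hot_y)
{
	drmModeAtomicReq *req;
	int ret;

	/* Promise the kernel that the hotspot properties will be kept up to date. */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
	if (ret)
		return ret;	/* e.g. EOPNOTSUPP on drivers without hotspot support */

	req = drmModeAtomicAlloc();
	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, plane_id,
				 find_prop_id(fd, plane_id, "HOTSPOT_X"), hot_x);
	drmModeAtomicAddProperty(req, plane_id,
				 find_prop_id(fd, plane_id, "HOTSPOT_Y"), hot_y);
	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);
	return ret;
}
```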
__printf(9, 0)
static int __drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
@@ -348,6 +445,10 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_object_attach_property(&plane->base, config->prop_src_w, 0);
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
+ if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+ type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_hotspot_properties(plane);
+ }
if (format_modifier_count)
create_in_format_blob(dev, plane);
@@ -678,6 +779,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
!file_priv->universal_planes)
continue;
+ /*
+ * If we're running on a virtualized driver then,
+		 * unless userspace advertises support for the
+ * virtualized cursor plane, disable cursor planes
+ * because they'll be broken due to missing cursor
+ * hotspot info.
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+ file_priv->atomic &&
+ !file_priv->supports_virtualized_cursor_plane)
+ continue;
+
if (drm_lease_held(file_priv, plane->base.id)) {
if (count < plane_resp->count_planes &&
put_user(plane->base.id, plane_ptr + count))
@@ -1052,8 +1166,10 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
return PTR_ERR(fb);
}
- fb->hot_x = req->hot_x;
- fb->hot_y = req->hot_y;
+ if (plane->hotspot_x_property && plane->state)
+ plane->state->hotspot_x = req->hot_x;
+ if (plane->hotspot_y_property && plane->state)
+ plane->state->hotspot_y = req->hot_y;
} else {
fb = NULL;
}
@@ -1442,6 +1558,36 @@ out:
* Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and
* drm_atomic_helper_damage_iter_next() helper iterator function to get damage
* rectangles clipped to &drm_plane_state.src.
+ *
+ * Note that there are two types of damage handling: frame damage and buffer
+ * damage, the type of damage handling implemented depends on a driver's upload
+ * target. Drivers implementing a per-plane or per-CRTC upload target need to
+ * handle frame damage, while drivers implementing a per-buffer upload target
+ * need to handle buffer damage.
+ *
+ * The existing damage helpers only support the frame damage type, there is no
+ * buffer age support or similar damage accumulation algorithm implemented yet.
+ *
+ * Only drivers handling frame damage can use the mentioned damage helpers to
+ * iterate over the damaged regions. Drivers that handle buffer damage must set
+ * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init()
+ * to know that damage clips should be ignored and return &drm_plane_state.src
+ * as the damage rectangle, to force a full plane update.
+ *
+ * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb
+ * of the old and new plane states to determine if the framebuffer attached to a
+ * plane has changed or not since the last plane update. If &drm_plane_state.fb
+ * has changed, then &drm_plane_state.ignore_damage_clips must be set to true.
+ *
+ * That is because drivers with a per-plane upload target, expect the backing
+ * storage buffer to not change for a given plane. If the upload buffer changes
+ * between page flips, the new upload buffer has to be updated as a whole. This
+ * can be improved in the future if support for frame damage is added to the DRM
+ * damage helpers, similarly to how user-space already handles this case as it is
+ * explained in the following documents:
+ *
+ * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt
+ * https://emersion.fr/blog/2019/intro-to-damage-tracking/
*/
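To make the buffer-damage case above concrete, here is a minimal sketch (hypothetical driver code, not from this patch) of a plane ->atomic_check that forces a full plane update whenever the framebuffer changed, using the &drm_plane_state.ignore_damage_clips flag introduced together with this documentation.

```c
#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/*
	 * This driver uploads to a per-buffer target: accumulated damage is
	 * only meaningful while the backing framebuffer stays the same, so a
	 * framebuffer change must fall back to a full plane update.
	 */
	if (old_state->fb != new_state->fb)
		new_state->ignore_damage_clips = true;

	return 0;
}
```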
/**
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5e95089676ff..7982be4b0306 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane)
kfree(plane);
}
EXPORT_SYMBOL(drm_plane_helper_destroy);
-
-/**
- * drm_plane_helper_atomic_check() - Helper to check plane atomic-state
- * @plane: plane to check
- * @state: atomic state object
- *
- * Provides a default plane-state check handler for planes whose atomic-state
- * scale and positioning are not expected to change since the plane is always
- * a fullscreen scanout buffer.
- *
- * This is often the case for the primary plane of simple framebuffers. See
- * also drm_crtc_helper_atomic_check() for the respective CRTC-state check
- * helper function.
- *
- * RETURNS:
- * Zero on success, or an errno code otherwise.
- */
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
-}
-EXPORT_SYMBOL(drm_plane_helper_atomic_check);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
deleted file mode 100644
index f4e6184d1877..000000000000
--- a/drivers/gpu/drm/drm_scatter.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#define DEBUG_SCATTER 0
-
-static void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < entry->pages; i++) {
- page = entry->pagelist[i];
- if (page)
- ClearPageReserved(page);
- }
-
- vfree(entry->virtual);
-
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
-}
-
-void drm_legacy_sg_cleanup(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-}
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
- unsigned long pages, i, j;
-
- DRM_DEBUG("\n");
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- if (request->size > SIZE_MAX - PAGE_SIZE)
- return -EINVAL;
-
- if (dev->sg)
- return -EINVAL;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
- entry->pages = pages;
- entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
- if (!entry->pagelist) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
- if (!entry->busaddr) {
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
- if (!entry->virtual) {
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- /* This also forces the mapping of COW pages, so our page list
- * will be valid. Please don't remove it...
- */
- memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
- entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
- DRM_DEBUG("handle = %08lx\n", entry->handle);
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
- i += PAGE_SIZE, j++) {
- entry->pagelist[j] = vmalloc_to_page((void *)i);
- if (!entry->pagelist[j])
- goto failed;
- SetPageReserved(entry->pagelist[j]);
- }
-
- request->handle = entry->handle;
-
- dev->sg = entry;
-
-#if DEBUG_SCATTER
- /* Verify that each page points to its virtual address, and vice
- * versa.
- */
- {
- int error = 0;
-
- for (i = 0; i < pages; i++) {
- unsigned long *tmp;
-
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0xcafebabe;
- }
- tmp = (unsigned long *)((u8 *) entry->virtual +
- (PAGE_SIZE * i));
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- if (*tmp != 0xcafebabe && error == 0) {
- error = 1;
- DRM_ERROR("Scatter allocation error, "
- "pagelist does not match "
- "virtual mapping\n");
- }
- }
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0;
- }
- }
- if (error == 0)
- DRM_ERROR("Scatter allocation matches pagelist\n");
- }
-#endif
-
- return 0;
-
- failed:
- drm_sg_cleanup(entry);
- return -ENOMEM;
-}
-
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- entry = dev->sg;
- dev->sg = NULL;
-
- if (!entry || entry->handle != request->handle)
- return -EINVAL;
-
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- drm_sg_cleanup(entry);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 01da6789d044..cbb65b7ba425 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -126,6 +126,11 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set
+ * a fence deadline hint on the backing fences before waiting, to provide the
+ * fence signaler with an appropriate sense of urgency. The deadline is
+ * specified as an absolute &CLOCK_MONOTONIC value in units of ns.
+ *
* Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
* handles as well as an array of u64 points and does a host-side wait on all
* of syncobj fences at the given points simultaneously.
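The doc change above describes the new deadline hint for syncobj waits. Below is a userspace sketch, not part of this patch, of a deadline-hinted wait; it assumes a uapi header that already carries DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE and the deadline_nsec field added by this series, and the 10 ms budget is an arbitrary example.

```c
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int wait_syncobj_with_deadline(int fd, uint32_t handle)
{
	struct drm_syncobj_wait wait;
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&handle;	/* user pointer to one handle */
	wait.count_handles = 1;
	wait.timeout_nsec = INT64_MAX;		/* block until signaled ... */
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
	/* ... but hint that the fence is wanted within roughly 10 ms. */
	wait.deadline_nsec = (uint64_t)now.tv_sec * 1000000000ull +
			     now.tv_nsec + 10 * 1000000ull;

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}
```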
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint32_t count,
uint32_t flags,
signed long timeout,
- uint32_t *idx)
+ uint32_t *idx,
+ ktime_t *deadline)
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
@@ -1108,6 +1114,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
+ if (deadline) {
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+ dma_fence_set_deadline(fence, *deadline);
+ }
+ }
+
do {
set_current_state(TASK_INTERRUPTIBLE);
@@ -1206,7 +1221,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
struct drm_syncobj_timeline_wait *timeline_wait,
- struct drm_syncobj **syncobjs, bool timeline)
+ struct drm_syncobj **syncobjs, bool timeline,
+ ktime_t *deadline)
{
signed long timeout = 0;
uint32_t first = ~0;
@@ -1217,7 +1233,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
NULL,
wait->count_handles,
wait->flags,
- timeout, &first);
+ timeout, &first,
+ deadline);
if (timeout < 0)
return timeout;
wait->first_signaled = first;
@@ -1227,7 +1244,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
u64_to_user_ptr(timeline_wait->points),
timeline_wait->count_handles,
timeline_wait->flags,
- timeout, &first);
+ timeout, &first,
+ deadline);
if (timeout < 0)
return timeout;
timeline_wait->first_signaled = first;
@@ -1298,17 +1316,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_syncobj_wait *args = data;
struct drm_syncobj **syncobjs;
+ unsigned int possible_flags;
+ ktime_t t, *tp = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
- if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+ if (args->flags & ~possible_flags)
return -EINVAL;
if (args->count_handles == 0)
- return -EINVAL;
+ return 0;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
@@ -1317,8 +1340,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+ t = ns_to_ktime(args->deadline_nsec);
+ tp = &t;
+ }
+
ret = drm_syncobj_array_wait(dev, file_private,
- args, NULL, syncobjs, false);
+ args, NULL, syncobjs, false, tp);
drm_syncobj_array_free(syncobjs, args->count_handles);
@@ -1331,18 +1359,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_syncobj_timeline_wait *args = data;
struct drm_syncobj **syncobjs;
+ unsigned int possible_flags;
+ ktime_t t, *tp = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+ if (args->flags & ~possible_flags)
return -EINVAL;
if (args->count_handles == 0)
- return -EINVAL;
+ return 0;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
@@ -1351,8 +1384,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+ t = ns_to_ktime(args->deadline_nsec);
+ tp = &t;
+ }
+
ret = drm_syncobj_array_wait(dev, file_private,
- NULL, args, syncobjs, true);
+ NULL, args, syncobjs, true, tp);
drm_syncobj_array_free(syncobjs, args->count_handles);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 877e2067534f..702a12bc93bd 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else if (dev->driver->get_vblank_counter) {
- return dev->driver->get_vblank_counter(dev, pipe);
- }
-#endif
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else {
- dev->driver->disable_vblank(dev, pipe);
- }
-#endif
}
/*
@@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else if (dev->driver->enable_vblank) {
- return dev->driver->enable_vblank(dev, pipe);
- }
-#endif
return -EINVAL;
}
@@ -1574,88 +1559,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
-static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!drm_dev_has_vblank(dev))
- return;
-
- if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
- return;
-
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatch calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!vblank->inmodeset) {
- vblank->inmodeset = 0x1;
- if (drm_vblank_get(dev, pipe) == 0)
- vblank->inmodeset |= 0x2;
- }
-}
-
-static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!drm_dev_has_vblank(dev))
- return;
-
- if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
- return;
-
- if (vblank->inmodeset) {
- spin_lock_irq(&dev->vbl_lock);
- drm_reset_vblank_timestamp(dev, pipe);
- spin_unlock_irq(&dev->vbl_lock);
-
- if (vblank->inmodeset & 0x2)
- drm_vblank_put(dev, pipe);
-
- vblank->inmodeset = 0;
- }
-}
-
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- unsigned int pipe;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!drm_dev_has_vblank(dev))
- return 0;
-
- /* KMS drivers handle this internally */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- pipe = modeset->crtc;
- if (pipe >= dev->num_crtcs)
- return -EINVAL;
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_legacy_vblank_pre_modeset(dev, pipe);
- break;
- case _DRM_POST_MODESET:
- drm_legacy_vblank_post_modeset(dev, pipe);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
u64 req_seq,
union drm_wait_vblank *vblwait,
@@ -1780,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
static bool drm_wait_vblank_supported(struct drm_device *dev)
{
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY)))
- return dev->irq_enabled;
-#endif
return drm_dev_has_vblank(dev);
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
deleted file mode 100644
index 87c9fe55dec7..000000000000
--- a/drivers/gpu/drm/drm_vm.c
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-#include <linux/pgtable.h>
-
-#if defined(__ia64__)
-#include <linux/efi.h>
-#include <linux/slab.h>
-#endif
-#include <linux/mem_encrypt.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-struct drm_vma_entry {
- struct list_head head;
- struct vm_area_struct *vma;
- pid_t pid;
-};
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(struct drm_local_map *map,
- struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__) || defined(__loongarch__)
- if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
- tmp = pgprot_noncached(tmp);
- else
- tmp = pgprot_writecombine(tmp);
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp = pgprot_noncached_wc(tmp);
-#endif
- return tmp;
-}
-
-/*
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Find the right map and if it's AGP memory find the real physical page to
- * map, get the page, increment the use count and return it.
- */
-#if IS_ENABLED(CONFIG_AGP)
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- struct drm_hash_item *hash;
-
- /*
- * Find the right map
- */
- if (!dev->agp)
- goto vm_fault_error;
-
- if (!dev->agp || !dev->agp->cant_use_aperture)
- goto vm_fault_error;
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
- goto vm_fault_error;
-
- r_list = drm_hash_entry(hash, struct drm_map_list, hash);
- map = r_list->map;
-
- if (map && map->type == _DRM_AGP) {
- /*
- * Using vm_pgoff as a selector forces us to use this unusual
- * addressing scheme.
- */
- resource_size_t offset = vmf->address - vma->vm_start;
- resource_size_t baddr = map->offset + offset;
- struct drm_agp_mem *agpmem;
- struct page *page;
-
-#ifdef __alpha__
- /*
- * Adjust to a bus-relative address
- */
- baddr -= dev->hose->mem_space->start;
-#endif
-
- /*
- * It's AGP memory - find the real physical page to map
- */
- list_for_each_entry(agpmem, &dev->agp->memory, head) {
- if (agpmem->bound <= baddr &&
- agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
- break;
- }
-
- if (&agpmem->head == &dev->agp->memory)
- goto vm_fault_error;
-
- /*
- * Get the page, inc the use count, and return it
- */
- offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = agpmem->memory->pages[offset];
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG
- ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
- (unsigned long long)baddr,
- agpmem->memory->pages[offset],
- (unsigned long long)offset,
- page_count(page));
- return 0;
- }
-vm_fault_error:
- return VM_FAULT_SIGBUS; /* Disallow mremap */
-}
-#else
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-#endif
-
-/*
- * \c nopage method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * return it.
- */
-static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_local_map *map = vma->vm_private_data;
- unsigned long offset;
- unsigned long i;
- struct page *page;
-
- if (!map)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("shm_fault 0x%lx\n", offset);
- return 0;
-}
-
-/*
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
- int found_maps = 0;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- map = vma->vm_private_data;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma->vm_private_data == map)
- found_maps++;
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- }
- }
-
- /* We were the only map that was found */
- if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
- /* Check to see if we are in the maplist, if we are not, then
- * we delete this mappings information.
- */
- found_maps = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map == map)
- found_maps++;
- }
-
- if (!found_maps) {
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- arch_phys_wc_del(map->mtrr);
- iounmap(map->handle);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dma_free_coherent(dev->dev,
- map->size,
- map->handle,
- map->offset);
- break;
- }
- kfree(map);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * \c fault method for DMA virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
- */
-static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_device_dma *dma = dev->dma;
- unsigned long offset;
- unsigned long page_nr;
- struct page *page;
-
- if (!dma)
- return VM_FAULT_SIGBUS; /* Error */
- if (!dma->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- /* vm_[pg]off[set] should be 0 */
- page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((void *)dma->pagelist[page_nr]);
-
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
- return 0;
-}
-
-/*
- * \c fault method for scatter-gather virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
- */
-static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_local_map *map = vma->vm_private_data;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_sg_mem *entry = dev->sg;
- unsigned long offset;
- unsigned long map_offset;
- unsigned long page_offset;
- struct page *page;
-
- if (!entry)
- return VM_FAULT_SIGBUS; /* Error */
- if (!entry->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- map_offset = map->offset - (unsigned long)dev->sg->virtual;
- page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
- page = entry->pagelist[page_offset];
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
- .fault = drm_vm_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
- .fault = drm_vm_shm_fault,
- .open = drm_vm_open,
- .close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
- .fault = drm_vm_dma_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
- .fault = drm_vm_sg_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-static void drm_vm_open_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
-{
- struct drm_vma_entry *vma_entry;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (vma_entry) {
- vma_entry->vma = vma;
- vma_entry->pid = current->pid;
- list_add(&vma_entry->head, &dev->vmalist);
- }
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(dev, vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void drm_vm_close_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
-{
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
-}
-
-/*
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(dev, vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to vm_dma_ops, the file
- * pointer, and calls vm_open().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- struct drm_device_dma *dma;
- unsigned long length = vma->vm_end - vma->vm_start;
-
- dev = priv->minor->dev;
- dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- /* Length must match exact page count */
- if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN) &&
- (dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- vma->vm_ops = &drm_vm_dma_ops;
-
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
- drm_vm_open_locked(dev, vma);
- return 0;
-}
-
-static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
- return dev->hose->dense_mem_base;
-#else
- return 0;
-#endif
-}
-
-/*
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
- * checks that the restricted flag is not set, sets the virtual memory operations
- * according to the mapping type and remaps the pages. Finally sets the file
- * pointer and calls vm_open().
- */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- resource_size_t offset = 0;
- struct drm_hash_item *hash;
-
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- if (!priv->authenticated)
- return -EACCES;
-
- /* We check for "dma". On Apple's UniNorth, it's valid to have
- * the AGP mapped at physical address 0
- * --BenH.
- */
- if (!vma->vm_pgoff
-#if IS_ENABLED(CONFIG_AGP)
- && (!dev->agp
- || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
- )
- return drm_mmap_dma(filp, vma);
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
- DRM_ERROR("Could not find map\n");
- return -EINVAL;
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
- return -EPERM;
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- switch (map->type) {
-#if !defined(__arm__)
- case _DRM_AGP:
- if (dev->agp && dev->agp->cant_use_aperture) {
- /*
- * On some platforms we can't talk to bus dma address from the CPU, so for
- * memory of type DRM_AGP, we'll deal with sorting out the real physical
- * pages and mappings in fault()
- */
-#if defined(__powerpc__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
- vma->vm_ops = &drm_vm_ops;
- break;
- }
- fallthrough; /* to _DRM_FRAME_BUFFER... */
-#endif
- case _DRM_FRAME_BUFFER:
- case _DRM_REGISTERS:
- offset = drm_core_get_reg_ofs(dev);
- vma->vm_page_prot = drm_io_prot(map, vma);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
- " offset = 0x%llx\n",
- map->type,
- vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
-
- vma->vm_ops = &drm_vm_ops;
- break;
- case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. But
- * it's allocated in a different way, so avoid fault */
- if (remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(map->handle)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- fallthrough; /* to _DRM_SHM */
- case _DRM_SHM:
- vma->vm_ops = &drm_vm_shm_ops;
- vma->vm_private_data = (void *)map;
- break;
- case _DRM_SCATTER_GATHER:
- vma->vm_ops = &drm_vm_sg_ops;
- vma->vm_private_data = (void *)map;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
- default:
- return -EINVAL; /* This should never happen. */
- }
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
- drm_vm_open_locked(dev, vma);
- return 0;
-}
-
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- int ret;
-
- if (drm_dev_is_unplugged(dev))
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_mmap_locked(filp, vma);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_legacy_mmap);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
-
- /* Clear vma list (only needed for legacy drivers) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-}
-#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a8d3fa81e4ec..6228ce603248 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -640,16 +640,14 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
-static int etnaviv_pdev_remove(struct platform_device *pdev)
+static void etnaviv_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &etnaviv_master_ops);
-
- return 0;
}
static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
- .remove = etnaviv_pdev_remove,
+ .remove_new = etnaviv_pdev_remove,
.driver = {
.name = "etnaviv",
},
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 2416c526f9b0..3d0f8d182506 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -535,7 +535,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- submit->ctx);
+ 1, submit->ctx);
if (ret)
goto err_submit_put;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 9276756e1397..9b8445d2a128 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1904,11 +1904,10 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
return 0;
}
-static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
+static void etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &gpu_ops);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int etnaviv_gpu_rpm_suspend(struct device *dev)
@@ -1917,7 +1916,7 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
u32 idle, mask;
/* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&gpu->sched.hw_rq_count))
+ if (atomic_read(&gpu->sched.credit_count))
return -EBUSY;
/* Check whether the hardware (except FE and MC) is idle */
@@ -1970,6 +1969,6 @@ struct platform_driver etnaviv_gpu_driver = {
.of_match_table = etnaviv_gpu_match,
},
.probe = etnaviv_gpu_platform_probe,
- .remove = etnaviv_gpu_platform_remove,
+ .remove_new = etnaviv_gpu_platform_remove,
.id_table = gpu_ids,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 9b79f218e21a..c4b04b0dee16 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
int ret;
- ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+ ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
msecs_to_jiffies(500), NULL, NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 34cdabc30b4f..35771fb4e85d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -11,9 +11,10 @@
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <drm/drm_fourcc.h>
@@ -103,7 +104,7 @@ struct gsc_context {
unsigned int num_formats;
void __iomem *regs;
- const char **clk_names;
+ const char *const *clk_names;
struct clk *clocks[GSC_MAX_CLOCKS];
int num_clocks;
struct gsc_scaler sc;
@@ -1217,7 +1218,7 @@ static const unsigned int gsc_tiled_formats[] = {
static int gsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct gsc_driverdata *driver_data;
+ const struct gsc_driverdata *driver_data;
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
int num_formats, ret, i, j;
@@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
+ driver_data = device_get_match_data(dev);
ctx->dev = dev;
ctx->num_clocks = driver_data->num_clocks;
ctx->clk_names = driver_data->clk_names;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index a02f75be81f0..e163649816d5 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -51,7 +51,8 @@ static bool gud_is_big_endian(void)
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
void *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
{
unsigned int block_width = drm_format_info_block_width(format, 0);
unsigned int bits_per_pixel = 8 / block_width;
@@ -75,7 +76,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
iosys_map_set_vaddr(&dst_map, buf);
iosys_map_set_vaddr(&vmap, src);
- drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect, fmtcnv_state);
pix8 = buf;
for (y = 0; y < height; y++) {
@@ -152,7 +153,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect,
- struct gud_set_buffer_req *req)
+ struct gud_set_buffer_req *req,
+ struct drm_format_conv_state *fmtcnv_state)
{
u8 compression = gdrm->compression;
struct iosys_map dst;
@@ -178,23 +180,23 @@ retry:
*/
if (format != fb->format) {
if (format->format == GUD_DRM_FORMAT_R1) {
- len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
+ len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect, fmtcnv_state);
if (!len)
return -ENOMEM;
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+ drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads, fmtcnv_state);
} else if (compression && cached_reads && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
@@ -266,7 +268,8 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
const struct iosys_map *src, bool cached_reads,
- const struct drm_format_info *format, struct drm_rect *rect)
+ const struct drm_format_info *format, struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct gud_set_buffer_req req;
size_t len, trlen;
@@ -274,7 +277,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
+ ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req, fmtcnv_state);
if (ret)
return ret;
@@ -318,6 +321,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
const struct iosys_map *src, bool cached_reads,
struct drm_rect *damage)
{
+ struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
const struct drm_format_info *format;
unsigned int i, lines;
size_t pitch;
@@ -340,7 +344,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
rect.y1 += i * lines;
rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
- ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
+ ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect, &fmtcnv_state);
if (ret) {
if (ret != -ENODEV && ret != -ECONNRESET &&
ret != -ESHUTDOWN && ret != -EPROTO)
@@ -350,6 +354,8 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
break;
}
}
+
+ drm_format_conv_state_release(&fmtcnv_state);
}
void gud_flush_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ce397a8797f7..b5d6e3352071 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -94,7 +94,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang for triaging according to:
- https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 88b2bb005014..65e984242089 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -47,33 +47,34 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
-i915-y += i915_driver.o \
- i915_drm_client.o \
- i915_config.o \
- i915_getparam.o \
- i915_ioctl.o \
- i915_irq.o \
- i915_mitigations.o \
- i915_module.o \
- i915_params.o \
- i915_pci.o \
- i915_scatterlist.o \
- i915_suspend.o \
- i915_switcheroo.o \
- i915_sysfs.o \
- i915_utils.o \
- intel_clock_gating.o \
- intel_device_info.o \
- intel_memory_region.o \
- intel_pcode.o \
- intel_region_ttm.o \
- intel_runtime_pm.o \
- intel_sbi.o \
- intel_step.o \
- intel_uncore.o \
- intel_wakeref.o \
- vlv_sideband.o \
- vlv_suspend.o
+i915-y += \
+ i915_config.o \
+ i915_driver.o \
+ i915_drm_client.o \
+ i915_getparam.o \
+ i915_ioctl.o \
+ i915_irq.o \
+ i915_mitigations.o \
+ i915_module.o \
+ i915_params.o \
+ i915_pci.o \
+ i915_scatterlist.o \
+ i915_suspend.o \
+ i915_switcheroo.o \
+ i915_sysfs.o \
+ i915_utils.o \
+ intel_clock_gating.o \
+ intel_device_info.o \
+ intel_memory_region.o \
+ intel_pcode.o \
+ intel_region_ttm.o \
+ intel_runtime_pm.o \
+ intel_sbi.o \
+ intel_step.o \
+ intel_uncore.o \
+ intel_wakeref.o \
+ vlv_sideband.o \
+ vlv_suspend.o
# core peripheral code
i915-y += \
@@ -90,13 +91,13 @@ i915-y += \
i915_syncmap.o \
i915_user_extensions.o
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_COMPAT) += \
+ i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += \
i915_debugfs.o \
- i915_debugfs_params.o \
- display/intel_display_debugfs.o \
- display/intel_pipe_crc.o
-i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
+ i915_debugfs_params.o
+i915-$(CONFIG_PERF_EVENTS) += \
+ i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
gt-y += \
@@ -153,7 +154,8 @@ gt-y += \
gt/sysfs_engines.o
# x86 intel-gtt module support
-gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o
+gt-$(CONFIG_X86) += \
+ gt/intel_ggtt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -172,9 +174,9 @@ gem-y += \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
gem/i915_gem_internal.o \
- gem/i915_gem_object.o \
gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
+ gem/i915_gem_object.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
@@ -191,57 +193,61 @@ gem-y += \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
i915-y += \
- $(gem-y) \
- i915_active.o \
- i915_cmd_parser.o \
- i915_deps.o \
- i915_gem_evict.o \
- i915_gem_gtt.o \
- i915_gem_ww.o \
- i915_gem.o \
- i915_query.o \
- i915_request.o \
- i915_scheduler.o \
- i915_trace_points.o \
- i915_ttm_buddy_manager.o \
- i915_vma.o \
- i915_vma_resource.o
+ $(gem-y) \
+ i915_active.o \
+ i915_cmd_parser.o \
+ i915_deps.o \
+ i915_gem.o \
+ i915_gem_evict.o \
+ i915_gem_gtt.o \
+ i915_gem_ww.o \
+ i915_query.o \
+ i915_request.o \
+ i915_scheduler.o \
+ i915_trace_points.o \
+ i915_ttm_buddy_manager.o \
+ i915_vma.o \
+ i915_vma_resource.o
# general-purpose microcontroller (GuC) support
i915-y += \
- gt/uc/intel_gsc_fw.o \
- gt/uc/intel_gsc_proxy.o \
- gt/uc/intel_gsc_uc.o \
- gt/uc/intel_gsc_uc_debugfs.o \
- gt/uc/intel_gsc_uc_heci_cmd_submit.o \
- gt/uc/intel_guc.o \
- gt/uc/intel_guc_ads.o \
- gt/uc/intel_guc_capture.o \
- gt/uc/intel_guc_ct.o \
- gt/uc/intel_guc_debugfs.o \
- gt/uc/intel_guc_fw.o \
- gt/uc/intel_guc_hwconfig.o \
- gt/uc/intel_guc_log.o \
- gt/uc/intel_guc_log_debugfs.o \
- gt/uc/intel_guc_rc.o \
- gt/uc/intel_guc_slpc.o \
- gt/uc/intel_guc_submission.o \
- gt/uc/intel_huc.o \
- gt/uc/intel_huc_debugfs.o \
- gt/uc/intel_huc_fw.o \
- gt/uc/intel_uc.o \
- gt/uc/intel_uc_debugfs.o \
- gt/uc/intel_uc_fw.o
+ gt/uc/intel_gsc_fw.o \
+ gt/uc/intel_gsc_proxy.o \
+ gt/uc/intel_gsc_uc.o \
+ gt/uc/intel_gsc_uc_debugfs.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o\
+ gt/uc/intel_guc.o \
+ gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_capture.o \
+ gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_debugfs.o \
+ gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_hwconfig.o \
+ gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_log_debugfs.o \
+ gt/uc/intel_guc_rc.o \
+ gt/uc/intel_guc_slpc.o \
+ gt/uc/intel_guc_submission.o \
+ gt/uc/intel_huc.o \
+ gt/uc/intel_huc_debugfs.o \
+ gt/uc/intel_huc_fw.o \
+ gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
+ gt/uc/intel_uc_fw.o
# graphics system controller (GSC) support
-i915-y += gt/intel_gsc.o
+i915-y += \
+ gt/intel_gsc.o
# graphics hardware monitoring (HWMON) support
-i915-$(CONFIG_HWMON) += i915_hwmon.o
+i915-$(CONFIG_HWMON) += \
+ i915_hwmon.o
# modesetting core code
i915-y += \
display/hsw_ips.o \
+ display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/intel_atomic.o \
display/intel_atomic_plane.o \
display/intel_audio.o \
@@ -257,6 +263,7 @@ i915-y += \
display/intel_display.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
+ display/intel_display_params.o \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
@@ -268,8 +275,10 @@ i915-y += \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dpt.o \
+ display/intel_dpt_common.o \
display/intel_drrs.o \
display/intel_dsb.o \
+ display/intel_dsb_buffer.o \
display/intel_fb.o \
display/intel_fb_pin.o \
display/intel_fbc.o \
@@ -287,8 +296,8 @@ i915-y += \
display/intel_load_detect.o \
display/intel_lpe_audio.o \
display/intel_modeset_lock.o \
- display/intel_modeset_verify.o \
display/intel_modeset_setup.o \
+ display/intel_modeset_verify.o \
display/intel_overlay.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
@@ -302,8 +311,6 @@ i915-y += \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
- display/i9xx_plane.o \
- display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
@@ -312,6 +319,10 @@ i915-$(CONFIG_ACPI) += \
display/intel_opregion.o
i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
display/intel_fbdev.o
+i915-$(CONFIG_DEBUG_FS) += \
+ display/intel_display_debugfs.o \
+ display/intel_display_debugfs_params.o \
+ display/intel_pipe_crc.o
# modesetting output/encoder code
i915-y += \
@@ -357,13 +368,14 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
-i915-y += i915_perf.o
+i915-y += \
+ i915_perf.o
# Protected execution platform (PXP) support. Base support is required for HuC
i915-y += \
pxp/intel_pxp.o \
- pxp/intel_pxp_tee.o \
- pxp/intel_pxp_huc.o
+ pxp/intel_pxp_huc.o \
+ pxp/intel_pxp_tee.o
i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_cmd.o \
@@ -374,11 +386,11 @@ i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_session.o
# Post-mortem debug and GPU hang state capture
-i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += \
+ i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
gem/selftests/i915_gem_client_blt.o \
gem/selftests/igt_gem_utils.o \
- selftests/intel_scheduler_helpers.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_atomic.o \
@@ -387,10 +399,12 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/igt_mmap.o \
selftests/igt_reset.o \
selftests/igt_spinner.o \
+ selftests/intel_scheduler_helpers.o \
selftests/librapl.o
# virtual gpu code
-i915-y += i915_vgpu.o
+i915-y += \
+ i915_vgpu.o
i915-$(CONFIG_DRM_I915_GVT) += \
intel_gvt.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index e8ee0a08947e..dfe0b07a122d 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -432,7 +432,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
@@ -475,6 +475,40 @@ intel_dp_link_down(struct intel_encoder *encoder,
}
}
+static void g4x_dp_audio_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ /* Enable audio presence detect */
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void g4x_dp_audio_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /* Disable audio presence detect */
+ intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+}
+
static void intel_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -484,8 +518,6 @@ static void intel_disable_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
/*
* Make sure the panel is off before trying to change the mode.
* But also ensure that we have vdd while we switch off the panel.
@@ -631,8 +663,6 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
- if (crtc_state->has_audio)
- intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
@@ -686,8 +716,8 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
+ encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_atomic_state *state,
@@ -695,8 +725,8 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
+ encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
@@ -1325,6 +1355,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->disable = g4x_disable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
}
+ intel_encoder->audio_enable = g4x_dp_audio_enable;
+ intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 45e044b4a88d..8096492b3fad 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -228,25 +228,51 @@ static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
+static void g4x_hdmi_audio_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink);
+
+ /* Enable audio presence detect */
+ intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /* Disable audio presence detect */
+ intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
+}
+
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
g4x_hdmi_enable_port(encoder, pipe_config);
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_atomic_state *state,
@@ -262,8 +288,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
/*
* HW workaround, need to write this twice for issue
@@ -296,10 +320,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_atomic_state *state,
@@ -317,8 +337,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
/*
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
@@ -351,10 +369,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_atomic_state *state,
@@ -362,11 +376,6 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void intel_disable_hdmi(struct intel_atomic_state *state,
@@ -384,7 +393,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
- temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
+ temp &= ~SDVO_ENABLE;
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
@@ -433,8 +442,6 @@ static void g4x_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
@@ -443,7 +450,6 @@ static void pch_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_atomic_state *state,
@@ -750,6 +756,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
else
intel_encoder->enable = g4x_enable_hdmi;
}
+ intel_encoder->audio_enable = g4x_hdmi_audio_enable;
+ intel_encoder->audio_disable = g4x_hdmi_audio_disable;
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 7dc38ac02092..611a7d6ef80c 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -193,7 +193,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!i915->params.enable_ips)
+ if (!i915->display.params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@@ -329,7 +329,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(i915->params.enable_ips));
+ str_yes_no(i915->display.params.enable_ips));
if (DISPLAY_VER(i915) >= 8) {
seq_puts(m, "Currently: unknown\n");
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index af0c79a4c9a4..b37c0d02d500 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -2993,7 +2993,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
+ dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index c4585e445198..481fcb650850 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -330,7 +330,7 @@ static int afe_clk(struct intel_encoder *encoder,
int bpp;
if (crtc_state->dsc.compression_enable)
- bpp = crtc_state->dsc.compressed_bpp;
+ bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -860,7 +860,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* compressed and non-compressed bpp.
*/
if (crtc_state->dsc.compression_enable) {
- mul = crtc_state->dsc.compressed_bpp;
+ mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
}
@@ -884,7 +884,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
int bpp, line_time_us, byte_clk_period_ns;
if (crtc_state->dsc.compression_enable)
- bpp = crtc_state->dsc.compressed_bpp;
+ bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1451,8 +1451,8 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- if (pipe_config->dsc.compressed_bpp) {
- int div = pipe_config->dsc.compressed_bpp;
+ if (pipe_config->dsc.compressed_bpp_x16) {
+ int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
adjusted_mode->crtc_htotal =
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 5d18145da279..ec0d5168b503 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state)
drm_atomic_state_default_release(&state->base);
kfree(state->global_objs);
-
- i915_sw_fence_fini(&state->commit_ready);
-
kfree(state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index b1074350616c..06c2455bdd78 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -31,7 +31,10 @@
* prepare/check/commit/cleanup steps.
*/
+#include <linux/dma-fence-chain.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
@@ -1012,6 +1015,41 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+static int add_dma_resv_fences(struct dma_resv *resv,
+ struct drm_plane_state *new_plane_state)
+{
+ struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
+ struct dma_fence *new;
+ int ret;
+
+ ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
+ if (ret)
+ goto error;
+
+ if (new && fence) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dma_fence_chain_init(chain, fence, new, 1);
+ fence = &chain->base;
+
+ } else if (new) {
+ fence = new;
+ }
+
+ dma_fence_put(new_plane_state->fence);
+ new_plane_state->fence = fence;
+ return 0;
+
+error:
+ dma_fence_put(fence);
+ return ret;
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -1035,7 +1073,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
+ struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
@@ -1058,55 +1096,28 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* can safely continue.
*/
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv,
- false, 0,
- GFP_KERNEL);
+ ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
+ &new_plane_state->uapi);
if (ret < 0)
return ret;
}
}
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
if (!obj)
return 0;
-
ret = intel_plane_pin_fb(new_plane_state);
if (ret)
return ret;
- i915_gem_object_wait_priority(obj, 0, &attr);
+ ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
+ if (ret < 0)
+ goto unpin_fb;
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
+ if (new_plane_state->uapi.fence) {
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
- dma_resv_iter_begin(&cursor, obj->base.resv,
- DMA_RESV_USAGE_WRITE);
- dma_resv_for_each_fence_unlocked(&cursor, fence) {
- intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- }
- dma_resv_iter_end(&cursor);
- } else {
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 19605264a35c..07e0c73204f3 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/i915_component.h>
#include "i915_drv.h"
@@ -521,25 +522,25 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
- unsigned int fec_coeff, cdclk, vdsc_bpp;
+ unsigned int fec_coeff, cdclk, vdsc_bppx16;
unsigned int link_clk, lanes;
unsigned int hblank_rise;
h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
- vdsc_bpp = crtc_state->dsc.compressed_bpp;
+ vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;
- drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
- "lanes = %u vdsc_bpp = %u cdclk = %u\n",
- h_active, link_clk, lanes, vdsc_bpp, cdclk);
+ drm_dbg_kms(&i915->drm,
+ "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n",
+ h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk);
- if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+ if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk))
return 0;
link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
@@ -551,8 +552,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
mul_u32_u32(link_clk, cdclk));
- tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
- mul_u32_u32(link_clk * lanes, fec_coeff));
+ tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bppx16 * 8, 1000000),
+ mul_u32_u32(link_clk * lanes * 16, fec_coeff));
tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
mul_u32_u32(64 * pixel_clk, 1000000));
link_clks_active = (tu_line - 1) * 64 + tu_data;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 2e8f17c04522..612d4cd9dacb 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -88,10 +88,10 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
- if (i915->params.invert_brightness < 0)
+ if (i915->display.params.invert_brightness < 0)
return val;
- if (i915->params.invert_brightness > 0 ||
+ if (i915->display.params.invert_brightness > 0 ||
intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -132,8 +132,9 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (i915->params.invert_brightness > 0 ||
- (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
+ if (i915->display.params.invert_brightness > 0 ||
+ (i915->display.params.invert_brightness == 0 &&
+ intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4e8f1e91bb08..2fd72b2fd109 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1116,7 +1116,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915->params.vbt_sdvo_panel_type;
+ index = i915->display.params.vbt_sdvo_panel_type;
if (index == -2) {
drm_dbg_kms(&i915->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
@@ -1514,9 +1514,9 @@ parse_edp(struct drm_i915_private *i915,
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915->params.edp_vswing) {
+ if (i915->display.params.edp_vswing) {
panel->vbt.edp.low_vswing =
- i915->params.edp_vswing == 1;
+ i915->display.params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
panel->vbt.edp.low_vswing = vswing == 0;
@@ -2473,6 +2473,27 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
}
+static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+
+ if (!intel_bios_encoder_supports_dvi(devdata))
+ return;
+
+ /*
+	 * Some BDW machines (e.g. HP Pavilion 15-ab) shipped
+ * with a HSW VBT where the level shifter value goes
+ * up to 11, whereas the BDW max is 9.
+ */
+ if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
+ drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
+ port_name(port), devdata->child.hdmi_level_shifter_value, 9);
+
+ devdata->child.hdmi_level_shifter_value = 9;
+ }
+}
+
static bool
intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata)
{
@@ -2652,6 +2673,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
}
sanitize_device_type(devdata, port);
+ sanitize_hdmi_level_shift(devdata, port);
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -3392,8 +3414,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->pipe_bpp = bpc * 3;
- crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
- VBT_DSC_MAX_BPP(dsc->max_bpp));
+ crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp)));
/*
* FIXME: This is ugly, and slice count should take DSC engine
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c4839c67cb0f..b93d1ad7936d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2598,8 +2598,9 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
* => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
*/
int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24;
- int min_cdclk_bj = (crtc_state->dsc.compressed_bpp * pixel_clock) /
- (2 * bigjoiner_interface_bits);
+ int min_cdclk_bj =
+ (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ pixel_clock) / (2 * bigjoiner_interface_bits);
min_cdclk = max(min_cdclk, min_cdclk_bj);
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 1d26be54ddfc..c5092b7e87d5 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -785,14 +785,12 @@ static void chv_assign_csc(struct intel_crtc_state *crtc_state)
/* convert hw value with given bit_precision to lut property val */
static u32 intel_color_lut_pack(u32 val, int bit_precision)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1),
+ (1 << bit_precision) - 1);
+ else
+ return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1),
+ (1 << bit_precision) - 1);
}
static u32 i9xx_lut_8(const struct drm_color_lut *color)
@@ -911,7 +909,7 @@ static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static u16 i965_lut_11p6_max_pack(u32 val)
{
/* PIPEGCMAX is 11.6, clamp to 10.6 */
- return clamp_val(val, 0, 0xffff);
+ return min(val, 0xffffu);
}
static u32 ilk_lut_10(const struct drm_color_lut *color)
@@ -1528,14 +1526,27 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
return 35;
}
-/*
- * change_lut_val_precision: helper function to upscale or downscale lut values.
- * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient
- * as currently there are no lut values exceeding 32 bit.
- */
-static u32 change_lut_val_precision(u32 lut_val, int to, int from)
+static u32 glk_degamma_lut(const struct drm_color_lut *color)
+{
+ return color->green;
+}
+
+static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
+{
+ /* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */
+ entry->red = entry->green = entry->blue = min(val, 0xffffu);
+}
+
+static u32 mtl_degamma_lut(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 24);
+}
+
+static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
{
- return mul_u32_u32(lut_val, (1 << to)) / (1 << from);
+ /* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */
+ entry->red = entry->green = entry->blue =
+ intel_color_lut_pack(min(val, 0xffffffu), 24);
}
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
@@ -1572,20 +1583,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- u32 lut_val;
-
- if (DISPLAY_VER(i915) >= 14)
- lut_val = change_lut_val_precision(lut[i].green, 24, 16);
- else
- lut_val = lut[i].green;
-
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- lut_val);
+ DISPLAY_VER(i915) >= 14 ?
+ mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(i915))
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(i915) >= 14 ?
+ 1 << 24 : 1 << 16);
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@@ -3572,17 +3579,10 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
- /*
- * For MTL and beyond, convert back the 24 bit lut values
- * read from HW to 16 bit values to maintain parity with
- * userspace values
- */
if (DISPLAY_VER(dev_priv) >= 14)
- val = change_lut_val_precision(val, 16, 24);
-
- lut[i].red = val;
- lut[i].green = val;
- lut[i].blue = val;
+ mtl_degamma_lut_pack(&lut[i], val);
+ else
+ glk_degamma_lut_pack(&lut[i], val);
}
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 913e5d230a4d..0e33a0523a75 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -841,7 +841,7 @@ intel_crt_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
- if (dev_priv->params.load_detect_test) {
+ if (dev_priv->display.params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
@@ -901,7 +901,7 @@ load_detect:
else if (DISPLAY_VER(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (dev_priv->params.load_detect_test)
+ else if (dev_priv->display.params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 66fe880af8f3..2d15e82c0b3d 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index d414f6b7f993..a8fa76580802 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -31,7 +31,7 @@
bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
{
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C)
+ if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
return false;
@@ -206,6 +206,13 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
intel_clear_response_ready_flag(i915, port, lane);
+ /*
+ * FIXME: Workaround to let HW to settle
+ * down and let the message bus to end up
+ * in a known state
+ */
+ intel_cx0_bus_reset(i915, port, lane);
+
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
@@ -285,6 +292,13 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
intel_clear_response_ready_flag(i915, port, lane);
+ /*
+	 * FIXME: Workaround to let the HW settle
+	 * down and let the message bus end up
+	 * in a known state
+ */
+ intel_cx0_bus_reset(i915, port, lane);
+
return 0;
}
@@ -1850,8 +1864,8 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
-void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c10pll_state *pll_state)
+static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c10pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
@@ -2103,8 +2117,8 @@ static bool intel_c20_use_mplla(u32 clock)
return false;
}
-void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c20pll_state *pll_state)
+static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c20pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
@@ -2378,8 +2392,8 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}
-int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state)
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state)
{
unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
@@ -2405,8 +2419,8 @@ int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
return tmpclk;
}
-int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
+static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state)
{
unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
unsigned int multiplier, refclk = 38400;
@@ -3003,17 +3017,110 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
return ICL_PORT_DPLL_DEFAULT;
}
-void intel_c10pll_state_verify(struct intel_atomic_state *state,
+static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder,
+ struct intel_c10pll_state *mpllb_hw_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
+ u8 expected = mpllb_sw_state->pll[i];
+
+ I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name, i,
+ expected, mpllb_hw_state->pll[i]);
+ }
+
+ I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->tx, mpllb_hw_state->tx);
+
+ I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->cmn, mpllb_hw_state->cmn);
+}
+
+void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
+ else
+ intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
+}
+
+int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
+
+ return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
+}
+
+static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder,
+ struct intel_c20pll_state *mpll_hw_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
+ bool use_mplla;
+ int i;
+
+ use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
+ if (use_mplla) {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
+ "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
+ "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
+ }
+}
+
+void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_c10pll_state mpllb_hw_state = {};
- const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
struct intel_encoder *encoder;
+ struct intel_cx0pll_state mpll_hw_state = {};
enum phy phy;
- int i;
if (DISPLAY_VER(i915) < 14)
return;
@@ -3029,27 +3136,10 @@ void intel_c10pll_state_verify(struct intel_atomic_state *state,
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
phy = intel_port_to_phy(i915, encoder->port);
- if (!intel_is_c10phy(i915, phy))
- return;
-
- intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state);
+ intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
- for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
- u8 expected = mpllb_sw_state->pll[i];
-
- I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name, i,
- expected, mpllb_hw_state.pll[i]);
- }
-
- I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->tx, mpllb_hw_state.tx);
-
- I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->cmn, mpllb_hw_state.cmn);
+ if (intel_is_c10phy(i915, phy))
+ intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
+ else
+ intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
}
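
The new intel_cx0pll_readout_hw_state()/intel_cx0pll_calc_port_clock() wrappers above hide the C10-vs-C20 PHY choice from callers. A minimal sketch of the intended calling pattern (it mirrors the mtl_ddi_get_config() hunk later in this diff; the local variable is illustrative only):

	struct intel_cx0pll_state pll_state = {};

	/* readout picks the C10 or C20 variant internally based on the PHY */
	intel_cx0pll_readout_hw_state(encoder, &pll_state);
	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &pll_state);
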
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 0e0a38dac8cd..c6682677253a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -16,6 +16,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
struct intel_c20pll_state;
+struct intel_cx0pll_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -28,20 +29,19 @@ void intel_mtl_pll_disable(struct intel_encoder *encoder);
enum icl_port_dpll_id
intel_mtl_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state);
+
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder);
+void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state);
+int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state);
+
void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_c10pll_state *hw_state);
-int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state);
-void intel_c10pll_state_verify(struct intel_atomic_state *state,
+void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c20pll_state *pll_state);
void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
const struct intel_c20pll_state *hw_state);
-int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9151d5add960..38f28c480b38 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -25,6 +25,7 @@
*
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
#include <drm/display/drm_scdc_helper.h>
@@ -2210,16 +2211,87 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!crtc_state->fec_enable)
return;
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
- drm_dbg_kms(&i915->drm,
- "Failed to set FEC_READY in the sink\n");
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
+ enable ? DP_FEC_READY : 0) <= 0)
+ drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
+ enable ? "enabled" : "disabled");
+
+ if (enable &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
+ DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
+ drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
+}
+
+static int read_fec_detected_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
+{
+ struct drm_i915_private *i915 = to_i915(aux->drm_dev);
+ int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
+ int status;
+ int err;
+
+ err = readx_poll_timeout(read_fec_detected_status, aux, status,
+ status & mask || status < 0,
+ 10000, 200000);
+
+ if (!err && status >= 0)
+ return;
+
+ if (err == -ETIMEDOUT)
+ drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n",
+ str_enabled_disabled(enabled));
+ else
+ drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status);
+}
+
+void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (enabled)
+ ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ else
+ ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+
+ if (ret)
+ drm_err(&i915->drm,
+ "Timeout waiting for FEC live state to get %s\n",
+ str_enabled_disabled(enabled));
+
+ /*
+ * At least the Synaptics MST hub doesn't set the detected flag when
+ * FEC decoding is disabled, so skip waiting for it.
+ */
+ if (enabled)
+ wait_for_fec_detected(&intel_dp->aux, enabled);
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
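The FEC wait added above combines two steps: a source-side poll of DP_TP_STATUS for FEC_ENABLE_LIVE and, when enabling, a sink-side DPCD poll built on readx_poll_timeout() from <linux/iopoll.h> (hence the new include). A reduced sketch of that iopoll pattern as used by wait_for_fec_detected(), with the same 10 ms interval and 200 ms budget (the example_ wrapper name is illustrative only):

	static int example_wait_fec_enable_detected(struct drm_dp_aux *aux)
	{
		int status;

		/* re-read DP_FEC_STATUS every 10 ms, give up after 200 ms */
		return readx_poll_timeout(read_fec_detected_status, aux, status,
					  status & DP_FEC_DECODE_EN_DETECTED || status < 0,
					  10000, 200000);
	}
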
@@ -2234,8 +2306,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
0, DP_TP_CTL_FEC_ENABLE);
}
-static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_disable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2466,13 +2538,17 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
+
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
* in the FEC_CONFIGURATION register to 1 before initiating link
* training
*/
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
@@ -2505,7 +2581,8 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ if (!is_mst)
+ intel_dsc_dp_pps_write(encoder, crtc_state);
}
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2616,13 +2693,16 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
* in the FEC_CONFIGURATION register to 1 before initiating link
* training
*/
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
@@ -2643,7 +2723,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ if (!is_mst)
+ intel_dsc_dp_pps_write(encoder, crtc_state);
}
static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2695,9 +2776,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
- true);
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(intel_dp, crtc_state);
if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
@@ -2705,10 +2788,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
- if (!is_mst)
+ if (!is_mst) {
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
-
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+ }
}
static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2717,10 +2800,15 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (HAS_DP20(dev_priv))
+ if (HAS_DP20(dev_priv)) {
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
+ if (crtc_state->has_panel_replay)
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
+ DP_PANEL_REPLAY_ENABLE);
+ }
if (DISPLAY_VER(dev_priv) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2866,8 +2954,7 @@ static void disable_ddi_buf(struct intel_encoder *encoder,
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
- /* Disable FEC in DP Sink */
- intel_ddi_disable_fec_state(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -2882,10 +2969,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
mtl_disable_ddi_buf(encoder, crtc_state);
/* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
- intel_ddi_disable_fec_state(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
} else {
disable_ddi_buf(encoder, crtc_state);
}
+
+ intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
}
static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
@@ -2925,6 +3014,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
+
/*
* From TGL spec: "If single stream or multi-stream master transcoder:
* Configure Transcoder Clock select to direct no clock to the
@@ -3110,11 +3201,18 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
-
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
+/* FIXME bad home for this function */
+i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ return DISPLAY_VER(i915) >= 14 ?
+ MTL_CHICKEN_TRANS(cpu_transcoder) :
+ CHICKEN_TRANS(cpu_transcoder);
+}
+
static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
@@ -3233,8 +3331,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_wait_ddi_buf_active(dev_priv, port);
-
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -3252,6 +3348,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_enable_transcoder(crtc_state);
+ intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
+
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -3259,10 +3357,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
else
intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
- /* Enable hdcp if it's desired */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+ intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+
}
static void intel_disable_ddi_dp(struct intel_atomic_state *state,
@@ -3271,16 +3367,16 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
intel_dp->link_trained = false;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
- intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
- false);
+ intel_dp_sink_disable_decompression(state,
+ connector, old_crtc_state);
/* Disable Ignore_MSA bit in DP Sink */
intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
false);
@@ -3294,8 +3390,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
drm_dbg_kms(&i915->drm,
@@ -3854,18 +3948,13 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
- } else if (intel_is_c10phy(i915, phy)) {
- intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10);
- crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
} else {
- intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20);
- crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
}
intel_ddi_get_config(encoder, crtc_state);
@@ -4844,6 +4933,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->audio_enable = intel_audio_codec_enable;
+ encoder->audio_disable = intel_audio_codec_disable;
encoder->get_hw_state = intel_ddi_get_hw_state;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 4999c0ee229b..63853a1f6582 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -27,6 +27,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -60,6 +62,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 28d85e1e858e..5cf162628b95 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -48,6 +48,7 @@
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
+#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -72,10 +73,10 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
-#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
+#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
@@ -193,12 +194,9 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
- if (enable)
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
- 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
- else
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
- DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS,
+ enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}
/* Wa_2006604312:icl,ehl */
@@ -206,10 +204,9 @@ static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
- if (enable)
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
- else
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DPFR_GATING_DIS,
+ enable ? DPFR_GATING_DIS : 0);
}
/* Wa_1604331009:icl,jsl,ehl */
@@ -217,7 +214,8 @@ static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ CURSOR_GATING_DIS,
enable ? CURSOR_GATING_DIS : 0);
}
@@ -397,7 +395,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
@@ -430,16 +427,16 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = TRANSCONF(cpu_transcoder);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ val | TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -458,7 +455,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
@@ -469,8 +465,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = TRANSCONF(cpu_transcoder);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if ((val & TRANSCONF_ENABLE) == 0)
return;
@@ -485,14 +480,12 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~TRANSCONF_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
- FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
- else if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
- intel_de_write(dev_priv, reg, val);
if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -896,6 +889,48 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
+static void intel_encoders_audio_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->audio_enable)
+ encoder->audio_enable(encoder, crtc_state, conn_state);
+ }
+}
+
+static void intel_encoders_audio_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->audio_disable)
+ encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
+ }
+}
+
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
(new_crtc_state)->feature)
@@ -906,12 +941,18 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ if (!new_crtc_state->hw.active)
+ return false;
+
return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ if (!old_crtc_state->hw.active)
+ return false;
+
return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}
@@ -928,6 +969,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ if (!new_crtc_state->hw.active)
+ return false;
+
return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
(new_crtc_state->vrr.enable &&
(new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
@@ -937,12 +981,37 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ if (!old_crtc_state->hw.active)
+ return false;
+
return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
(old_crtc_state->vrr.enable &&
(new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
vrr_params_changed(old_crtc_state, new_crtc_state)));
}
+static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
+ (new_crtc_state->has_audio &&
+ memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
+}
+
+static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
+ (old_crtc_state->has_audio &&
+ memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
+}
+
#undef is_disabling
#undef is_enabling
@@ -983,6 +1052,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
+
+ if (audio_enabling(old_crtc_state, new_crtc_state))
+ intel_encoders_audio_enable(state, crtc);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1066,6 +1138,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_crtc_update_active_timings(old_crtc_state, false);
}
+ if (audio_disabling(old_crtc_state, new_crtc_state))
+ intel_encoders_audio_disable(state, crtc);
+
intel_drrs_deactivate(old_crtc_state);
intel_psr_pre_plane_update(state, crtc);
@@ -1501,12 +1576,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder transcoder = crtc_state->cpu_transcoder;
- i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
- CHICKEN_TRANS(transcoder);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_de_rmw(dev_priv, reg,
+ intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
HSW_FRAME_START_DELAY_MASK,
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
@@ -1784,31 +1856,31 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
+ /*
+ * DG2's "TC1", although TC-capable output, doesn't share the same flow
+ * as other platforms on the display engine side and rather rely on the
+ * SNPS PHY, that is programmed separately
+ */
if (IS_DG2(dev_priv))
- /* DG2's "TC1" output uses a SNPS PHY */
return false;
- else if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0))
+
+ if (DISPLAY_VER(dev_priv) >= 13)
return phy >= PHY_F && phy <= PHY_I;
else if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
- else
- return false;
+
+ return false;
}
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (phy == PHY_NONE)
- return false;
- else if (IS_DG2(dev_priv))
- /*
- * All four "combo" ports and the TC1 port (PHY E) use
- * Synopsis PHYs.
- */
- return phy <= PHY_E;
-
- return false;
+ /*
+ * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
+ * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
+ */
+ return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
@@ -2397,15 +2469,15 @@ static void compute_m_n(u32 *ret_m, u32 *ret_n,
}
void
-intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool fec_enable)
+ int bw_overhead,
+ struct intel_link_m_n *m_n)
{
- u32 data_clock = bits_per_pixel * pixel_clock;
-
- if (fec_enable)
- data_clock = intel_dp_mode_to_fec_clock(data_clock);
+ u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
+ u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
+ bw_overhead);
+ u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
/*
* Windows/BIOS uses fixed M/N values always. Follow suit.
@@ -2416,11 +2488,11 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
*/
m_n->tu = 64;
compute_m_n(&m_n->data_m, &m_n->data_n,
- data_clock, link_clock * nlanes * 8,
+ data_m, data_n,
0x8000000);
compute_m_n(&m_n->link_m, &m_n->link_n,
- pixel_clock, link_clock,
+ pixel_clock, link_symbol_clock,
0x80000);
}
@@ -2838,67 +2910,6 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
-static void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- struct dpll clock;
- u32 mdiv;
- int refclk = 100000;
-
- /* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
- mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
- vlv_dpio_put(dev_priv);
-
- clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
- clock.m2 = mdiv & DPIO_M2DIV_MASK;
- clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
- clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
- clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
-
- pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
-}
-
-static void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- struct dpll clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
- int refclk = 100000;
-
- /* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
- pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
- pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
- pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
- pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- vlv_dpio_put(dev_priv);
-
- clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = (pll_dw0 & 0xff) << 22;
- if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
- clock.m2 |= pll_dw2 & 0x3fffff;
- clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
- clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
- clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
-
- pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
-}
-
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
@@ -3790,9 +3801,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
- MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
- CHICKEN_TRANS(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -3821,133 +3830,27 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
-
- if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev_priv))
- return 120000;
- else if (DISPLAY_VER(dev_priv) != 2)
- return 96000;
- else
- return 48000;
-}
-
-/* Returns the clock of the currently programmed mode of the given pipe. */
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
- u32 fp;
- struct dpll clock;
- int port_clock;
- int refclk = i9xx_pll_refclk(dev, pipe_config);
-
- if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = pipe_config->dpll_hw_state.fp0;
- else
- fp = pipe_config->dpll_hw_state.fp1;
-
- clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
- clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
- clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
- } else {
- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
- }
-
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
- else
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT);
-
- switch (dpll & DPLL_MODE_MASK) {
- case DPLLB_MODE_DAC_SERIAL:
- clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
- 5 : 10;
- break;
- case DPLLB_MODE_LVDS:
- clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
- 7 : 14;
- break;
- default:
- drm_dbg_kms(&dev_priv->drm,
- "Unknown DPLL mode %08x in programmed "
- "mode\n", (int)(dpll & DPLL_MODE_MASK));
- return;
- }
-
- if (IS_PINEVIEW(dev_priv))
- port_clock = pnv_calc_dpll_params(refclk, &clock);
- else
- port_clock = i9xx_calc_dpll_params(refclk, &clock);
- } else {
- enum pipe lvds_pipe;
-
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
- lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
-
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT);
-
- if (lvds & LVDS_CLKB_POWER_UP)
- clock.p2 = 7;
- else
- clock.p2 = 14;
- } else {
- if (dpll & PLL_P1_DIVIDE_BY_TWO)
- clock.p1 = 2;
- else {
- clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
- }
- if (dpll & PLL_P2_DIVIDE_BY_4)
- clock.p2 = 4;
- else
- clock.p2 = 2;
- }
-
- port_clock = i9xx_calc_dpll_params(refclk, &clock);
- }
-
- /*
- * This value includes pixel_multiplier. We will use
- * port_clock to compute adjusted_mode.crtc_clock in the
- * encoder's get_config() function.
- */
- pipe_config->port_clock = port_clock;
-}
-
int intel_dotclock_calculate(int link_freq,
const struct intel_link_m_n *m_n)
{
/*
- * The calculation for the data clock is:
+ * The calculation for the data clock -> pixel clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
 * But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
- * and the link clock is simpler:
- * link_clock = (m * link_clock) / n
+ * and for link freq (in 10 kbit/s units) -> pixel clock it is:
+ * link_symbol_clock = link_freq * 10 / link_symbol_size
+ * pixel_clock = (m * link_symbol_clock) / n
+ * or for more precision:
+ * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
*/
if (!m_n->link_n)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
- m_n->link_n);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
+ m_n->link_n * intel_dp_link_symbol_size(link_freq));
}
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
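As a sanity check on the reworked formula above: for 8b/10b link rates the symbol size is 10 bits, so link_freq * 10 / link_symbol_size collapses back to link_freq and the result stays the familiar (m * link_freq) / n; only 128b/132b rates, where intel_dp_link_symbol_size() is assumed to report 32-bit symbols, actually yield a different dotclock.
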
@@ -4679,6 +4582,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
+ crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
@@ -5057,23 +4961,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-/*
- * Checks state where we only read out the enabling, but not the entire
- * state itself (like full infoframes or ELD for audio). These states
- * require a full modeset on bootup to fix up.
- */
-#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
- if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
- PIPE_CONF_CHECK_BOOL(name); \
- } else { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
- str_yes_no(current_config->name), \
- str_yes_no(pipe_config->name)); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
@@ -5261,8 +5148,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(enhanced_framing);
PIPE_CONF_CHECK_BOOL(fec_enable);
- PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
- PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
+ if (!fastset) {
+ PIPE_CONF_CHECK_BOOL(has_audio);
+ PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
+ }
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -5414,7 +5303,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.compression_enable);
PIPE_CONF_CHECK_I(dsc.dsc_split);
- PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
PIPE_CONF_CHECK_I(splitter.link_count);
@@ -5432,7 +5321,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
-#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
@@ -5523,6 +5411,16 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
return 0;
}
+static void
+intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->uapi.mode_changed = true;
+
+ crtc_state->update_pipe = false;
+ crtc_state->update_m_n = false;
+ crtc_state->update_lrr = false;
+}
+
/**
* intel_modeset_all_pipes_late - force a full modeset on all pipes
* @state: intel atomic state
@@ -5556,9 +5454,8 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->update_pipe = false;
- crtc_state->update_m_n = false;
- crtc_state->update_lrr = false;
+ intel_crtc_flag_modeset(crtc_state);
+
crtc_state->update_planes |= crtc_state->active_planes;
crtc_state->async_flip_planes = 0;
crtc_state->do_async_flip = false;
@@ -5671,17 +5568,17 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
else
new_crtc_state->uapi.mode_changed = false;
- if (intel_crtc_needs_modeset(new_crtc_state) ||
- intel_compare_link_m_n(&old_crtc_state->dp_m_n,
+ if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if (intel_crtc_needs_modeset(new_crtc_state) ||
- (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
+ if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
new_crtc_state->update_lrr = false;
- if (!intel_crtc_needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ intel_crtc_flag_modeset(new_crtc_state);
+ else
new_crtc_state->update_pipe = true;
}
@@ -6453,15 +6350,14 @@ int intel_atomic_check(struct drm_device *dev,
if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
+ if (intel_dp_mst_crtc_needs_modeset(state, crtc))
+ intel_crtc_flag_modeset(new_crtc_state);
+
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
enum transcoder master = new_crtc_state->mst_master_transcoder;
- if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
+ intel_crtc_flag_modeset(new_crtc_state);
}
if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -6470,21 +6366,13 @@ int intel_atomic_check(struct drm_device *dev,
if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
trans |= BIT(new_crtc_state->master_transcoder);
- if (intel_cpu_transcoders_need_modeset(state, trans)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_cpu_transcoders_need_modeset(state, trans))
+ intel_crtc_flag_modeset(new_crtc_state);
}
if (new_crtc_state->bigjoiner_pipes) {
- if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes))
+ intel_crtc_flag_modeset(new_crtc_state);
}
}
@@ -6505,10 +6393,6 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = drm_dp_mst_atomic_check(&state->base);
- if (ret)
- goto fail;
-
ret = intel_atomic_check_planes(state);
if (ret)
goto fail;
@@ -6744,8 +6628,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_enable_pipe_crc(crtc);
}
-static void intel_update_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void intel_pre_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
@@ -6787,6 +6671,15 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_color_commit_noarm(new_crtc_state);
intel_crtc_planes_update_noarm(state, crtc);
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
@@ -6815,7 +6708,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* valid pipe configuration from the BIOS we need to take care
* of enabling them on the CRTC's first fastset.
*/
- if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
+ if (intel_crtc_needs_fastset(new_crtc_state) &&
old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -6853,10 +6746,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
+ intel_pre_plane_update(state, crtc);
+
if (!old_crtc_state->hw.active)
continue;
- intel_pre_plane_update(state, crtc);
intel_crtc_disable_planes(state, crtc);
}
@@ -6910,6 +6804,13 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
continue;
intel_enable_crtc(state, crtc);
+ intel_pre_update_crtc(state, crtc);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
+
intel_update_crtc(state, crtc);
}
}
@@ -6947,6 +6848,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
* So first lets enable all pipes that do not need a fullmodeset as
* those don't have any external dependency.
*/
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
+ intel_pre_update_crtc(state, crtc);
+ }
+
while (update_pipes) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -7023,6 +6933,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((update_pipes & BIT(pipe)) == 0)
continue;
+ intel_pre_update_crtc(state, crtc);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries, I915_MAX_PIPES, pipe));
@@ -7056,29 +6975,22 @@ void intel_atomic_helper_free_state_worker(struct work_struct *work)
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
- struct wait_queue_entry wait_fence, wait_reset;
- struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
-
- init_wait_entry(&wait_fence, 0);
- init_wait_entry(&wait_reset, 0);
- for (;;) {
- prepare_to_wait(&intel_state->commit_ready.wait,
- &wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
- I915_RESET_MODESET),
- &wait_reset, TASK_UNINTERRUPTIBLE);
-
+ struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ int ret, i;
- if (i915_sw_fence_done(&intel_state->commit_ready) ||
- test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
- break;
+ for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
+ if (new_plane_state->fence) {
+ ret = dma_fence_wait_timeout(new_plane_state->fence, false,
+ i915_fence_timeout(i915));
+ if (ret <= 0)
+ break;
- schedule();
+ dma_fence_put(new_plane_state->fence);
+ new_plane_state->fence = NULL;
+ }
}
- finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
- I915_RESET_MODESET),
- &wait_reset);
}
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -7370,32 +7282,6 @@ static void intel_atomic_commit_work(struct work_struct *work)
intel_atomic_commit_tail(state);
}
-static int
-intel_atomic_commit_ready(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify notify)
-{
- struct intel_atomic_state *state =
- container_of(fence, struct intel_atomic_state, commit_ready);
-
- switch (notify) {
- case FENCE_COMPLETE:
- /* we do blocking waits in the worker, nothing to do here */
- break;
- case FENCE_FREE:
- {
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_atomic_helper *helper =
- &i915->display.atomic_helper;
-
- if (llist_add(&state->freed, &helper->free_list))
- queue_work(i915->unordered_wq, &helper->free_work);
- break;
- }
- }
-
- return NOTIFY_DONE;
-}
-
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
struct intel_plane_state *old_plane_state, *new_plane_state;
@@ -7418,10 +7304,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_atomic_state_get(&state->base);
- i915_sw_fence_init(&state->commit_ready,
- intel_atomic_commit_ready);
-
/*
* The intel_legacy_cursor_update() fast path takes care
* of avoiding the vblank waits for simple cursor
@@ -7454,7 +7336,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(&dev_priv->drm,
"Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&state->commit_ready);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
@@ -7470,8 +7351,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
struct intel_crtc *crtc;
int i;
- i915_sw_fence_commit(&state->commit_ready);
-
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
@@ -7485,7 +7364,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
- i915_sw_fence_commit(&state->commit_ready);
if (nonblock && state->modeset) {
queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
} else if (nonblock) {
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 0e5dffe8f018..8548f49e3972 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -105,7 +105,6 @@ enum i9xx_plane_id {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
#define for_each_plane_id_on_crtc(__crtc, __p) \
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
@@ -395,8 +394,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool fec_enable);
+ int bw_overhead,
+ struct intel_link_m_n *m_n);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
@@ -482,8 +481,6 @@ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
enum transcoder cpu_transcoder,
struct intel_link_m_n *m_n);
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
@@ -552,7 +549,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
struct drm_device *drm = &(__i915)->drm; \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \
+ if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \
drm_err(drm, format); \
unlikely(__ret_warn_on); \
})
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index ccfe27630fb6..7e82b87e9cde 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -19,6 +19,7 @@
#include "intel_cdclk.h"
#include "intel_display_device.h"
#include "intel_display_limits.h"
+#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
@@ -348,15 +349,6 @@ struct intel_display {
} dbuf;
struct {
- wait_queue_head_t waitqueue;
-
- /* mutex to protect pmdemand programming sequence */
- struct mutex lock;
-
- struct intel_global_obj obj;
- } pmdemand;
-
- struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -444,6 +436,15 @@ struct intel_display {
} ips;
struct {
+ wait_queue_head_t waitqueue;
+
+ /* mutex to protect pmdemand programming sequence */
+ struct mutex lock;
+
+ struct intel_global_obj obj;
+ } pmdemand;
+
+ struct {
struct i915_power_domains domains;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -520,6 +521,7 @@ struct intel_display {
struct intel_hotplug hotplug;
struct intel_opregion opregion;
struct intel_overlay *overlay;
+ struct intel_display_params params;
struct intel_vbt_data vbt;
struct intel_wm wm;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 2836826f8c05..915420d0cef8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -17,6 +17,7 @@
#include "intel_de.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_debugfs.h"
+#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
@@ -641,6 +642,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_display_capabilities(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_display_device_info_print(DISPLAY_INFO(i915),
+ DISPLAY_RUNTIME_INFO(i915), &p);
+
+ return 0;
+}
+
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1059,6 +1071,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -1098,6 +1111,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_hpd_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
+ intel_display_debugfs_params(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -1242,6 +1256,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr420_Native)),
str_yes_no(drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd,
DP_DSC_YCbCr444)));
+ seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
+ drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1434,6 +1450,85 @@ static const struct file_operations i915_dsc_output_format_fops = {
.write = i915_dsc_output_format_write
};
+static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
+ int ret;
+
+ if (!encoder)
+ return -ENODEV;
+
+ ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ intel_dp = intel_attached_dp(intel_connector);
+ seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n",
+ str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ bool dsc_fractional_bpp_enable = false;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force fractional bpp for DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
+ (dsc_fractional_bpp_enable) ? "true" : "false");
+ intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
+
+ *offp += len;
+
+ return len;
+}
+
+static int i915_dsc_fractional_bpp_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fractional_bpp_show, inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fractional_bpp_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fractional_bpp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fractional_bpp_write
+};
+
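A quick usage note for the new per-connector knob defined above and registered further below: like the existing force_dsc_en file, it reads back the current state and accepts a boolean write. The connector directory name used here (DP-1) is only an example and depends on the machine.

/*
 * Example usage (connector name is hypothetical):
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fractional_bpp
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fractional_bpp
 */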
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1511,6 +1606,9 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_output_format", 0644, root,
connector, &i915_dsc_output_format_fops);
+
+ debugfs_create_file("i915_dsc_fractional_bpp", 0644, root,
+ connector, &i915_dsc_fractional_bpp_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
new file mode 100644
index 000000000000..b7e68eb62452
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_drv.h>
+
+#include "intel_display_debugfs_params.h"
+#include "i915_drv.h"
+#include "intel_display_params.h"
+
+/* int param */
+static int intel_display_param_int_show(struct seq_file *m, void *data)
+{
+ int *value = m->private;
+
+ seq_printf(m, "%d\n", *value);
+
+ return 0;
+}
+
+static int intel_display_param_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intel_display_param_int_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_int_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int *value = m->private;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_int_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_int_open,
+ .read = seq_read,
+ .write = intel_display_param_int_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations intel_display_param_int_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_int_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* unsigned int param */
+static int intel_display_param_uint_show(struct seq_file *m, void *data)
+{
+ unsigned int *value = m->private;
+
+ seq_printf(m, "%u\n", *value);
+
+ return 0;
+}
+
+static int intel_display_param_uint_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intel_display_param_uint_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_uint_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ unsigned int *value = m->private;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_uint_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_uint_open,
+ .read = seq_read,
+ .write = intel_display_param_uint_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations intel_display_param_uint_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_uint_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+#define RO(mode) (((mode) & 0222) == 0)
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &intel_display_param_int_fops_ro :
+ &intel_display_param_int_fops);
+}
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_uint(const char *name, umode_t mode,
+ struct dentry *parent, unsigned int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &intel_display_param_uint_fops_ro :
+ &intel_display_param_uint_fops);
+}
+
+#define _intel_display_param_create_file(parent, name, mode, valp) \
+ do { \
+ if (mode) \
+ _Generic(valp, \
+ bool * : debugfs_create_bool, \
+ int * : intel_display_debugfs_create_int, \
+ unsigned int * : intel_display_debugfs_create_uint, \
+ unsigned long * : debugfs_create_ulong, \
+ char ** : debugfs_create_str) \
+ (name, mode, parent, valp); \
+ } while (0)
+
+/* add a subdirectory with files for each intel display param */
+void intel_display_debugfs_params(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct dentry *dir;
+ char dirname[16];
+
+ snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
+ dir = debugfs_lookup(dirname, minor->debugfs_root);
+ if (!dir)
+ dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ if (IS_ERR(dir))
+ return;
+
+ /*
+ * Note: We could create files for params needing special handling
+ * here. Set mode in params to 0 to skip the generic create file, or
+ * just let the generic create file fail silently with -EEXIST.
+ */
+
+#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
+ dir, #x, mode, &i915->display.params.x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+}
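The file-creation macro in this new file leans on C11 _Generic selection: the pointer type of each parameter picks the matching create helper at compile time, and a 0 mode skips file creation entirely. A minimal userspace sketch of that dispatch pattern, with made-up handler names rather than the i915 ones:

#include <stdbool.h>
#include <stdio.h>

static void create_bool_file(const char *name, bool *v)         { printf("%s: bool %d\n", name, *v); }
static void create_int_file(const char *name, int *v)           { printf("%s: int %d\n", name, *v); }
static void create_uint_file(const char *name, unsigned int *v) { printf("%s: uint %u\n", name, *v); }

#define create_param_file(name, valp) \
	_Generic((valp), \
		 bool *         : create_bool_file, \
		 int *          : create_int_file, \
		 unsigned int * : create_uint_file)(name, valp)

int main(void)
{
	bool b = true;
	int i = -1;
	unsigned int u = 42;

	create_param_file("enable_sagv", &b);	/* picks create_bool_file */
	create_param_file("enable_fbc", &i);	/* picks create_int_file */
	create_param_file("verbosity", &u);	/* picks create_uint_file */
	return 0;
}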
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
new file mode 100644
index 000000000000..1e9945a4044c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
+#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
+
+struct drm_i915_private;
+
+void intel_display_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 2b1ec23ba9c3..0b522c6a8d6f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -12,6 +12,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_device.h"
+#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
@@ -937,6 +938,13 @@ void intel_display_device_probe(struct drm_i915_private *i915)
DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
}
+
+ intel_display_params_copy(&i915->display.params);
+}
+
+void intel_display_device_remove(struct drm_i915_private *i915)
+{
+ intel_display_params_free(&i915->display.params);
}
static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
@@ -1105,7 +1113,7 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
}
/* Disable nuclear pageflip by default on pre-g4x */
- if (!i915->params.nuclear_pageflip &&
+ if (!i915->display.params.nuclear_pageflip &&
DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
i915->drm.driver_features &= ~DRIVER_ATOMIC;
}
@@ -1145,5 +1153,6 @@ bool intel_display_device_enabled(struct drm_i915_private *i915)
/* Only valid when HAS_DISPLAY() is true */
drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
- return !i915->params.disable_display && !intel_opregion_headless_sku(i915);
+ return !i915->display.params.disable_display &&
+ !intel_opregion_headless_sku(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 5b5c0e53307f..4299cc452e05 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -161,6 +161,7 @@ struct intel_display_device_info {
bool intel_display_device_enabled(struct drm_i915_private *i915);
void intel_display_device_probe(struct drm_i915_private *i915);
+void intel_display_device_remove(struct drm_i915_private *i915);
void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
void intel_display_device_info_print(const struct intel_display_device_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 44b59ac301e6..62f7b10484be 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -181,6 +181,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ spin_lock_init(&i915->display.fb_tracking.lock);
+ mutex_init(&i915->display.backlight.lock);
+ mutex_init(&i915->display.audio.mutex);
+ mutex_init(&i915->display.wm.wm_mutex);
+ mutex_init(&i915->display.pps.mutex);
+ mutex_init(&i915->display.hdcp.hdcp_mutex);
+
intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
intel_color_init_hooks(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
new file mode 100644
index 000000000000..11e03cfb774d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_display_params.h"
+#include "i915_drv.h"
+
+#define intel_display_param_named(name, T, perm, desc) \
+ module_param_named(name, intel_display_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+#define intel_display_param_named_unsafe(name, T, perm, desc) \
+ module_param_named_unsafe(name, intel_display_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+
+static struct intel_display_params intel_display_modparams __read_mostly = {
+#define MEMBER(T, member, value, ...) .member = (value),
+ INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER)
+#undef MEMBER
+};
+/*
+ * Note: As a rule, keep module parameter sysfs permissions read-only
+ * 0400. Runtime changes are only supported through i915 debugfs.
+ *
+ * For any exceptions requiring write access and runtime changes through module
+ * parameter sysfs, prevent debugfs file creation by setting the parameter's
+ * debugfs mode to 0.
+ */
+
+intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
+ "Load VBT from specified file under /lib/firmware");
+
+intel_display_param_named_unsafe(lvds_channel_mode, int, 0400,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+intel_display_param_named_unsafe(panel_use_ssc, int, 0400,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+intel_display_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+intel_display_param_named_unsafe(enable_dc, int, 0400,
+ "Enable power-saving display C-states. "
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
+
+intel_display_param_named_unsafe(enable_dpt, bool, 0400,
+ "Enable display page table (DPT) (default: true)");
+
+intel_display_param_named_unsafe(enable_sagv, bool, 0400,
+ "Enable system agent voltage/frequency scaling (SAGV) (default: true)");
+
+intel_display_param_named_unsafe(disable_power_well, int, 0400,
+ "Disable display power wells when possible "
+ "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
+
+intel_display_param_named_unsafe(enable_ips, bool, 0400, "Enable IPS (default: true)");
+
+intel_display_param_named_unsafe(invert_brightness, int, 0400,
+ "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed. */
+intel_display_param_named_unsafe(edp_vswing, int, 0400,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+ "(0=use value from vbt [default], 1=low power swing(200mV),"
+ "2=default swing(400mV))");
+
+intel_display_param_named(enable_dpcd_backlight, int, 0400,
+	"Enable support for DPCD backlight control "
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
+
+intel_display_param_named_unsafe(load_detect_test, bool, 0400,
+	"Force-enable the VGA load detect code for testing (default: false). "
+ "For developers only.");
+
+intel_display_param_named_unsafe(force_reset_modeset_test, bool, 0400,
+	"Force a modeset during gpu reset for testing (default: false). "
+ "For developers only.");
+
+intel_display_param_named(disable_display, bool, 0400,
+ "Disable display (default: false)");
+
+intel_display_param_named(verbose_state_checks, bool, 0400,
+	"Enable verbose logs (i.e. WARN_ON()) in case of unexpected hw state conditions.");
+
+intel_display_param_named_unsafe(nuclear_pageflip, bool, 0400,
+ "Force enable atomic functionality on platforms that don't have full support yet.");
+
+intel_display_param_named_unsafe(enable_dp_mst, bool, 0400,
+ "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+
+intel_display_param_named_unsafe(enable_fbc, int, 0400,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+intel_display_param_named_unsafe(enable_psr, int, 0400,
+ "Enable PSR "
+ "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
+ "Default: -1 (use per-chip default)");
+
+intel_display_param_named(psr_safest_params, bool, 0400,
+	"Replace PSR VBT parameters by the safest and not optimal ones. This "
+	"is helpful to detect if PSR issues are related to bad values set in "
+	"VBT. (0=use VBT parameters, 1=use safest parameters) "
+	"Default: 0");
+
+intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
+ "Enable PSR2 selective fetch "
+ "(0=disabled, 1=enabled) "
+ "Default: 1");
+
+__maybe_unused
+static void _param_print_bool(struct drm_printer *p, const char *driver_name,
+ const char *name, bool val)
+{
+ drm_printf(p, "%s.%s=%s\n", driver_name, name, str_yes_no(val));
+}
+
+__maybe_unused
+static void _param_print_int(struct drm_printer *p, const char *driver_name,
+ const char *name, int val)
+{
+ drm_printf(p, "%s.%s=%d\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_uint(struct drm_printer *p, const char *driver_name,
+ const char *name, unsigned int val)
+{
+ drm_printf(p, "%s.%s=%u\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_ulong(struct drm_printer *p, const char *driver_name,
+ const char *name, unsigned long val)
+{
+ drm_printf(p, "%s.%s=%lu\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_charp(struct drm_printer *p, const char *driver_name,
+ const char *name, const char *val)
+{
+ drm_printf(p, "%s.%s=%s\n", driver_name, name, val);
+}
+
+#define _param_print(p, driver_name, name, val) \
+ _Generic(val, \
+ bool : _param_print_bool, \
+ int : _param_print_int, \
+ unsigned int : _param_print_uint, \
+ unsigned long : _param_print_ulong, \
+ char * : _param_print_charp)(p, driver_name, name, val)
+
+/**
+ * intel_display_params_dump - dump intel display modparams
+ * @i915: i915 device
+ * @p: the &drm_printer
+ *
+ * Pretty printer for i915 modparams.
+ */
+void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
+
+__maybe_unused static void _param_dup_charp(char **valp)
+{
+ *valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC);
+}
+
+__maybe_unused static void _param_nop(void *valp)
+{
+}
+
+#define _param_dup(valp) \
+ _Generic(valp, \
+ char ** : _param_dup_charp, \
+ default : _param_nop) \
+ (valp)
+
+void intel_display_params_copy(struct intel_display_params *dest)
+{
+ *dest = intel_display_modparams;
+#define DUP(T, x, ...) _param_dup(&dest->x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+__maybe_unused static void _param_free_charp(char **valp)
+{
+ kfree(*valp);
+ *valp = NULL;
+}
+
+#define _param_free(valp) \
+ _Generic(valp, \
+ char ** : _param_free_charp, \
+ default : _param_nop) \
+ (valp)
+
+/* free the allocated members, *not* the passed in params itself */
+void intel_display_params_free(struct intel_display_params *params)
+{
+#define FREE(T, x, ...) _param_free(&params->x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
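intel_display_params_copy() and intel_display_params_free() above give each device its own snapshot of the module parameters: scalar members come over with the struct assignment, while char * members are deep-copied so the device owns, and can later free, its own string. A small userspace sketch of the same lifecycle, with illustrative names only:

#include <stdlib.h>
#include <string.h>

struct params {
	int enable_fbc;
	char *vbt_firmware;
};

static void params_copy(struct params *dest, const struct params *src)
{
	*dest = *src;						/* copies the scalars */
	dest->vbt_firmware =					/* deep copy the string */
		strdup(src->vbt_firmware ? src->vbt_firmware : "");
}

static void params_free(struct params *p)
{
	free(p->vbt_firmware);					/* free only what we duplicated */
	p->vbt_firmware = NULL;
}

int main(void)
{
	struct params mod = { .enable_fbc = -1, .vbt_firmware = NULL };
	struct params dev;

	params_copy(&dev, &mod);
	params_free(&dev);
	return 0;
}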
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
new file mode 100644
index 000000000000..6206cc51df04
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DISPLAY_PARAMS_H_
+#define _INTEL_DISPLAY_PARAMS_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct drm_i915_private;
+
+/*
+ * Invoke param, a function-like macro, for each intel display param, with
+ * arguments:
+ *
+ * param(type, name, value, mode)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ * debugfs file
+ */
+#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+ param(char *, vbt_firmware, NULL, 0400) \
+ param(int, lvds_channel_mode, 0, 0400) \
+ param(int, panel_use_ssc, -1, 0600) \
+ param(int, vbt_sdvo_panel_type, -1, 0400) \
+ param(int, enable_dc, -1, 0400) \
+ param(bool, enable_dpt, true, 0400) \
+ param(bool, enable_sagv, true, 0600) \
+ param(int, disable_power_well, -1, 0400) \
+ param(bool, enable_ips, true, 0600) \
+ param(int, invert_brightness, 0, 0600) \
+ param(int, edp_vswing, 0, 0400) \
+ param(int, enable_dpcd_backlight, -1, 0600) \
+ param(bool, load_detect_test, false, 0600) \
+ param(bool, force_reset_modeset_test, false, 0600) \
+ param(bool, disable_display, false, 0400) \
+ param(bool, verbose_state_checks, true, 0400) \
+ param(bool, nuclear_pageflip, false, 0400) \
+ param(bool, enable_dp_mst, true, 0600) \
+ param(int, enable_fbc, -1, 0600) \
+ param(int, enable_psr, -1, 0600) \
+ param(bool, psr_safest_params, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, true, 0400) \
+
+#define MEMBER(T, member, ...) T member;
+struct intel_display_params {
+ INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER);
+};
+#undef MEMBER
+
+void intel_display_params_dump(struct drm_i915_private *i915,
+ struct drm_printer *p);
+void intel_display_params_copy(struct intel_display_params *dest);
+void intel_display_params_free(struct intel_display_params *params);
+
+#endif
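The header above generates both the struct layout and the default values from the single INTEL_DISPLAY_PARAMS_FOR_EACH table (an X-macro), so the two cannot drift apart. A standalone sketch of the pattern, using made-up parameters:

#include <stdbool.h>
#include <stdio.h>

#define MY_PARAMS_FOR_EACH(param) \
	param(bool, enable_foo, true) \
	param(int,  bar_level,  -1)

#define MEMBER(T, name, value) T name;
struct my_params {
	MY_PARAMS_FOR_EACH(MEMBER)
};
#undef MEMBER

#define INIT(T, name, value) .name = (value),
static const struct my_params defaults = {
	MY_PARAMS_FOR_EACH(INIT)
};
#undef INIT

int main(void)
{
	printf("enable_foo=%d bar_level=%d\n",
	       defaults.enable_foo, defaults.bar_level);
	return 0;
}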
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e25785ae1c20..e390595d7341 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -967,7 +967,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
DISPLAY_VER(dev_priv) >= 11 ?
DC_STATE_EN_DC9 : 0;
- if (!dev_priv->params.disable_power_well)
+ if (!dev_priv->display.params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -1016,11 +1016,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
- dev_priv->params.disable_power_well =
+ dev_priv->display.params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
- dev_priv->params.disable_power_well);
+ dev_priv->display.params.disable_power_well);
power_domains->allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
+ get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@@ -1950,7 +1950,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well) {
+ if (!i915->display.params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
POWER_DOMAIN_INIT);
@@ -1977,7 +1977,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915->params.disable_power_well)
+ if (!i915->display.params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
@@ -2096,7 +2096,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915->params.disable_power_well)
+ if (!i915->display.params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 07d650050099..47cd6bb04366 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1400,20 +1400,16 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
- enum pipe pipe;
u32 tmp;
drm_WARN_ON_ONCE(&dev_priv->drm,
id != VLV_DISP_PW_DPIO_CMN_BC &&
id != CHV_DISP_PW_DPIO_CMN_D);
- if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
+ if (id == VLV_DISP_PW_DPIO_CMN_BC)
phy = DPIO_PHY0;
- } else {
- pipe = PIPE_C;
+ else
phy = DPIO_PHY1;
- }
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1428,24 +1424,24 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_get(dev_priv);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
}
vlv_dpio_put(dev_priv);
@@ -1499,7 +1495,6 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
u32 reg, val, expected, actual;
/*
@@ -1518,7 +1513,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
reg = _CHV_CMN_DW6_CH1;
vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, pipe, reg);
+ val = vlv_dpio_read(dev_priv, phy, reg);
vlv_dpio_put(dev_priv);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 17178d5d7788..c2c347b22448 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -29,7 +29,7 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
return;
/* reset doesn't touch the display */
- if (!dev_priv->params.force_reset_modeset_test &&
+ if (!dev_priv->display.params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 65ea37fe8cff..b3e942f2eeb0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -198,6 +198,12 @@ struct intel_encoder {
struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*audio_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@@ -624,6 +630,9 @@ struct intel_connector {
struct drm_dp_aux *dsc_decompression_aux;
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capability;
+
+ u8 dsc_hblank_expansion_quirk:1;
+ u8 dsc_decompression_enabled:1;
} dp;
/* Work struct to schedule a uevent on link train failure */
@@ -676,8 +685,6 @@ struct intel_atomic_state {
bool rps_interactive;
- struct i915_sw_fence commit_ready;
-
struct llist_node freed;
};
@@ -1210,6 +1217,7 @@ struct intel_crtc_state {
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
+ bool has_panel_replay;
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
@@ -1361,7 +1369,8 @@ struct intel_crtc_state {
struct {
bool compression_enable;
bool dsc_split;
- u16 compressed_bpp;
+		/* Compressed Bpp in U6.4 format (the low 4 bits hold the fractional part) */
+ u16 compressed_bpp_x16;
u8 slice_count;
struct drm_dsc_config config;
} dsc;
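Several hunks below convert compressed bpp to this U6.4 fixed-point representation through helpers such as to_bpp_x16(), to_bpp_int() and to_bpp_frac(). A userspace sketch of the convention, with illustrative macro definitions (the real helpers live elsewhere in i915):

#include <stdio.h>

#define to_bpp_x16(bpp)		((bpp) << 4)
#define to_bpp_int(bpp_x16)	((bpp_x16) >> 4)
#define to_bpp_frac(bpp_x16)	((bpp_x16) & 0xf)

int main(void)
{
	int bpp_x16 = to_bpp_x16(6) + 8;	/* 6 + 8/16 = 6.5 bpp -> raw 104 */

	printf("%d.%04d bpp (raw %d)\n",
	       to_bpp_int(bpp_x16),			/* 6 */
	       to_bpp_frac(bpp_x16) * 10000 / 16,	/* 5000, i.e. .5 */
	       bpp_x16);				/* 104 */
	return 0;
}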
@@ -1707,9 +1716,13 @@ struct intel_psr {
bool irq_aux_error;
u16 su_w_granularity;
u16 su_y_granularity;
+ bool source_panel_replay_support;
+ bool sink_panel_replay_support;
+ bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
+ u8 entry_setup_frames;
};
struct intel_dp {
@@ -1808,6 +1821,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
int force_dsc_output_format;
+ bool force_dsc_fractional_bpp_en;
int force_dsc_bpc;
bool hobl_failed;
@@ -1992,17 +2006,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
- (intel_dp)->psr.source_support)
-
-static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
-{
- if (!intel_encoder_is_dp(encoder))
- return false;
-
- return CAN_PSR(enc_to_intel_dp(encoder));
-}
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2c1034578984..1422c2370269 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -85,8 +85,8 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-/* DP DSC FEC Overhead factor = 1/(0.972261) */
-#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
+/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -124,7 +124,31 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->port_clock >= 1000000;
+ return drm_dp_is_uhbr_rate(crtc_state->port_clock);
+}
+
+/**
+ * intel_dp_link_symbol_size - get the link symbol size for a given link rate
+ * @rate: link rate in 10kbit/s units
+ *
+ * Returns the link symbol size in bits/symbol units depending on the link
+ * rate -> channel coding.
+ */
+int intel_dp_link_symbol_size(int rate)
+{
+ return drm_dp_is_uhbr_rate(rate) ? 32 : 10;
+}
+
+/**
+ * intel_dp_link_symbol_clock - convert link rate to link symbol clock
+ * @rate: link rate in 10kbit/s units
+ *
+ * Returns the link symbol clock frequency in kHz units depending on the
+ * link rate and channel coding.
+ */
+int intel_dp_link_symbol_clock(int rate)
+{
+ return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
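For reference, the two helpers above reduce to the following arithmetic. The sketch is userspace-only, replacing the kernel's drm_dp_is_uhbr_rate() and DIV_ROUND_CLOSEST() with local equivalents; rates are in 10 kbit/s units as in the driver, so HBR3 is 810000 and UHBR10 is 1000000.

#include <stdio.h>

static int is_uhbr_rate(int rate)       { return rate >= 1000000; }
static int link_symbol_size(int rate)   { return is_uhbr_rate(rate) ? 32 : 10; }
static int link_symbol_clock(int rate)  /* kHz */
{
	int size = link_symbol_size(rate);

	return (rate * 10 + size / 2) / size;	/* round to closest */
}

int main(void)
{
	printf("HBR3:   %d kHz\n", link_symbol_clock(810000));	/* 810000 (8b/10b)    */
	printf("UHBR10: %d kHz\n", link_symbol_clock(1000000));	/* 312500 (128b/132b) */
	return 0;
}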
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
@@ -331,6 +355,9 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
/*
* The required data bandwidth for a mode with given pixel clock and bpp. This
* is the required net bandwidth independent of the data bandwidth efficiency.
+ *
+ * TODO: check if callers of this function should use
+ * intel_dp_effective_data_rate() instead.
*/
int
intel_dp_link_required(int pixel_clock, int bpp)
@@ -339,6 +366,22 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
+/**
+ * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
+ * @pixel_clock: pixel clock in kHz
+ * @bpp_x16: bits per pixel .4 fixed point format
+ * @bw_overhead: BW allocation overhead in 1ppm units
+ *
+ * Return the effective pixel data rate in kB/sec units taking into account
+ * the provided SSC, FEC, DSC BW allocation overhead.
+ */
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+ int bw_overhead)
+{
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
+ 1000000 * 16 * 8);
+}
+
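A worked example of the formula above, assuming a 297000 kHz pixel clock, 24 bpp expressed as bpp_x16 = 24 << 4, and the 1.028530x FEC plus DSC overhead used elsewhere in this series (a userspace sketch doing the same 64-bit math):

#include <stdint.h>
#include <stdio.h>

static int effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead_ppm)
{
	uint64_t n = (uint64_t)pixel_clock * bpp_x16 * bw_overhead_ppm;
	uint64_t d = 1000000ull * 16 * 8;

	return (n + d - 1) / d;			/* DIV_ROUND_UP */
}

int main(void)
{
	/* no overhead: matches intel_dp_link_required(297000, 24) = 891000 kB/s */
	printf("%d kB/s\n", effective_data_rate(297000, 24 << 4, 1000000));
	/* with 2.853% FEC + DSC overhead: 916421 kB/s */
	printf("%d kB/s\n", effective_data_rate(297000, 24 << 4, 1028530));
	return 0;
}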
/*
* Given a link rate and lanes, get the data bandwidth.
*
@@ -362,29 +405,27 @@ intel_dp_link_required(int pixel_clock, int bpp)
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
- if (max_link_rate >= 1000000) {
- /*
- * UHBR rates always use 128b/132b channel encoding, and have
- * 97.71% data bandwidth efficiency. Consider max_link_rate the
- * link bit rate in units of 10000 bps.
- */
- int max_link_rate_kbps = max_link_rate * 10;
-
- max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
- max_link_rate = max_link_rate_kbps / 8;
- }
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+ int max_link_rate_kbps = max_link_rate * 10;
/*
+ * UHBR rates always use 128b/132b channel encoding, and have
+ * 97.71% data bandwidth efficiency. Consider max_link_rate the
+ * link bit rate in units of 10000 bps.
+ */
+ /*
* Lower than UHBR rates always use 8b/10b channel encoding, and have
* 80% data bandwidth efficiency for SST non-FEC. However, this turns
- * out to be a nop by coincidence, and can be skipped:
+ * out to be a nop by coincidence:
*
* int max_link_rate_kbps = max_link_rate * 10;
- * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+ * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
* max_link_rate = max_link_rate_kbps / 8;
*/
-
- return max_link_rate * max_lanes;
+ return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
+ ch_coding_efficiency),
+ 1000000 * 8);
}
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -680,8 +721,22 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
- return div_u64(mul_u32_u32(mode_clock, 1000000U),
- DP_DSC_FEC_OVERHEAD_FACTOR);
+ return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
+ 1000000U);
+}
+
+int intel_dp_bw_fec_overhead(bool fec_enabled)
+{
+ /*
+ * TODO: Calculate the actual overhead for a given mode.
+ * The hard-coded 1/0.972261=2.853% overhead factor
+ * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
+ * 0.453% DSC overhead. This is enough for a 3840 width mode,
+ * which has a DSC overhead of up to ~0.2%, but may not be
+ * enough for a 1024 width mode where this is ~0.8% (on a 4
+ * lane DP link, with 2 DSC slices and 8 bpp color depth).
+ */
+ return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
}
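A quick sanity check of the reversed overhead math above: the old intel_dp_mode_to_fec_clock() divided by 972261 ppm, while the new one multiplies by 1028530 ppm, which is the same 1/0.972261 factor written the other way around. Both evaluate to mode_clock * 1.028530, roughly 152736 kHz for a 148500 kHz mode clock once truncated by div_u64().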
static int
@@ -1369,9 +1424,9 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
return false;
}
-static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- const struct intel_crtc_state *pipe_config)
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config)
{
return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
drm_dp_sink_supports_fec(connector->dp.fec_capability);
@@ -1384,6 +1439,7 @@ static bool intel_dp_supports_dsc(const struct intel_connector *connector,
return false;
return intel_dsc_source_support(crtc_state) &&
+ connector->dp.dsc_decompression_aux &&
drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
}
@@ -1717,15 +1773,15 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
-static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
+static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
{
u32 available_bw, required_bw;
- available_bw = (link_clock * lane_count * timeslots) / 8;
- required_bw = compressed_bpp * (intel_dp_mode_to_fec_clock(mode_clock));
+ available_bw = (link_clock * lane_count * timeslots * 16) / 8;
+ required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
return available_bw > required_bw;
}
@@ -1733,7 +1789,7 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
static int dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits,
- u16 compressed_bpp,
+ u16 compressed_bppx16,
int timeslots)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1748,8 +1804,8 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- if (!is_bw_sufficient_for_dsc_config(compressed_bpp, link_rate, lane_count,
- adjusted_mode->clock,
+ if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
+ lane_count, adjusted_mode->clock,
pipe_config->output_format,
timeslots))
continue;
@@ -1791,7 +1847,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
return 0;
}
-static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
{
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
switch (pipe_config->output_format) {
@@ -1808,9 +1864,9 @@ static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
return 0;
}
-static int dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
- int bpc)
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int bpc)
{
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
pipe_config, bpc) >> 4;
@@ -1862,10 +1918,11 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
ret = dsc_compute_link_config(intel_dp,
pipe_config,
limits,
- valid_dsc_bpp[i],
+ valid_dsc_bpp[i] << 4,
timeslots);
if (ret == 0) {
- pipe_config->dsc.compressed_bpp = valid_dsc_bpp[i];
+ pipe_config->dsc.compressed_bpp_x16 =
+ to_bpp_x16(valid_dsc_bpp[i]);
return 0;
}
}
@@ -1881,6 +1938,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
*/
static int
xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits,
int dsc_max_bpp,
@@ -1888,22 +1946,38 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
- u16 compressed_bpp;
+ u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u16 compressed_bppx16;
+ u8 bppx16_step;
int ret;
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+ if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+ bppx16_step = 16;
+ else
+ bppx16_step = 16 / bppx16_incr;
- for (compressed_bpp = dsc_max_bpp;
- compressed_bpp >= dsc_min_bpp;
- compressed_bpp--) {
+ /* Compressed BPP should be less than the Input DSC bpp */
+ dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
+ dsc_min_bpp = dsc_min_bpp << 4;
+
+ for (compressed_bppx16 = dsc_max_bpp;
+ compressed_bppx16 >= dsc_min_bpp;
+ compressed_bppx16 -= bppx16_step) {
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ !to_bpp_frac(compressed_bppx16))
+ continue;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
limits,
- compressed_bpp,
+ compressed_bppx16,
timeslots);
if (ret == 0) {
- pipe_config->dsc.compressed_bpp = compressed_bpp;
+ pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ to_bpp_frac(compressed_bppx16))
+ drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+
return 0;
}
}
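The step computation above can be read as follows, assuming drm_dp_dsc_sink_bpp_incr() reports the number of supported increments per 1 bpp (16 for a 1/16 bpp granularity sink): such a sink yields bppx16_incr = 16 and bppx16_step = 1, so the loop walks compressed bpp in 1/16 bpp steps; a sink advertising 1/2 bpp granularity yields bppx16_incr = 2 and bppx16_step = 8; anything coarser, or any platform before display version 14, falls back to whole-bpp steps (bppx16_step = 16), matching the old integer-only loop.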
@@ -1924,12 +1998,14 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
int dsc_joiner_max_bpp;
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ pipe_config,
+ pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
@@ -1939,7 +2015,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
if (DISPLAY_VER(i915) >= 13)
- return xelpd_dsc_compute_link_config(intel_dp, pipe_config, limits,
+ return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
@@ -2084,19 +2160,22 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ pipe_config,
+ pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- pipe_config->dsc.compressed_bpp = max(dsc_min_bpp, dsc_max_bpp);
+ pipe_config->dsc.compressed_bpp_x16 =
+ to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
pipe_config->pipe_bpp = pipe_bpp;
@@ -2118,8 +2197,9 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
&pipe_config->hw.adjusted_mode;
int ret;
- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, connector, pipe_config);
+ pipe_config->fec_enable = pipe_config->fec_enable ||
+ (!intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, connector, pipe_config));
if (!intel_dp_supports_dsc(connector, pipe_config))
return -EINVAL;
@@ -2184,18 +2264,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm,
- "Cannot compute valid DSC parameters for Input Bpp = %d "
- "Compressed BPP = %d\n",
+			    "Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = " BPP_X16_FMT "\n",
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
return ret;
}
pipe_config->dsc.compression_enable = true;
drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = %d Slice Count = %d\n",
+ "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp,
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
pipe_config->dsc.slice_count);
return 0;
@@ -2307,6 +2387,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2315,6 +2397,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
bool dsc_needed;
int ret = 0;
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
@@ -2362,15 +2448,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (pipe_config->dsc.compression_enable) {
drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
drm_dbg_kms(&i915->drm,
"DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc.compressed_bpp),
+ to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2439,12 +2525,22 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- /*
- * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
- * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
- * Colorimetry Format indication.
- */
- vsc->revision = 0x5;
+ if (crtc_state->has_panel_replay) {
+ /*
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
+ * Encoding/Colorimetry Format indication.
+ */
+ vsc->revision = 0x7;
+ } else {
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ }
+
vsc->length = 0x13;
/* DP 1.4a spec, Table 2-120 */
@@ -2553,6 +2649,21 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
vsc->revision = 0x4;
vsc->length = 0xe;
}
+ } else if (crtc_state->has_panel_replay) {
+ if (intel_dp->psr.colorimetry_support &&
+ intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ /* [Panel Replay with colorimetry info] */
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else {
+ /*
+ * [Panel Replay without colorimetry info]
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo + Panel Replay.
+ */
+ vsc->revision = 0x6;
+ vsc->length = 0x10;
+ }
} else {
/*
* [PSR1]
@@ -2629,7 +2740,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- int link_bpp)
+ int link_bpp_x16)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
@@ -2654,9 +2765,10 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
if (pipe_config->splitter.enable)
pixel_clock /= pipe_config->splitter.link_count;
- intel_link_compute_m_n(link_bpp, pipe_config->lane_count, pixel_clock,
- pipe_config->port_clock, &pipe_config->dp_m2_n2,
- pipe_config->fec_enable);
+ intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock,
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m2_n2);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2757,7 +2869,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
struct intel_connector *connector = intel_dp->attached_connector;
- int ret = 0, link_bpp;
+ int ret = 0, link_bpp_x16;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2806,10 +2918,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
drm_dp_enhanced_frame_cap(intel_dp->dpcd);
if (pipe_config->dsc.compression_enable)
- link_bpp = pipe_config->dsc.compressed_bpp;
+ link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
else
- link_bpp = intel_dp_output_bpp(pipe_config->output_format,
- pipe_config->pipe_bpp);
+ link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
+ pipe_config->pipe_bpp));
if (intel_dp->mso_link_count) {
int n = intel_dp->mso_link_count;
@@ -2833,12 +2945,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_link_compute_m_n(link_bpp,
+ intel_link_compute_m_n(link_bpp_x16,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
- &pipe_config->dp_m_n,
- pipe_config->fec_enable);
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m_n);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2849,7 +2961,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_dp_drrs_compute_config(connector, pipe_config, link_bpp);
+ intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -2917,25 +3029,180 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable)
+static int
+write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int ret;
+ int err;
+ u8 val;
- if (!crtc_state->dsc.compression_enable)
- return;
+ err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
+ if (err < 0)
+ return err;
- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
- enable ? DP_DECOMPRESSION_EN : 0);
- if (ret < 0)
+ if (set)
+ val |= flag;
+ else
+ val &= ~flag;
+
+ return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
+}
+
+static void
+intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
+ DP_DECOMPRESSION_EN, enable) < 0)
drm_dbg_kms(&i915->drm,
"Failed to %s sink decompression state\n",
str_enable_disable(enable));
}
static void
+intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = connector->port ?
+ connector->port->passthrough_aux : NULL;
+
+ if (!aux)
+ return;
+
+ if (write_dsc_decompression_flag(aux,
+ DP_DSC_PASSTHROUGH_EN, enable) < 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink compression passthrough state\n",
+ str_enable_disable(enable));
+}
+
+static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
+ const struct intel_connector *connector,
+ bool for_get_ref)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct drm_connector *_connector_iter;
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ int ref_count = 0;
+ int i;
+
+ /*
+ * On SST the decompression AUX device won't be shared, each connector
+	 * On SST the decompression AUX device won't be shared; each connector
+	 * uses its own AUX targeting the sink device.
+ if (!connector->mst_port)
+ return connector->dp.dsc_decompression_enabled ? 1 : 0;
+
+ for_each_oldnew_connector_in_state(&state->base, _connector_iter,
+ old_conn_state, new_conn_state, i) {
+ const struct intel_connector *
+ connector_iter = to_intel_connector(_connector_iter);
+
+ if (connector_iter->mst_port != connector->mst_port)
+ continue;
+
+ if (!connector_iter->dp.dsc_decompression_enabled)
+ continue;
+
+ drm_WARN_ON(&i915->drm,
+ (for_get_ref && !new_conn_state->crtc) ||
+ (!for_get_ref && !old_conn_state->crtc));
+
+ if (connector_iter->dp.dsc_decompression_aux ==
+ connector->dp.dsc_decompression_aux)
+ ref_count++;
+ }
+
+ return ref_count;
+}
+
+static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
+
+ connector->dp.dsc_decompression_enabled = true;
+
+ return ret;
+}
+
+static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ connector->dp.dsc_decompression_enabled = false;
+
+ return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
+}
+
+/**
+ * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
+ * @state: atomic state
+ * @connector: connector to enable the decompression for
+ * @new_crtc_state: new state for the CRTC driving @connector
+ *
+ * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
+ * register of the appropriate sink/branch device. On SST this is always the
+ * sink device, whereas on MST based on each device's DSC capabilities it's
+ * either the last branch device (enabling decompression in it) or both the
+ * last branch device (enabling passthrough in it) and the sink device
+ * (enabling decompression in it).
+ */
+void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!new_crtc_state->dsc.compression_enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm,
+ !connector->dp.dsc_decompression_aux ||
+ connector->dp.dsc_decompression_enabled))
+ return;
+
+ if (!intel_dp_dsc_aux_get_ref(state, connector))
+ return;
+
+ intel_dp_sink_set_dsc_passthrough(connector, true);
+ intel_dp_sink_set_dsc_decompression(connector, true);
+}
+
+/**
+ * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
+ * @state: atomic state
+ * @connector: connector to disable the decompression for
+ * @old_crtc_state: old state for the CRTC driving @connector
+ *
+ * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
+ * register of the appropriate sink/branch device, corresponding to the
+ * sequence in intel_dp_sink_enable_decompression().
+ */
+void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!old_crtc_state->dsc.compression_enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm,
+ !connector->dp.dsc_decompression_aux ||
+ !connector->dp.dsc_decompression_enabled))
+ return;
+
+ if (!intel_dp_dsc_aux_put_ref(state, connector))
+ return;
+
+ intel_dp_sink_set_dsc_decompression(connector, false);
+ intel_dp_sink_set_dsc_passthrough(connector, false);
+}
+
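Both helpers above gate the DPCD writes on a first-get/last-put style check, since several MST connectors can share one decompression AUX. A userspace sketch of that idea (the real code derives the user count from the atomic state rather than keeping a counter, and the connector names are illustrative):

#include <stdio.h>

static int users;			/* connectors with decompression enabled */

static void enable_decompression(const char *connector)
{
	if (users++ == 0)		/* first user programs the branch device */
		printf("%s: writing DP_DSC_ENABLE\n", connector);
	else
		printf("%s: already enabled, nothing to do\n", connector);
}

static void disable_decompression(const char *connector)
{
	if (--users == 0)		/* last user clears it again */
		printf("%s: clearing DP_DSC_ENABLE\n", connector);
	else
		printf("%s: still in use, nothing to do\n", connector);
}

int main(void)
{
	enable_decompression("DP-1");
	enable_decompression("DP-2");
	disable_decompression("DP-1");
	disable_decompression("DP-2");
	return 0;
}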
+static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -3771,7 +4038,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return i915->params.enable_dp_mst &&
+ return i915->display.params.enable_dp_mst &&
intel_dp_mst_source_support(intel_dp) &&
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
@@ -3789,13 +4056,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
str_yes_no(sink_can_mst),
- str_yes_no(i915->params.enable_dp_mst));
+ str_yes_no(i915->display.params.enable_dp_mst));
if (!intel_dp_mst_source_support(intel_dp))
return;
intel_dp->is_mst = sink_can_mst &&
- i915->params.enable_dp_mst;
+ i915->display.params.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -3865,11 +4132,16 @@ static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+ if (vsc->revision == 0x6) {
+ sdp->db[0] = 1;
+ sdp->db[3] = 1;
+ }
+
/*
- * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
- * per DP 1.4a spec.
+	 * Revisions 0x5 and 0x7 support Pixel Encoding/Colorimetry
+	 * Format as per the DP 1.4a and DP 2.0 specs, respectively.
*/
- if (vsc->revision != 0x5)
+ if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
goto out;
/* VSC SDP Payload for DB16 through DB18 */
@@ -4049,7 +4321,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
- /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
+ if (!enable && HAS_DSC(dev_priv))
+ val &= ~VDIP_ENABLE_PPS;
+
/* When PSR is enabled, this routine doesn't disable VSC DIP */
if (!crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
@@ -5409,6 +5684,7 @@ intel_dp_detect(struct drm_connector *connector,
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+ intel_dp->psr.sink_panel_replay_support = false;
if (intel_dp->is_mst) {
drm_dbg_kms(&dev_priv->drm,
@@ -6037,8 +6313,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* (eg. Acer Chromebook C710), so we'll check it only if multiple
* ports are attempting to use the same AUX CH, according to VBT.
*/
- if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
- !intel_digital_port_connected(encoder)) {
+ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
/*
* If this fails, presume the DPCD answer came
* from some other port using the same AUX CH.
@@ -6046,10 +6321,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* FIXME maybe cleaner to check this before the
* DPCD read? Would need sort out the VDD handling...
*/
- drm_info(&dev_priv->drm,
- "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
- encoder->base.base.id, encoder->base.name);
- goto out_vdd_off;
+ if (!intel_digital_port_connected(encoder)) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ goto out_vdd_off;
+ }
+
+ /*
+ * Unfortunately even the HPD based detection fails on
+ * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
+ * back to checking for a VGA branch device. Only do this
+ * on known affected platforms to minimize false positives.
+ */
+ if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
+ (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
+ DP_DWN_STRM_PORT_TYPE_ANALOG) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ goto out_vdd_off;
+ }
}
mutex_lock(&dev_priv->drm.mode_config.mutex);
@@ -6238,16 +6530,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- * 0xd. Failure to do so will result in spurious interrupts being
- * generated on the port when a cable is not attached.
- */
- if (IS_G45(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
- intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
- (temp & ~0xf) | 0xd);
- }
-
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 484aea215a25..05db46b111f2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -57,9 +57,12 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable);
+void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *new_crtc_state);
+void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *old_crtc_state);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
@@ -78,6 +81,8 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
+int intel_dp_link_symbol_size(int rate);
+int intel_dp_link_symbol_clock(int rate);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
@@ -98,6 +103,8 @@ bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+ int bw_overhead);
int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
@@ -125,6 +132,10 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots);
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int bpc);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
bool bigjoiner);
@@ -136,7 +147,16 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config);
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+int intel_dp_bw_fec_overhead(bool fec_enabled);
+
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config);
+
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 4431b6290c4c..2e2af71bcd5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -74,7 +74,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (index)
return 0;
@@ -83,12 +83,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 freq;
@@ -101,18 +101,18 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->display.cdclk.hw.cdclk;
+ freq = i915->display.cdclk.hw.cdclk;
else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ freq = RUNTIME_INFO(i915)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -165,12 +165,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 aux_clock_divider)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(i915))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -229,8 +228,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
@@ -531,9 +529,40 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return VLV_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return VLV_DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return VLV_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -550,7 +579,6 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -567,7 +595,6 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -586,7 +613,6 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -605,7 +631,6 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -625,7 +650,6 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -645,7 +669,6 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -668,7 +691,6 @@ static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -691,7 +713,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -702,16 +724,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A);
+ return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
}
}
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -722,10 +744,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index);
+ return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index);
+ return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
}
}
@@ -739,49 +761,52 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
char buf[AUX_CH_NAME_BUFSIZE];
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(i915) >= 14) {
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(i915) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(i915) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(i915)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(i915))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_dp->aux.drm_dev = &dev_priv->drm;
+ intel_dp->aux.drm_dev = &i915->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
- aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch),
+ aux_ch_name(i915, buf, sizeof(buf), aux_ch),
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
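
A rough, self-contained sketch (not part of the diff above) of the 2 MHz AUX clock derivation that the comment in g4x_get_aux_clock_divider() describes: the raw clock, reported in kHz, is divided by 2000. The 100 MHz hrawclk figure below is only an assumed example value.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
    unsigned int rawclk_khz = 100000;   /* assumed 100 MHz hrawclk */
    unsigned int divider = DIV_ROUND_CLOSEST(rawclk_khz, 2000);

    /* 100000 kHz / 50 = 2000 kHz, i.e. the ~2 MHz AUX clock being targeted */
    printf("divider %u -> AUX clock %u kHz\n", divider, rawclk_khz / divider);
    return 0;
}
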
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 26ea7e9f1b89..4f58efdc688a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -146,7 +146,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* HDR static metadata we need to start maintaining table of
* ranges for such panels.
*/
- if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
@@ -489,7 +489,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
/* Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
- switch (i915->params.enable_dpcd_backlight) {
+ switch (i915->display.params.enable_dpcd_backlight) {
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
index 34f6e0a48ed2..e642445364d2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
@@ -21,13 +21,14 @@
#define __xe2lpd_aux_ch_idx(aux_ch) \
(aux_ch >= AUX_CH_USBC1 ? aux_ch : AUX_CH_USBC4 + 1 + (aux_ch) - AUX_CH_A)
-/* TODO: Remove implicit dev_priv */
-#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
-#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPA_AUX_CH_CTL 0x64010
+#define _DPB_AUX_CH_CTL 0x64110
#define _XELPDP_USBC1_AUX_CH_CTL 0x16f210
#define _XELPDP_USBC2_AUX_CH_CTL 0x16f410
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, \
_DPB_AUX_CH_CTL)
+#define VLV_DP_AUX_CH_CTL(aux_ch) _MMIO(VLV_DISPLAY_BASE + \
+ _PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL))
#define _XELPDP_DP_AUX_CH_CTL(aux_ch) \
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
_DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL, \
@@ -69,13 +70,14 @@
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1)
-/* TODO: Remove implicit dev_priv */
-#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPA_AUX_CH_DATA1 0x64014
+#define _DPB_AUX_CH_DATA1 0x64114
#define _XELPDP_USBC1_AUX_CH_DATA1 0x16f214
#define _XELPDP_USBC2_AUX_CH_DATA1 0x16f414
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, \
_DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define VLV_DP_AUX_CH_DATA(aux_ch, i) _MMIO(VLV_DISPLAY_BASE + _PORT(aux_ch, _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
#define _XELPDP_DP_AUX_CH_DATA(aux_ch, i) \
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
_DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1, \
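
To make the new VLV AUX register macros above concrete, here is a small self-contained sketch of how VLV_DP_AUX_CH_CTL() would resolve, assuming AUX_CH_B..AUX_CH_D enumerate as 1..3 and the usual 0x180000 VLV display MMIO base; the simplified _PICK_EVEN() below stands in for the kernel's _PORT() helper.

#include <stdio.h>

#define VLV_DISPLAY_BASE        0x180000    /* assumed VLV display MMIO base */
#define _DPA_AUX_CH_CTL         0x64010
#define _DPB_AUX_CH_CTL         0x64110
#define _PICK_EVEN(x, a, b)     ((a) + (x) * ((b) - (a)))   /* simplified _PORT() */

int main(void)
{
    /* AUX_CH_B, AUX_CH_C, AUX_CH_D assumed to be 1, 2, 3 */
    for (int aux_ch = 1; aux_ch <= 3; aux_ch++)
        printf("aux_ch %d -> CTL register 0x%x\n", aux_ch,
               (unsigned int)(VLV_DISPLAY_BASE +
                              _PICK_EVEN(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)));
    return 0;
}
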
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 7b4628f4f124..63364c9602ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
@@ -43,6 +44,9 @@
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
+#include "intel_psr.h"
+#include "intel_vdsc.h"
#include "skl_scaler.h"
static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
@@ -66,6 +70,73 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
return 0;
}
+static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
+ const struct intel_connector *connector,
+ bool ssc, bool dsc, int bpp_x16)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
+ int dsc_slice_count = 0;
+ int overhead;
+
+ flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
+ flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
+ flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
+
+ if (dsc) {
+ flags |= DRM_DP_BW_OVERHEAD_DSC;
+ /* TODO: add support for bigjoiner */
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->clock,
+ adjusted_mode->hdisplay,
+ false);
+ }
+
+ overhead = drm_dp_bw_overhead(crtc_state->lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slice_count,
+ bpp_x16,
+ flags);
+
+ /*
+ * TODO: clarify whether the minimum implied by the fixed FEC overhead
+ * in the bspec audio programming sequence must also be enforced here.
+ */
+ return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
+}
+
+static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_connector *connector,
+ int overhead,
+ int bpp_x16,
+ struct intel_link_m_n *m_n)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
+ intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ overhead,
+ m_n);
+
+ m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
+}
+
+static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
+{
+ int effective_data_rate =
+ intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);
+
+ /*
+ * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
+ * to calculate PBN with the BW overhead passed to it.
+ */
+ return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
+}
+
static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int max_bpp,
@@ -94,20 +165,67 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
+ if (dsc) {
+ if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
+
+ crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
+ }
+
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
crtc_state->port_clock,
crtc_state->lane_count);
+ drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+ min_bpp, max_bpp);
+
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+ int local_bw_overhead;
+ int remote_bw_overhead;
+ int link_bpp_x16;
+ int remote_tu;
+
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
if (ret)
continue;
- crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- dsc ? bpp << 4 : bpp,
- dsc);
+ link_bpp_x16 = to_bpp_x16(dsc ? bpp :
+ intel_dp_output_bpp(crtc_state->output_format, bpp));
+
+ local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ false, dsc, link_bpp_x16);
+ remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ true, dsc, link_bpp_x16);
+
+ intel_dp_mst_compute_m_n(crtc_state, connector,
+ local_bw_overhead,
+ link_bpp_x16,
+ &crtc_state->dp_m_n);
+
+ /*
+ * The TU size programmed to the HW determines which slots in
+ * an MTP frame are used for this stream, which needs to match
+ * the payload size programmed to the first downstream branch
+ * device's payload table.
+ *
+ * Note that atm the payload's PBN value DRM core sends via
+ * the ALLOCATE_PAYLOAD side-band message matches the payload
+ * size (which it calculates from the PBN value) it programs
+ * to the first branch device's payload table. The allocation
+ * in the payload table could be reduced though (to
+ * crtc_state->dp_m_n.tu), provided that the driver doesn't
+ * enable SSC on the corresponding link.
+ */
+ crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+ link_bpp_x16,
+ remote_bw_overhead);
+
+ remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full);
+
+ drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
+ crtc_state->dp_m_n.tu = remote_tu;
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
connector->port,
@@ -116,13 +234,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
return slots;
if (slots >= 0) {
- ret = drm_dp_mst_atomic_check(state);
- /*
- * If we got slots >= 0 and we can fit those based on check
- * then we can exit the loop. Otherwise keep trying.
- */
- if (!ret)
- break;
+ drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
+
+ break;
}
}
@@ -137,7 +251,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
- crtc_state->dsc.compressed_bpp = bpp;
+ crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
@@ -149,10 +263,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
- int link_bpp;
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
@@ -167,16 +278,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
if (slots < 0)
return slots;
- link_bpp = intel_dp_output_bpp(crtc_state->output_format, crtc_state->pipe_bpp);
-
- intel_link_compute_m_n(link_bpp,
- crtc_state->lane_count,
- adjusted_mode->crtc_clock,
- crtc_state->port_clock,
- &crtc_state->dp_m_n,
- crtc_state->fec_enable);
- crtc_state->dp_m_n.tu = slots;
-
return 0;
}
@@ -188,15 +289,12 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
u8 dsc_max_bpc;
- bool need_timeslot_recalc = false;
- u32 last_compressed_bpp;
+ int min_compressed_bpp, max_compressed_bpp;
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
if (DISPLAY_VER(i915) >= 12)
@@ -232,45 +330,31 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
if (max_bpp > sink_max_bpp)
max_bpp = sink_max_bpp;
- min_bpp = max(min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
- max_bpp = min(max_bpp, to_bpp_int(limits->link.max_bpp_x16));
-
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp,
- min_bpp, limits,
- conn_state, 2 * 3, true);
+ max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ crtc_state,
+ max_bpp / 3);
+ max_compressed_bpp = min(max_compressed_bpp,
+ to_bpp_int(limits->link.max_bpp_x16));
- if (slots < 0)
- return slots;
+ min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
+ min_compressed_bpp = max(min_compressed_bpp,
+ to_bpp_int_roundup(limits->link.min_bpp_x16));
- last_compressed_bpp = crtc_state->dsc.compressed_bpp;
+ drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
+ min_compressed_bpp, max_compressed_bpp);
- crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915,
- last_compressed_bpp,
- crtc_state->pipe_bpp);
+ /* Align compressed bpps according to our own constraints */
+ max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
+ crtc_state->pipe_bpp);
+ min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
+ crtc_state->pipe_bpp);
- if (crtc_state->dsc.compressed_bpp != last_compressed_bpp)
- need_timeslot_recalc = true;
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
+ min_compressed_bpp, limits,
+ conn_state, 1, true);
- /*
- * Apparently some MST hubs dislike if vcpi slots are not matching precisely
- * the actual compressed bpp we use.
- */
- if (need_timeslot_recalc) {
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- crtc_state->dsc.compressed_bpp,
- crtc_state->dsc.compressed_bpp,
- limits, conn_state, 2 * 3, true);
- if (slots < 0)
- return slots;
- }
-
- intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
- crtc_state->lane_count,
- adjusted_mode->crtc_clock,
- crtc_state->port_clock,
- &crtc_state->dp_m_n,
- crtc_state->fec_enable);
- crtc_state->dp_m_n.tu = slots;
+ if (slots < 0)
+ return slots;
return 0;
}
@@ -298,7 +382,102 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
}
static bool
+intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * FIXME: Enabling DSC on ICL results in blank screen and FIFO pipe /
+ * transcoder underruns, re-enable DSC after fixing this issue.
+ */
+ return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state);
+}
+
+static int mode_hblank_period_ns(const struct drm_display_mode *mode)
+{
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
+ NSEC_PER_SEC / 1000),
+ mode->crtc_clock);
+}
+
+static bool
+hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->dp.dsc_hblank_expansion_quirk)
+ return false;
+
+ if (mode_hblank_period_ns(adjusted_mode) > 300)
+ return false;
+
+ return true;
+}
+
+static bool
+adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state,
+ struct link_config_limits *limits,
+ bool dsc)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int min_bpp_x16 = limits->link.min_bpp_x16;
+
+ if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
+ return true;
+
+ if (!dsc) {
+ if (intel_dp_mst_dsc_source_support(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name);
+ return false;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name);
+
+ if (limits->link.max_bpp_x16 < to_bpp_x16(24))
+ return false;
+
+ limits->link.min_bpp_x16 = to_bpp_x16(24);
+
+ return true;
+ }
+
+ drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
+
+ if (limits->max_rate < 540000)
+ min_bpp_x16 = to_bpp_x16(13);
+ else if (limits->max_rate < 810000)
+ min_bpp_x16 = to_bpp_x16(10);
+
+ if (limits->link.min_bpp_x16 >= min_bpp_x16)
+ return true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name,
+ BPP_X16_ARGS(min_bpp_x16));
+
+ if (limits->link.max_bpp_x16 < min_bpp_x16)
+ return false;
+
+ limits->link.min_bpp_x16 = min_bpp_x16;
+
+ return true;
+}
+
+static bool
intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
@@ -326,10 +505,16 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
- return intel_dp_compute_config_link_bpp_limits(intel_dp,
- crtc_state,
- dsc,
- limits);
+ if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
+ crtc_state,
+ dsc,
+ limits))
+ return false;
+
+ return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
+ crtc_state,
+ limits,
+ dsc);
}
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
@@ -339,12 +524,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ const struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
bool dsc_needed;
int ret = 0;
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -354,6 +545,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
dsc_needed = intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
+ connector,
pipe_config,
false,
&limits);
@@ -375,7 +567,11 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
str_yes_no(ret),
str_yes_no(intel_dp->force_dsc_en));
+ if (!intel_dp_mst_dsc_source_support(pipe_config))
+ return -EINVAL;
+
if (!intel_dp_mst_compute_config_limits(intel_dp,
+ connector,
pipe_config,
true,
&limits))
@@ -420,6 +616,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_psr_compute_config(intel_dp, pipe_config, conn_state);
+
return 0;
}
@@ -459,6 +657,130 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
return transcoders;
}
+static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct drm_dp_mst_port *parent_port)
+{
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 mask = 0;
+ int i;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (!conn_state->base.crtc)
+ continue;
+
+ if (&connector->mst_port->mst_mgr != mst_mgr)
+ continue;
+
+ if (connector->port != parent_port &&
+ !drm_dp_mst_port_downstream_of_parent(mst_mgr,
+ connector->port,
+ parent_port))
+ continue;
+
+ mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
+ }
+
+ return mask;
+}
+
+static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ u8 mst_pipe_mask;
+ u8 fec_pipe_mask = 0;
+ int ret;
+
+ mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* Atomic connector check should've added all the MST CRTCs. */
+ if (drm_WARN_ON(&i915->drm, !crtc_state))
+ return -EINVAL;
+
+ if (crtc_state->fec_enable)
+ fec_pipe_mask |= BIT(crtc->pipe);
+ }
+
+ if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
+ return 0;
+
+ limits->force_fec_pipes |= mst_pipe_mask;
+
+ ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
+ mst_pipe_mask);
+
+ return ret ? : -EAGAIN;
+}
+
+static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_dp_mst_port *mst_port;
+ u8 mst_port_pipes;
+ int ret;
+
+ ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
+ if (ret != -ENOSPC)
+ return ret;
+
+ mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);
+
+ ret = intel_link_bw_reduce_bpp(state, limits,
+ mst_port_pipes, "MST link BW");
+
+ return ret ? : -EAGAIN;
+}
+
+/**
+ * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
+ * @state: intel atomic state
+ * @limits: link BW limits
+ *
+ * Check the link configuration for all modeset MST outputs. If the
+ * configuration is invalid @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ * - 0 if the configuration is valid
+ * - %-EAGAIN, if the configuration is invalid and @limits got updated
+ * with fallback values with which the configuration of all CRTCs in
+ * @state must be recomputed
+ * - Other negative error, if the configuration is invalid without a
+ * fallback possibility, or the check failed for another reason
+ */
+int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ int ret;
+ int i;
+
+ for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
+ ret = intel_dp_mst_check_fec_change(state, mgr, limits);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_check_bw(state, mgr, mst_state,
+ limits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -479,19 +801,23 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
* that shares the same MST stream as mode changed,
* intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
* a fastset when possible.
+ *
+ * On TGL+ this is required since each stream goes through a master transcoder,
+ * so if the master transcoder needs modeset, all other streams in the
+ * topology need a modeset. All platforms need to add the atomic state
+ * for all streams in the topology, since a modeset on one may require
+ * changing the MST link BW usage of the others, which in turn needs a
+ * recomputation of the corresponding CRTC states.
*/
static int
-intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
- struct intel_atomic_state *state)
+intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
- if (DISPLAY_VER(dev_priv) < 12)
- return 0;
-
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
@@ -545,7 +871,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
+ ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
@@ -587,10 +913,6 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_dp_mst_topology_state *new_mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
- struct drm_dp_mst_atomic_payload *new_payload =
- drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -598,9 +920,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
- drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
-
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+ intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@@ -634,6 +954,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
+
clear_act_sent(encoder, old_crtc_state);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
@@ -646,6 +968,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (DISPLAY_VER(dev_priv) >= 9)
skl_scaler_disable(old_crtc_state);
else
@@ -662,9 +986,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
- if (last_mst_stream)
- intel_dp_set_infoframes(&dig_port->base, false,
- old_crtc_state, NULL);
+ intel_dp_set_infoframes(&dig_port->base, false,
+ old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
@@ -754,6 +1077,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+ intel_dp_sink_enable_decompression(state, connector, pipe_config);
+
if (first_mst_stream)
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
@@ -776,6 +1101,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
+ intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -792,11 +1118,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
+ bool first_mst_stream = intel_dp->active_mst_links == 1;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
- clear_act_sent(encoder, pipe_config);
-
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
@@ -810,6 +1135,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ clear_act_sent(encoder, pipe_config);
+
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -818,15 +1145,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, pipe_config);
+ if (first_mst_stream)
+ intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
+
drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
- if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
- FECSTALL_DIS_DPTSTREAM_DPTTG);
- else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
- FECSTALL_DIS_DPTSTREAM_DPTTG);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
+ FECSTALL_DIS_DPTSTREAM_DPTTG,
+ pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
intel_audio_sdp_split_update(pipe_config);
@@ -834,12 +1162,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_config);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
-
- /* Enable hdcp if it's desired */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(state, encoder, pipe_config, conn_state);
+ intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -974,8 +1297,20 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (ret)
return ret;
+ /*
+ * TODO:
+ * - Also check if compression would allow for the mode
+ * - Calculate the overhead using drm_dp_bw_overhead() /
+ * drm_dp_bw_channel_coding_efficiency(), similarly to the
+ * compute config code, as drm_dp_calc_pbn_mode() doesn't
+ * account for all the overheads.
+ * - Check here and during compute config the BW reported by
+ * DFP_Link_Available_Payload_Bandwidth_Number (or the
+ * corresponding link capabilities of the sink) in case the
+ * stream is uncompressed for it by the last branch device.
+ */
if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1139,6 +1474,36 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
}
+static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_desc desc;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ if (!connector->dp.dsc_decompression_aux)
+ return false;
+
+ if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
+ &desc, true) < 0)
+ return false;
+
+ if (!drm_dp_has_quirk(&desc,
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
+ return false;
+
+ if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
+ return false;
+
+ if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
+ return false;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
+ connector->base.base.id, connector->base.name);
+
+ return true;
+}
+
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
@@ -1161,6 +1526,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
+ intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
+ intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
+ intel_connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(intel_connector);
+
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
@@ -1172,14 +1542,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
- /*
- * TODO: set the AUX for the actual MST port decompressing the stream.
- * At the moment the driver only supports enabling this globally in the
- * first downstream MST branch, via intel_dp's (root port) AUX.
- */
- intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
- intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
-
for_each_pipe(dev_priv, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
@@ -1260,6 +1622,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->audio_enable = intel_audio_codec_enable;
+ intel_encoder->audio_disable = intel_audio_codec_disable;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
@@ -1407,3 +1771,91 @@ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
return 0;
}
+
+static struct intel_connector *
+get_connector_in_state_for_crtc(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *_connector;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, _connector,
+ old_conn_state, new_conn_state, i) {
+ struct intel_connector *connector =
+ to_intel_connector(_connector);
+
+ if (old_conn_state->crtc == &crtc->base ||
+ new_conn_state->crtc == &crtc->base)
+ return connector;
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
+ * @state: atomic state
+ * @crtc: CRTC for which to check the modeset requirement
+ *
+ * Check if any change in a MST topology requires a forced modeset on @crtc in
+ * this topology. One such change is enabling/disabling the DSC decompression
+ * state in the first branch device's UFP DPCD as required by one CRTC, while
+ * the other @crtc in the same topology is still active, requiring a full modeset
+ * on @crtc.
+ */
+bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_connector *crtc_connector;
+ const struct drm_connector_state *conn_state;
+ const struct drm_connector *_connector;
+ int i;
+
+ if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
+ INTEL_OUTPUT_DP_MST))
+ return false;
+
+ crtc_connector = get_connector_in_state_for_crtc(state, crtc);
+
+ if (!crtc_connector)
+ /* None of the connectors in the topology needs modeset */
+ return false;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ const struct intel_connector *connector =
+ to_intel_connector(_connector);
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc_iter;
+
+ if (connector->mst_port != crtc_connector->mst_port ||
+ !conn_state->crtc)
+ continue;
+
+ crtc_iter = to_intel_crtc(conn_state->crtc);
+
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
+
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (old_crtc_state->dsc.compression_enable ==
+ new_crtc_state->dsc.compression_enable)
+ continue;
+ /*
+ * Toggling the decompression flag because of this stream in
+ * the first downstream branch device's UFP DPCD may reset the
+ * whole branch device. To avoid the reset while other streams
+ * are also active modeset the whole MST topology in this
+ * case.
+ */
+ if (connector->dp.dsc_decompression_aux ==
+ &connector->mst_port->aux)
+ return true;
+ }
+
+ return false;
+}
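
A worked, self-contained example (an editorial sketch, not taken from the patch) of the arithmetic behind the new intel_dp_mst_calc_pbn() above: one PBN unit is 54/64 MB/s, so a kB/s data rate maps to PBN via * 64 / (54 * 1000). The 594 MHz / 24 bpp figures are assumed, and the BW overhead factor is treated as exactly 1.0 for simplicity.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long long pixel_clock_khz = 594000; /* assumed 4k@60-class mode */
    unsigned long long bpp = 24;                 /* uncompressed RGB, overhead factor 1.0 */
    unsigned long long rate_kbytes = pixel_clock_khz * bpp / 8; /* 1782000 kB/s */
    unsigned long long pbn = DIV_ROUND_UP(rate_kbytes * 64, 54 * 1000ULL);

    printf("%llu kB/s -> %llu PBN\n", rate_kbytes, pbn); /* 2112 PBN */
    return 0;
}
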
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f1815bb72267..8ca1d599091c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -13,6 +13,7 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
+struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
@@ -22,5 +23,9 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits);
+bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DP_MST_H__ */
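
The 300 ns cutoff used by the hblank expansion quirk in intel_dp_mst.c above can be sanity-checked with a small standalone sketch; both timings below (a full-blanking CTA-861 style 4k@60 and a reduced-blanking CVT-RB style 4k@60) are assumed example figures, not values from the patch.

#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL(n, d) (((n) + (d) / 2) / (d))

/* hblank duration in nanoseconds: (htotal - hdisplay) pixels at clock_khz kHz */
static unsigned long long hblank_period_ns(unsigned int htotal, unsigned int hdisplay,
                                           unsigned int clock_khz)
{
    return DIV_ROUND_CLOSEST_ULL((unsigned long long)(htotal - hdisplay) * 1000000,
                                 clock_khz);
}

int main(void)
{
    /* assumed CTA-861 4k@60: 560 pixel hblank at 594 MHz -> ~943 ns, no quirk */
    printf("%llu ns\n", hblank_period_ns(4400, 3840, 594000));
    /* assumed CVT-RB style 4k@60: 80 pixel hblank at ~533 MHz -> ~150 ns, below 300 ns */
    printf("%llu ns\n", hblank_period_ns(3920, 3840, 533000));
    return 0;
}
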
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 62b93d097e44..4ca910874a4f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -666,6 +666,20 @@ enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
}
}
+enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
+{
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
+ case PIPE_A:
+ case PIPE_B:
+ return DPIO_PHY0;
+ case PIPE_C:
+ return DPIO_PHY1;
+ }
+}
+
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
@@ -689,50 +703,50 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 val;
int i;
vlv_dpio_get(dev_priv);
/* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
@@ -745,7 +759,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
}
/*
@@ -755,23 +769,23 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* 27 for ch0 and ch1.
*/
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
}
vlv_dpio_put(dev_priv);
@@ -782,43 +796,43 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 val;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
}
}
@@ -829,6 +843,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -851,40 +866,40 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
}
/*
@@ -892,12 +907,12 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
vlv_dpio_put(dev_priv);
}
@@ -910,21 +925,21 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
int data, i, stagger;
u32 val;
vlv_dpio_get(dev_priv);
/* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting*/
@@ -934,7 +949,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
@@ -950,17 +965,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
else
stagger = 0x2;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
}
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -968,7 +983,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -998,19 +1013,20 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
}
vlv_dpio_put(dev_priv);
@@ -1036,22 +1052,22 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+ vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
vlv_dpio_put(dev_priv);
}
@@ -1063,24 +1079,24 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
/* Program Tx lane resets to default */
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
vlv_dpio_put(dev_priv);
}
@@ -1094,23 +1110,24 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
vlv_dpio_put(dev_priv);
}
@@ -1122,10 +1139,10 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 4d43dbbdf81c..9adc4e8c1738 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -44,6 +44,7 @@ u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
+enum dpio_phy vlv_pipe_to_phy(enum pipe pipe);
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
@@ -116,6 +117,10 @@ static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_p
{
return DPIO_PHY0;
}
+static inline enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
+{
+ return DPIO_PHY0;
+}
static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
return DPIO_CH0;
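
The header hunk above only adds the vlv_pipe_to_phy() declaration plus a DPIO_PHY0 stub for display-less builds; the real mapping lives in intel_dpio_phy.c and is not part of this excerpt. A minimal standalone sketch of the assumed layout (pipes A/B behind PHY0, pipe C behind PHY1 on CHV; plain VLV only has PHY0) would be:

	/* Illustrative sketch with simplified local enums, not the i915 headers. */
	enum pipe_sketch { PIPE_A, PIPE_B, PIPE_C };
	enum dpio_phy_sketch { DPIO_PHY0_SKETCH, DPIO_PHY1_SKETCH };

	/* Assumed mapping: display PHY0 serves pipes A/B, PHY1 serves pipe C. */
	static enum dpio_phy_sketch pipe_to_phy_sketch(enum pipe_sketch pipe)
	{
		return pipe == PIPE_C ? DPIO_PHY1_SKETCH : DPIO_PHY0_SKETCH;
	}
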
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d41c1dc9f66c..3038655377ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -16,6 +16,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
@@ -311,7 +312,7 @@ static const struct intel_limit intel_limits_bxt = {
* divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -342,7 +343,7 @@ int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2 * 5;
@@ -368,6 +369,176 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ return 120000;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ return 96000;
+ else
+ return 48000;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+ u32 fp;
+ struct dpll clock;
+ int port_clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = pipe_config->dpll_hw_state.fp0;
+ else
+ fp = pipe_config->dpll_hw_state.fp1;
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev_priv)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (DISPLAY_VER(dev_priv) != 2) {
+ if (IS_PINEVIEW(dev_priv))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return;
+ }
+
+ if (IS_PINEVIEW(dev_priv))
+ port_clock = pnv_calc_dpll_params(refclk, &clock);
+ else
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ } else {
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
+
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+ }
+
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ }
+
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = port_clock;
+}
+
+void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct dpll clock;
+ u32 mdiv;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+}
+
+void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct dpll clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+}
+
/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
@@ -1003,12 +1174,10 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
int ret;
ret = intel_cx0pll_calc_state(crtc_state, encoder);
@@ -1016,10 +1185,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
- if (intel_is_c10phy(i915, phy))
- crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
- else
- crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -1645,7 +1811,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
}
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+ enum dpio_phy phy)
{
u32 reg_val;
@@ -1653,30 +1819,31 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -1694,18 +1861,18 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, pipe);
+ vlv_pllb_recal_opamp(dev_priv, phy);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+ vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -1719,46 +1886,46 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
vlv_dpio_put(dev_priv);
}
@@ -1809,6 +1976,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
@@ -1825,39 +1993,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
5 << DPIO_CHV_S1_DIV_SHIFT |
bestp1 << DPIO_CHV_P1_DIV_SHIFT |
bestp2 << DPIO_CHV_P2_DIV_SHIFT |
1 << DPIO_CHV_K_DIV_SHIFT);
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
DPIO_CHV_M1_DIV_BY_2 |
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
if (bestm2_frac)
dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
/* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
if (!bestm2_frac)
dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
if (vco == 5400000) {
@@ -1882,16 +2050,16 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
/* AFC Recal */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
vlv_dpio_put(dev_priv);
@@ -1903,14 +2071,15 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 tmp;
vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
vlv_dpio_put(dev_priv);
@@ -2031,6 +2200,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2047,9 +2217,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
vlv_dpio_put(dev_priv);
}
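
To put numbers on the new vlv_crtc_clock_get() readback: the divider fields pulled out of VLV_PLL_DW3 feed vlv_calc_dpll_params(), whose body is truncated above but follows the usual vco = refclk * m / n, dot = vco / p relationship. A standalone sketch with hypothetical dividers (m1=2, m2=27, n=1, p1=2, p2=2 against the fixed 100 MHz refclk) lands on a 270 MHz port clock:

	/* Standalone sketch of the VLV PLL readback arithmetic; dividers are made up. */
	#include <stdio.h>

	int main(void)
	{
		int refclk = 100000;		/* kHz, as hardcoded in vlv_crtc_clock_get() */
		int m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 2;
		int m = m1 * m2;		/* 54: feedback divider */
		int p = p1 * p2 * 5;		/* 20: post divider, incl. the fixed /5 */
		int vco = refclk * m / n;	/* 5400000 kHz */
		int dot = vco / p;		/* 270000 kHz -> pipe_config->port_clock */

		printf("vco=%d kHz dot=%d kHz\n", vco, dot);
		return 0;
	}
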
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index bbc30542f29f..ac01bb19cc6c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -20,8 +20,6 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int vlv_calc_dpll_params(int refclk, struct dpll *clock);
-int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
@@ -41,6 +39,13 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 399653a20f98..7958d0bd851e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -219,6 +219,26 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915,
return MG_PLL_ENABLE(tc_port);
}
+static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (pll->info->power_domain)
+ pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
+
+ pll->info->funcs->enable(i915, pll);
+ pll->on = true;
+}
+
+static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+
+ if (pll->info->power_domain)
+ intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
+}
+
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
@@ -258,8 +278,8 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&i915->drm, pll->on);
drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
- pll->info->funcs->enable(i915, pll);
- pll->on = true;
+
+ _intel_enable_shared_dpll(i915, pll);
out:
mutex_unlock(&i915->display.dpll.lock);
@@ -304,8 +324,8 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
goto out;
drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
- pll->info->funcs->disable(i915, pll);
- pll->on = false;
+
+ _intel_disable_shared_dpll(i915, pll);
out:
mutex_unlock(&i915->display.dpll.lock);
@@ -631,9 +651,9 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
};
static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
+ { .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
+ { .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
+ {}
};
static const struct intel_dpll_mgr pch_pll_mgr = {
@@ -1239,13 +1259,16 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
};
static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
+ { .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
+ { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
+ { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
+ { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ {}
};
static const struct intel_dpll_mgr hsw_pll_mgr = {
@@ -1921,11 +1944,12 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
};
static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
+ { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
+ { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
+ {}
};
static const struct intel_dpll_mgr skl_pll_mgr = {
@@ -2376,10 +2400,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
};
static const struct dpll_info bxt_plls[] = {
- { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
- { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { },
+ { .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
+ { .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
+ { .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
+ {}
};
static const struct intel_dpll_mgr bxt_pll_mgr = {
@@ -3834,18 +3858,6 @@ static void combo_pll_enable(struct drm_i915_private *i915,
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
-
- /*
- * We need to disable DC states when this DPLL is enabled.
- * This can be done by taking a reference on DPLL4 power
- * domain.
- */
- pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DC_OFF);
- }
-
icl_pll_power_enable(i915, pll, enable_reg);
icl_dpll_write(i915, pll);
@@ -3941,11 +3953,6 @@ static void combo_pll_disable(struct drm_i915_private *i915,
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
icl_pll_disable(i915, pll, enable_reg);
-
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->info->id == DPLL_ID_EHL_DPLL4)
- intel_display_power_put(i915, POWER_DOMAIN_DC_OFF,
- pll->wakeref);
}
static void tbt_pll_disable(struct drm_i915_private *i915,
@@ -4014,14 +4021,14 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = {
};
static const struct dpll_info icl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
+ { .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
+ { .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
+ { .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
+ {}
};
static const struct intel_dpll_mgr icl_pll_mgr = {
@@ -4035,10 +4042,11 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
};
static const struct dpll_info ehl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
+ .power_domain = POWER_DOMAIN_DC_OFF, },
+ {}
};
static const struct intel_dpll_mgr ehl_pll_mgr = {
@@ -4058,16 +4066,16 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
};
static const struct dpll_info tgl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
- { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
- { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
+ { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
+ { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
+ { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
+ { .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
+ { .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
+ {}
};
static const struct intel_dpll_mgr tgl_pll_mgr = {
@@ -4081,10 +4089,10 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
};
static const struct dpll_info rkl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
+ {}
};
static const struct intel_dpll_mgr rkl_pll_mgr = {
@@ -4097,11 +4105,11 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
};
static const struct dpll_info dg1_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
- { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
- { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
+ { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
+ { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
+ {}
};
static const struct intel_dpll_mgr dg1_pll_mgr = {
@@ -4114,11 +4122,11 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
};
static const struct dpll_info adls_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
- { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
+ { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
+ {}
};
static const struct intel_dpll_mgr adls_pll_mgr = {
@@ -4131,14 +4139,14 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
};
static const struct dpll_info adlp_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
+ { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
+ { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
+ { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
+ {}
};
static const struct intel_dpll_mgr adlp_pll_mgr = {
@@ -4365,12 +4373,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DC_OFF);
- }
+ if (pll->on && pll->info->power_domain)
+ pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
pll->state.pipe_mask = 0;
for_each_intel_crtc(&i915->drm, crtc) {
@@ -4417,8 +4421,7 @@ static void sanitize_dpll_state(struct drm_i915_private *i915,
"%s enabled but not in use, disabling\n",
pll->info->name);
- pll->info->funcs->disable(i915, pll);
- pll->on = false;
+ _intel_disable_shared_dpll(i915, pll);
}
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
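
The dpll_info table rewrites above lean on a basic C guarantee: members omitted from a designated initializer are zero-initialized, so the explicit trailing 0 for .flags can simply go away, and .power_domain stays 0 ("no extra domain") unless spelled out, as for EHL's DPLL 4. A trivial standalone sketch with a stand-in struct, not the real dpll_info:

	#include <assert.h>

	struct info_sketch {			/* stand-in for dpll_info */
		const char *name;
		int id;
		unsigned int power_domain;	/* 0 taken to mean "no extra power domain" */
		unsigned int flags;
	};

	static const struct info_sketch plls[] = {
		{ .name = "DPLL 0", .id = 0, },			/* .flags, .power_domain == 0 */
		{ .name = "DPLL 4", .id = 4, .power_domain = 7, },
		{}						/* zeroed sentinel ends the table */
	};

	int main(void)
	{
		assert(plls[0].flags == 0 && plls[0].power_domain == 0);
		assert(!plls[2].name);				/* the {} terminator */
		return 0;
	}
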
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index dd4796a61751..2e7ea0d8d3ff 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
+#include "intel_display_power.h"
#include "intel_wakeref.h"
#define for_each_shared_dpll(__i915, __pll, __i) \
@@ -270,6 +271,11 @@ struct dpll_info {
*/
enum intel_dpll_id id;
+ /**
+ * @power_domain: extra power domain required by the DPLL
+ */
+ enum intel_display_power_domain power_domain;
+
#define INTEL_DPLL_ALWAYS_ON (1 << 0)
/**
* @flags:
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 48582b31b7f7..b29bceff73f2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,8 +9,6 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -318,25 +316,3 @@ void intel_dpt_destroy(struct i915_address_space *vm)
i915_vm_put(&dpt->vm);
}
-void intel_dpt_configure(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) == 14) {
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
- PLANE_CHICKEN_DISABLE_DPT,
- i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT);
- }
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
- CHICKEN_MISC_DISABLE_DPT,
- i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
- }
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index d9a166550185..e18a9f767b11 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -10,7 +10,6 @@ struct drm_i915_private;
struct i915_address_space;
struct i915_vma;
-struct intel_crtc;
struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
@@ -20,6 +19,5 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
-void intel_dpt_configure(struct intel_crtc *crtc);
#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
new file mode 100644
index 000000000000..cdba47165c04
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpt_common.h"
+
+void intel_dpt_configure(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) == 14) {
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ PLANE_CHICKEN_DISABLE_DPT,
+ i915->display.params.enable_dpt ? 0 :
+ PLANE_CHICKEN_DISABLE_DPT);
+ }
+ } else if (DISPLAY_VER(i915) == 13) {
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ CHICKEN_MISC_DISABLE_DPT,
+ i915->display.params.enable_dpt ? 0 :
+ CHICKEN_MISC_DISABLE_DPT);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.h b/drivers/gpu/drm/i915/display/intel_dpt_common.h
new file mode 100644
index 000000000000..6d7de405126a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_COMMON_H__
+#define __INTEL_DPT_COMMON_H__
+
+struct intel_crtc;
+
+void intel_dpt_configure(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DPT_COMMON_H__ */
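
intel_dpt_configure() in the new file is built entirely on intel_de_rmw(), a clear-then-set read-modify-write helper. A rough standalone sketch of that shape against a plain array (returning the old value is how the i915 helper is commonly used, an assumption here rather than something shown in this diff):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t regs[16];			/* stands in for the MMIO space */

	static uint32_t rmw_sketch(unsigned int reg, uint32_t clear, uint32_t set)
	{
		uint32_t old = regs[reg];

		regs[reg] = (old & ~clear) | set;	/* clear first, then set */
		return old;
	}

	int main(void)
	{
		regs[3] = 0x0000ffff;
		rmw_sketch(3, 0x00000f00, 0x00300000);
		printf("0x%08x\n", regs[3]);		/* 0x0030f0ff */
		return 0;
	}
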
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 78b6fe24dcd8..9598d50f68f2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,9 +4,6 @@
*
*/
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_lmem.h"
-
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -14,12 +11,13 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
+#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"
-struct i915_vma;
+#define CACHELINE_BYTES 64
enum dsb_id {
INVALID_DSB = -1,
@@ -32,8 +30,7 @@ enum dsb_id {
struct intel_dsb {
enum dsb_id id;
- u32 *cmd_buf;
- struct i915_vma *vma;
+ struct intel_dsb_buffer dsb_buf;
struct intel_crtc *crtc;
/*
@@ -109,15 +106,17 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const u32 *buf = dsb->cmd_buf;
int i;
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n",
crtc->base.base.id, crtc->base.name, dsb->id);
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(&i915->drm,
- " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4, buf[i], buf[i+1], buf[i+2], buf[i+3]);
+ " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
+ intel_dsb_buffer_read(&dsb->dsb_buf, i),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
drm_dbg_kms(&i915->drm, "}\n");
}
@@ -129,8 +128,6 @@ static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
- u32 *buf = dsb->cmd_buf;
-
if (!assert_dsb_has_room(dsb))
return;
@@ -139,14 +136,13 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->ins_start_offset = dsb->free_pos;
- buf[dsb->free_pos++] = ldw;
- buf[dsb->free_pos++] = udw;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
u32 opcode, i915_reg_t reg)
{
- const u32 *buf = dsb->cmd_buf;
u32 prev_opcode, prev_reg;
/*
@@ -157,8 +153,10 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
if (dsb->free_pos == 0)
return false;
- prev_opcode = buf[dsb->ins_start_offset + 1] & ~DSB_REG_VALUE_MASK;
- prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
+ prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}
@@ -191,6 +189,8 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val)
{
+ u32 old_val;
+
/*
* For example the buffer will look like below for 3 dwords for auto
* increment register:
@@ -214,31 +214,32 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
(DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
i915_mmio_reg_offset(reg));
} else {
- u32 *buf = dsb->cmd_buf;
-
if (!assert_dsb_has_room(dsb))
return;
/* convert to indexed write? */
if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
- u32 prev_val = buf[dsb->ins_start_offset + 0];
+ u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 0);
- buf[dsb->ins_start_offset + 0] = 1; /* count */
- buf[dsb->ins_start_offset + 1] =
- (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
- buf[dsb->ins_start_offset + 2] = prev_val;
+ intel_dsb_buffer_write(&dsb->dsb_buf,
+ dsb->ins_start_offset + 0, 1); /* count */
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg));
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
dsb->free_pos++;
}
- buf[dsb->free_pos++] = val;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
/* Update the count */
- buf[dsb->ins_start_offset]++;
+ old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
/* if number of data words is odd, then the last dword should be 0.*/
if (dsb->free_pos & 0x1)
- buf[dsb->free_pos] = 0;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
}
@@ -297,8 +298,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
- memset(&dsb->cmd_buf[dsb->free_pos], 0,
- aligned_tail - tail);
+ intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
@@ -317,7 +318,7 @@ void intel_dsb_finish(struct intel_dsb *dsb)
intel_dsb_align_tail(dsb);
- i915_gem_object_flush_map(dsb->vma->obj);
+ intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}
static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
@@ -361,7 +362,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
ctrl | DSB_ENABLE);
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma));
+ intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
if (dewake_scanline >= 0) {
int diff, hw_dewake_scanline;
@@ -383,7 +384,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
}
intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma) + tail);
+ intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}
/**
@@ -408,7 +409,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
- u32 offset = i915_ggtt_offset(dsb->vma);
+ u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
@@ -445,12 +446,9 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
struct intel_dsb *dsb;
- struct i915_vma *vma;
unsigned int size;
- u32 *buf;
if (!HAS_DSB(i915))
return NULL;
@@ -464,37 +462,13 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
- if (HAS_LMEM(i915)) {
- obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
- I915_BO_ALLOC_CONTIGUOUS);
- if (IS_ERR(obj))
- goto out_put_rpm;
- } else {
- obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
- if (IS_ERR(obj))
- goto out_put_rpm;
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- }
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- goto out_put_rpm;
- }
-
- buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
- if (IS_ERR(buf)) {
- i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- }
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
dsb->id = DSB1;
- dsb->vma = vma;
dsb->crtc = crtc;
- dsb->cmd_buf = buf;
dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
@@ -522,6 +496,6 @@ out:
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ intel_dsb_buffer_cleanup(&dsb->dsb_buf);
kfree(dsb);
}
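
The intel_dsb.c hunks above swap direct cmd_buf[] accesses for intel_dsb_buffer_read()/write() but keep the indexed-write packing itself: dword 0 holds the value count, dword 1 the opcode plus register offset, the values follow, and an odd value count gets a zero pad. A standalone sketch of that layout with placeholder encodings (not the real DSB opcode bits):

	#include <stdio.h>

	#define OP_INDEXED_WRITE	0x50000000u	/* placeholder opcode */
	#define REG_OFFSET		0x00070080u	/* placeholder register offset */

	int main(void)
	{
		unsigned int buf[8] = {0};
		unsigned int vals[] = { 0x11, 0x22, 0x33 };
		unsigned int pos = 0;

		buf[pos++] = 3;				/* value count */
		buf[pos++] = OP_INDEXED_WRITE | REG_OFFSET;
		for (unsigned int i = 0; i < 3; i++)
			buf[pos++] = vals[i];
		buf[pos++] = 0;				/* odd value count -> pad dword */

		for (unsigned int i = 0; i < pos; i++)
			printf("0x%02x: 0x%08x\n", i * 4, buf[i]);
		return 0;
	}
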
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
new file mode 100644
index 000000000000..c77d48bda26a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_display_types.h"
+#include "intel_dsb_buffer.h"
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+{
+ return i915_ggtt_offset(dsb_buf->vma);
+}
+
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+{
+ dsb_buf->cmd_buf[idx] = val;
+}
+
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+{
+ return dsb_buf->cmd_buf[idx];
+}
+
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+{
+ WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
+
+ memset(&dsb_buf->cmd_buf[idx], val, size);
+}
+
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return false;
+ } else {
+ obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return false;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return false;
+ }
+
+ buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ return false;
+ }
+
+ dsb_buf->vma = vma;
+ dsb_buf->cmd_buf = buf;
+ dsb_buf->buf_size = size;
+
+ return true;
+}
+
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+{
+ i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
+}
+
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+{
+ i915_gem_object_flush_map(dsb_buf->vma->obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
new file mode 100644
index 000000000000..425acd393905
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_BUFFER_H
+#define _INTEL_DSB_BUFFER_H
+
+#include <linux/types.h>
+
+struct intel_crtc;
+struct i915_vma;
+
+struct intel_dsb_buffer {
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+ size_t buf_size;
+};
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf,
+ size_t size);
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 24b2cbcfc1ef..275d0218394c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -55,43 +55,6 @@
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
-/* base offsets for gpio pads */
-#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
-#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
-#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
-#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
-#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
-#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
-#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
-#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
-#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
-#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
-#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
-#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
-
-#define VLV_GPIO_PCONF0(base_offset) (base_offset)
-#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
-
-struct gpio_map {
- u16 base_offset;
- bool init;
-};
-
-static struct gpio_map vlv_gpio_table[] = {
- { VLV_GPIO_NC_0_HV_DDI0_HPD },
- { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
- { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
- { VLV_GPIO_NC_3_PANEL0_VDDEN },
- { VLV_GPIO_NC_4_PANEL0_BKLTEN },
- { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
- { VLV_GPIO_NC_6_HV_DDI1_HPD },
- { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
- { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
- { VLV_GPIO_NC_9_PANEL1_VDDEN },
- { VLV_GPIO_NC_10_PANEL1_BKLTEN },
- { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
-};
-
struct i2c_adapter_lookup {
u16 slave_addr;
struct intel_dsi *intel_dsi;
@@ -103,19 +66,6 @@ struct i2c_adapter_lookup {
#define CHV_GPIO_IDX_START_SW 100
#define CHV_GPIO_IDX_START_SE 198
-#define CHV_VBT_MAX_PINS_PER_FMLY 15
-
-#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
-#define CHV_GPIO_GPIOEN (1 << 15)
-#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
-#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
-#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
-#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
-#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
-
-#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
-#define CHV_GPIO_CFGLOCK (1 << 31)
-
/* ICL DSI Display GPIO Pins */
#define ICL_GPIO_DDSP_HPD_A 0
#define ICL_GPIO_L_VDDEN_1 1
@@ -142,7 +92,7 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
if (seq_port) {
if (intel_dsi->ports & BIT(PORT_B))
return PORT_B;
- else if (intel_dsi->ports & BIT(PORT_C))
+ if (intel_dsi->ports & BIT(PORT_C))
return PORT_C;
}
@@ -243,75 +193,93 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static void vlv_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
+ const char *con_id, u8 idx, bool value)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct gpio_map *map;
- u16 pconf0, padval;
- u32 tmp;
- u8 port;
-
- if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
- drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
- gpio_index);
- return;
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
+
+ if (gpio_desc) {
+ gpiod_set_value(gpio_desc, value);
+ } else {
+ gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_desc)) {
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%pe)\n",
+ gpio_index, gpio_desc);
+ return;
+ }
+
+ soc_gpio_table[gpio_index] = gpio_desc;
}
+}
- map = &vlv_gpio_table[gpio_index];
+static void soc_opaque_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_index, const char *chip,
+ const char *con_id, u8 idx, bool value)
+{
+ struct gpiod_lookup_table *lookup;
- if (connector->panel.vbt.dsi.seq_version >= 3) {
- /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
- port = IOSF_PORT_GPIO_NC;
- } else {
- if (gpio_source == 0) {
- port = IOSF_PORT_GPIO_NC;
- } else if (gpio_source == 1) {
+ lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+ if (!lookup)
+ return;
+
+ lookup->dev_id = "0000:00:02.0";
+ lookup->table[0] =
+ GPIO_LOOKUP_IDX(chip, idx, con_id, idx, GPIO_ACTIVE_HIGH);
+
+ gpiod_add_lookup_table(lookup);
+
+ soc_gpio_set_value(connector, gpio_index, con_id, idx, value);
+
+ gpiod_remove_lookup_table(lookup);
+ kfree(lookup);
+}
+
+static void vlv_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ if (connector->panel.vbt.dsi.seq_version < 3) {
+ if (gpio_source == 1) {
drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
return;
- } else {
+ }
+ if (gpio_source > 1) {
drm_dbg_kms(&dev_priv->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
}
- pconf0 = VLV_GPIO_PCONF0(map->base_offset);
- padval = VLV_GPIO_PAD_VAL(map->base_offset);
-
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
- if (!map->init) {
- /* FIXME: remove constant below */
- vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
- map->init = true;
- }
-
- tmp = 0x4 | value;
- vlv_iosf_sb_write(dev_priv, port, padval, tmp);
- vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ soc_opaque_gpio_set_value(connector, gpio_index,
+ "INT33FC:01", "Panel N", gpio_index, value);
}
-static void chv_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void chv_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u16 cfg0, cfg1;
- u16 family_num;
- u8 port;
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
/* XXX: it's unclear whether 255->57 is part of SE. */
- gpio_index -= CHV_GPIO_IDX_START_SE;
- port = CHV_IOSF_PORT_GPIO_SE;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:03", "Panel SE",
+ gpio_index - CHV_GPIO_IDX_START_SE, value);
} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
- gpio_index -= CHV_GPIO_IDX_START_SW;
- port = CHV_IOSF_PORT_GPIO_SW;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:00", "Panel SW",
+ gpio_index - CHV_GPIO_IDX_START_SW, value);
} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
- gpio_index -= CHV_GPIO_IDX_START_E;
- port = CHV_IOSF_PORT_GPIO_E;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:02", "Panel E",
+ gpio_index - CHV_GPIO_IDX_START_E, value);
} else {
- port = CHV_IOSF_PORT_GPIO_N;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+ gpio_index - CHV_GPIO_IDX_START_N, value);
}
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
@@ -328,56 +296,15 @@ static void chv_exec_gpio(struct intel_connector *connector,
return;
}
- port = CHV_IOSF_PORT_GPIO_N;
- }
-
- family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
- gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
-
- cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
- cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
-
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
- vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
- vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
- CHV_GPIO_GPIOTXSTATE(value));
- vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-}
-
-static void bxt_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- /* XXX: this table is a quick ugly hack. */
- static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
- struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
-
- if (!gpio_desc) {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
- NULL, gpio_index,
- value ? GPIOD_OUT_LOW :
- GPIOD_OUT_HIGH);
-
- if (IS_ERR_OR_NULL(gpio_desc)) {
- drm_err(&dev_priv->drm,
- "GPIO index %u request failed (%ld)\n",
- gpio_index, PTR_ERR(gpio_desc));
- return;
- }
-
- bxt_gpio_table[gpio_index] = gpio_desc;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+ gpio_index - CHV_GPIO_IDX_START_N, value);
}
-
- gpiod_set_value(gpio_desc, value);
}
-static void icl_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void bxt_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+ soc_gpio_set_value(connector, gpio_index, NULL, gpio_index, value);
}
enum {
@@ -462,44 +389,45 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct intel_connector *connector = intel_dsi->attached_connector;
- u8 gpio_source, gpio_index = 0, gpio_number;
+ u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
- bool native = DISPLAY_VER(dev_priv) >= 11;
+ int size;
+ bool native = DISPLAY_VER(i915) >= 11;
- if (connector->panel.vbt.dsi.seq_version >= 3)
- gpio_index = *data++;
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ size = 3;
- gpio_number = *data++;
+ gpio_index = data[0];
+ gpio_number = data[1];
+ value = data[2] & BIT(0);
- /* gpio source in sequence v2 only */
- if (connector->panel.vbt.dsi.seq_version == 2)
- gpio_source = (*data >> 1) & 3;
- else
- gpio_source = 0;
+ if (connector->panel.vbt.dsi.seq_version >= 4 && data[2] & BIT(1))
+ native = false;
+ } else {
+ size = 2;
- if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
- native = false;
+ gpio_number = data[0];
+ value = data[1] & BIT(0);
- /* pull up/down */
- value = *data++ & 1;
+ if (connector->panel.vbt.dsi.seq_version == 2)
+ gpio_source = (data[1] >> 1) & 3;
+ }
- drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(dev_priv, gpio_number, value);
- else if (DISPLAY_VER(dev_priv) >= 11)
- icl_exec_gpio(connector, gpio_source, gpio_index, value);
- else if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(connector, gpio_source, gpio_number, value);
- else
- bxt_exec_gpio(connector, gpio_source, gpio_index, value);
-
- return data;
+ icl_native_gpio_set_value(i915, gpio_number, value);
+ else if (DISPLAY_VER(i915) >= 9)
+ bxt_gpio_set_value(connector, gpio_index, value);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
+ else if (IS_CHERRYVIEW(i915))
+ chv_gpio_set_value(connector, gpio_source, gpio_number, value);
+
+ return data + size;
}
#ifdef CONFIG_ACPI
@@ -658,6 +586,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
+ [MIPI_SEQ_END] = "MIPI_SEQ_END",
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
@@ -673,10 +602,10 @@ static const char * const seq_name[] = {
static const char *sequence_name(enum mipi_seq seq_id)
{
- if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+ if (seq_id < ARRAY_SIZE(seq_name))
return seq_name[seq_id];
- else
- return "(unknown)";
+
+ return "(unknown)";
}
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
@@ -707,13 +636,10 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (connector->panel.vbt.dsi.seq_version >= 3)
data += 4;
- while (1) {
+ while (*data != MIPI_SEQ_ELEM_END) {
u8 operation_byte = *data++;
u8 operation_size = 0;
- if (operation_byte == MIPI_SEQ_ELEM_END)
- break;
-
if (operation_byte < ARRAY_SIZE(exec_elem))
mipi_elem_exec = exec_elem[operation_byte];
else
@@ -873,36 +799,34 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
* multiply by 100 to preserve remainder
*/
if (intel_dsi->video_mode == BURST_MODE) {
- if (mipi_config->target_burst_mode_freq) {
- u32 bitrate = intel_dsi_bitrate(intel_dsi);
-
- /*
- * Sometimes the VBT contains a slightly lower clock,
- * then the bitrate we have calculated, in this case
- * just replace it with the calculated bitrate.
- */
- if (mipi_config->target_burst_mode_freq < bitrate &&
- intel_fuzzy_clock_check(
- mipi_config->target_burst_mode_freq,
- bitrate))
- mipi_config->target_burst_mode_freq = bitrate;
-
- if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm,
- "Burst mode freq is less than computed\n");
- return false;
- }
+ u32 bitrate;
- burst_mode_ratio = DIV_ROUND_UP(
- mipi_config->target_burst_mode_freq * 100,
- bitrate);
+ if (mipi_config->target_burst_mode_freq == 0) {
+ drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ return false;
+ }
- intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
- } else {
- drm_err(&dev_priv->drm,
- "Burst mode target is not set\n");
+ bitrate = intel_dsi_bitrate(intel_dsi);
+
+ /*
+ * Sometimes the VBT contains a slightly lower clock than the
+ * bitrate we have calculated; in this case just replace it
+ * with the calculated bitrate.
+ */
+ if (mipi_config->target_burst_mode_freq < bitrate &&
+ intel_fuzzy_clock_check(mipi_config->target_burst_mode_freq,
+ bitrate))
+ mipi_config->target_burst_mode_freq = bitrate;
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
return false;
}
+
+ burst_mode_ratio =
+ DIV_ROUND_UP(mipi_config->target_burst_mode_freq * 100, bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else
burst_mode_ratio = 100;
@@ -964,6 +888,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ struct gpiod_lookup_table *gpiod_lookup_table = NULL;
bool want_backlight_gpio = false;
bool want_panel_gpio = false;
struct pinctrl *pinctrl;
@@ -971,12 +896,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
- gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
- gpiod_add_lookup_table(&soc_panel_gpio_table);
+ gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -993,6 +918,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
"Failed to set pinmux to PWM\n");
}
+ if (gpiod_lookup_table)
+ gpiod_add_lookup_table(gpiod_lookup_table);
+
if (want_panel_gpio) {
intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
@@ -1011,15 +939,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
intel_dsi->gpio_backlight = NULL;
}
}
+
+ if (gpiod_lookup_table)
+ gpiod_remove_lookup_table(gpiod_lookup_table);
}
void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_connector *connector = intel_dsi->attached_connector;
- struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
-
if (intel_dsi->gpio_panel) {
gpiod_put(intel_dsi->gpio_panel);
intel_dsi->gpio_panel = NULL;
@@ -1029,13 +955,4 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
gpiod_put(intel_dsi->gpio_backlight);
intel_dsi->gpio_backlight = NULL;
}
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- mipi_config->pwm_blc == PPS_BLC_PMIC)
- gpiod_remove_lookup_table(&pmic_panel_gpio_table);
-
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
- pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
- gpiod_remove_lookup_table(&soc_panel_gpio_table);
- }
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 19b35ece31f1..6d48aa3af95a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -764,7 +764,7 @@ bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
{
- return fb && to_i915(fb->dev)->params.enable_dpt &&
+ return fb && to_i915(fb->dev)->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
}
@@ -1930,10 +1930,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (!atomic_read(&front->bits))
return 0;
- if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false)))
+ if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false)))
goto flush;
- ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false),
+ ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false),
&fence);
if (ret || !fence)
goto flush;
@@ -2093,7 +2093,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- fb->obj[i] = &obj->base;
+ fb->obj[i] = intel_bo_to_drm_bo(obj);
}
ret = intel_fill_fb_info(dev_priv, intel_fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 4820d21cc942..63f389a1707d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -608,6 +608,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
if (DISPLAY_VER(i915) >= 10)
glk_fbc_program_cfb_stride(fbc);
@@ -617,8 +618,13 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
if (intel_gt_support_legacy_fencing(to_gt(i915)))
snb_fbc_program_fence(fbc);
+ /* wa_14019417088 Alternative WA*/
+ dpfc_ctl = ivb_dpfc_ctl(fbc);
+ if (DISPLAY_VER(i915) >= 20)
+ intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
- DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
+ DPFC_CTL_EN | dpfc_ctl);
}
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
@@ -1022,10 +1028,13 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int effective_w, effective_h, max_w, max_h;
- if (DISPLAY_VER(i915) >= 10) {
+ if (DISPLAY_VER(i915) >= 11) {
+ max_w = 8192;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 10) {
max_w = 5120;
max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ } else if (DISPLAY_VER(i915) >= 7) {
max_w = 4096;
max_h = 4096;
} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
@@ -1044,6 +1053,31 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
return effective_w <= max_w && effective_h <= max_h;
}
+static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int w, h, max_w, max_h;
+
+ if (DISPLAY_VER(i915) >= 10) {
+ max_w = 5120;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ max_w = 4096;
+ max_h = 4096;
+ } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
+ max_w = 4096;
+ max_h = 2048;
+ } else {
+ max_w = 2048;
+ max_h = 1536;
+ }
+
+ w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ return w <= max_w && h <= max_h;
+}
+
static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1174,7 +1208,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!i915->params.enable_fbc) {
+ if (!i915->display.params.enable_fbc) {
plane_state->no_fbc_reason = "disabled per module param or by default";
return 0;
}
@@ -1241,11 +1275,16 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ if (!intel_fbc_plane_size_valid(plane_state)) {
plane_state->no_fbc_reason = "plane size too big";
return 0;
}
+ if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ plane_state->no_fbc_reason = "surface size too big";
+ return 0;
+ }
+
/*
* Work around a problem on GEN9+ HW, where enabling FBC on a plane
* having a Y offset that isn't divisible by 4 causes FIFO underrun
@@ -1751,8 +1790,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
{
- if (i915->params.enable_fbc >= 0)
- return !!i915->params.enable_fbc;
+ if (i915->display.params.enable_fbc >= 0)
+ return !!i915->display.params.enable_fbc;
if (!HAS_FBC(i915))
return 0;
@@ -1824,9 +1863,9 @@ void intel_fbc_init(struct drm_i915_private *i915)
if (need_fbc_vtd_wa(i915))
DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
- i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
+ i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
- i915->params.enable_fbc);
+ i915->display.params.enable_fbc);
for_each_fbc_id(i915, fbc_id)
i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index e6429dfebe15..295a0f24ebbf 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -10,6 +10,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -338,8 +339,11 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = lane;
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false);
+ intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
+ lane, fdi_dotclock,
+ link_bw,
+ intel_dp_bw_fec_overhead(false),
+ &pipe_config->fdi_m_n);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index ec46716b2f49..2ea37c0414a9 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -265,8 +265,6 @@ static void frontbuffer_release(struct kref *ref)
spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock);
i915_active_fini(&front->write);
-
- i915_gem_object_put(obj);
kfree_rcu(front, rcu);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index c89da3568ebd..39b3f7c0c77c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -923,7 +923,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return 0;
}
-static int _intel_hdcp_enable(struct intel_connector *connector)
+static int intel_hdcp1_enable(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1058,7 +1058,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- ret = _intel_hdcp_enable(connector);
+ ret = intel_hdcp1_enable(connector);
if (ret) {
drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
intel_hdcp_update_value(connector,
@@ -2324,10 +2324,10 @@ intel_hdcp_set_streams(struct intel_digital_port *dig_port,
return 0;
}
-int intel_hdcp_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static int _intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector =
@@ -2388,7 +2388,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
*/
if (ret && intel_hdcp_capable(connector) &&
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
- ret = _intel_hdcp_enable(connector);
+ ret = intel_hdcp1_enable(connector);
}
if (!ret) {
@@ -2404,6 +2404,27 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
return ret;
}
+void intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ /*
+ * Enable HDCP if it's desired, or if userspace enabled it and the
+ * driver set its state to undesired
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+ _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+}
+
int intel_hdcp_disable(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2491,7 +2512,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
}
if (desired_and_not_enabled || content_protection_type_changed)
- intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+ _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
void intel_hdcp_component_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 5997c52a0958..a9c784fd9ba5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -28,10 +28,10 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
int intel_hdcp_init(struct intel_connector *connector,
struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state);
+void intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ac315f8e7820..ab18cfc19c0a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -3030,16 +3030,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- * 0xd. Failure to do so will result in spurious interrupts being
- * generated on the port when a cable is not attached.
- */
- if (IS_G45(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
- intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
- (temp & ~0xf) | 0xd);
- }
-
cec_fill_conn_info_from_drm(&conn_info, connector);
intel_hdmi->cec_notifier =
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index f07047e9cb30..04f62f27ad74 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1361,11 +1361,24 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
bxt_hpd_detection_setup(dev_priv);
}
+static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+{
+ /*
+ * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+}
+
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
+ if (IS_G45(i915))
+ g45_hpd_peg_band_gap_wa(i915);
+
/* HPD sense and interrupt enable are one and the same */
i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
}
@@ -1389,6 +1402,9 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ if (IS_G45(dev_priv))
+ g45_hpd_peg_band_gap_wa(dev_priv);
+
/* Ignore TV since it's buggy */
i915_hotplug_interrupt_update_locked(dev_priv,
HOTPLUG_INT_EN_MASK |
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index c5eb5f242536..9c6d35a405a1 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -7,6 +7,7 @@
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
@@ -21,6 +22,7 @@ void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_
{
enum pipe pipe;
+ limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(i915, pipe)
limits->max_bpp_x16[pipe] = INT_MAX;
@@ -53,11 +55,11 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe max_bpp_pipe = INVALID_PIPE;
struct intel_crtc *crtc;
- int max_bpp = 0;
+ int max_bpp_x16 = 0;
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
- int link_bpp;
+ int link_bpp_x16;
if (limits->bpp_limit_reached_pipes & BIT(crtc->pipe))
continue;
@@ -68,7 +70,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
return PTR_ERR(crtc_state);
if (crtc_state->dsc.compression_enable)
- link_bpp = crtc_state->dsc.compressed_bpp;
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
else
/*
* TODO: for YUV420 the actual link bpp is only half
@@ -76,10 +78,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
* is based on the pipe bpp value, set the actual link bpp
* limit here once the MST BW allocation is fixed.
*/
- link_bpp = crtc_state->pipe_bpp;
+ link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp);
- if (link_bpp > max_bpp) {
- max_bpp = link_bpp;
+ if (link_bpp_x16 > max_bpp_x16) {
+ max_bpp_x16 = link_bpp_x16;
max_bpp_pipe = crtc->pipe;
}
}
@@ -87,7 +89,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
if (max_bpp_pipe == INVALID_PIPE)
return -ENOSPC;
- limits->max_bpp_x16[max_bpp_pipe] = to_bpp_x16(max_bpp) - 1;
+ limits->max_bpp_x16[max_bpp_pipe] = max_bpp_x16 - 1;
return intel_modeset_pipes_in_mask_early(state, reason,
BIT(max_bpp_pipe));
@@ -143,6 +145,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
/* TODO: Check additional shared display link configurations like MST */
int ret;
+ ret = intel_dp_mst_atomic_check_link(state, limits);
+ if (ret)
+ return ret;
+
ret = intel_fdi_atomic_check_link(state, limits);
if (ret)
return ret;
@@ -158,6 +164,12 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
bool bpps_changed = false;
enum pipe pipe;
+ /* FEC can't be forced off after it was forced on. */
+ if (drm_WARN_ON(&i915->drm,
+ (old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
+ old_limits->force_fec_pipes))
+ return false;
+
for_each_pipe(i915, pipe) {
/* The bpp limit can only decrease. */
if (drm_WARN_ON(&i915->drm,
@@ -172,7 +184,9 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
/* At least one limit must change. */
if (drm_WARN_ON(&i915->drm,
- !bpps_changed))
+ !bpps_changed &&
+ new_limits->force_fec_pipes ==
+ old_limits->force_fec_pipes))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index e07df22a779a..2cf57307cc24 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -16,6 +16,7 @@ struct intel_atomic_state;
struct intel_crtc_state;
struct intel_link_bw_limits {
+ u8 force_fec_pipes;
u8 bpp_limit_reached_pipes;
/* in 1/16 bpp units */
int max_bpp_x16[I915_MAX_PIPES];
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 2a4ca7e65775..4b114fde57b1 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -794,8 +794,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
unsigned int val;
/* use the module option value if specified */
- if (i915->params.lvds_channel_mode > 0)
- return i915->params.lvds_channel_mode == 2;
+ if (i915->display.params.lvds_channel_mode > 0)
+ return i915->display.params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index b8f43efb0ab5..94eece7f63be 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -318,6 +318,12 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
+ if (crtc_state->dsc.compression_enable) {
+ drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ connector->dp.dsc_decompression_enabled = true;
+ } else {
+ connector->dp.dsc_decompression_enabled = false;
+ }
conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 5e1c2c780412..076298a8d405 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -244,7 +244,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
verify_crtc_state(state, crtc);
intel_shared_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
- intel_c10pll_state_verify(state, crtc);
+ intel_cx0pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 84078fb82b2f..1ce785db6a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -841,7 +841,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
const struct firmware *fw = NULL;
- const char *name = dev_priv->params.vbt_firmware;
+ const char *name = dev_priv->display.params.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 483beedac5b8..0d8e5320a4f8 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -46,8 +46,8 @@
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
- if (i915->params.panel_use_ssc >= 0)
- return i915->params.panel_use_ssc != 0;
+ if (i915->display.params.panel_use_ssc >= 0)
+ return i915->display.params.panel_use_ssc != 0;
return i915->display.vbt.lvds_use_ssc &&
!intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 866786e6b32f..baf679759e00 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -8,6 +8,7 @@
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_lvds.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 73f0f1714b37..a8fa3a20990e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -90,7 +90,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.pps_pipe;
bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4f1f31fc9529..15c1804dcd59 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -29,6 +29,7 @@
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -172,6 +173,15 @@
* irrelevant for normal operation.
*/
+bool intel_encoder_can_psr(struct intel_encoder *encoder)
+{
+ if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
+ return CAN_PSR(enc_to_intel_dp(encoder)) ||
+ CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
+ else
+ return false;
+}
+
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
@@ -179,9 +189,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (i915->params.enable_psr == -1)
+ if (i915->display.params.enable_psr == -1)
return connector->panel.vbt.psr.enable;
- return i915->params.enable_psr;
+ return i915->display.params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -198,7 +208,7 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (i915->params.enable_psr == 1)
+ if (i915->display.params.enable_psr == 1)
return false;
return true;
}
@@ -474,27 +484,41 @@ exit:
intel_dp->psr.su_y_granularity = y;
}
-void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 pr_dpcd = 0;
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ intel_dp->psr.sink_panel_replay_support = false;
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
- if (!intel_dp->psr_dpcd[0])
+ if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
+ drm_dbg_kms(&i915->drm,
+ "Panel replay is not supported by panel\n");
return;
- drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Panel replay is supported by panel\n");
+ intel_dp->psr.sink_panel_replay_support = true;
+}
+
+static void _psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -503,8 +527,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ if (DISPLAY_VER(i915) >= 9 &&
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
bool alpm = intel_dp_get_alpm_status(intel_dp);
@@ -521,14 +545,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* GTC first.
*/
intel_dp->psr.sink_psr2_support = y_req && alpm;
- drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
+ }
+}
- if (intel_dp->psr.sink_psr2_support) {
- intel_dp->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
- intel_dp_get_su_granularity(intel_dp);
- }
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ _panel_replay_init_dpcd(intel_dp);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (intel_dp->psr_dpcd[0])
+ _psr_init_dpcd(intel_dp);
+
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ intel_dp_get_su_granularity(intel_dp);
}
}
@@ -574,8 +609,11 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
- /* Enable ALPM at sink for psr2 */
+ if (intel_dp->psr.panel_replay_enabled)
+ return;
+
if (intel_dp->psr.psr2_enabled) {
+ /* Enable ALPM at sink for psr2 */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
@@ -592,6 +630,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
+ if (intel_dp->psr.entry_setup_frames > 0)
+ dpcd_val |= DP_PSR_FRAME_CAPTURE;
+
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -606,7 +647,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0us;
- if (dev_priv->params.psr_safest_params) {
+ if (dev_priv->display.params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
@@ -690,6 +731,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
+
intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
@@ -700,7 +744,7 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (dev_priv->params.psr_safest_params)
+ if (dev_priv->display.params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
@@ -727,11 +771,38 @@ static int psr2_block_count(struct intel_dp *intel_dp)
return psr2_block_count_lines(intel_dp) / 4;
}
+static u8 frames_before_su_entry(struct intel_dp *intel_dp)
+{
+ u8 frames_before_su_entry;
+
+ frames_before_su_entry = max_t(u8,
+ intel_dp->psr.sink_sync_latency + 1,
+ 2);
+
+ /* Entry setup frames must be at least 1 less than frames before SU entry */
+ if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
+ frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
+
+ return frames_before_su_entry;
+}
+
+static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
+
+ intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
+ TRANS_DP2_PANEL_REPLAY_ENABLE);
+}
+
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
+ u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
@@ -741,7 +812,8 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
+
val |= intel_psr2_get_tp_time(intel_dp);
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -785,6 +857,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
+
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
@@ -798,7 +873,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
+ intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}
@@ -943,7 +1018,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!dev_priv->params.enable_psr2_sel_fetch &&
+ if (!dev_priv->display.params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
@@ -1056,7 +1131,7 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
fast_wake_lines > max_wake_lines)
return false;
- if (i915->params.psr_safest_params)
+ if (i915->display.params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
@@ -1066,6 +1141,39 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
return true;
}
+static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+ int entry_setup_frames = 0;
+
+ if (psr_setup_time < 0) {
+ drm_dbg_kms(&i915->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
+ return -ETIME;
+ }
+
+ if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+ if (DISPLAY_VER(i915) >= 20) {
+ /* setup entry frames can be up to 3 frames */
+ entry_setup_frames = 1;
+ drm_dbg_kms(&i915->drm,
+ "PSR setup entry frames %d\n",
+ entry_setup_frames);
+ } else {
+ drm_dbg_kms(&i915->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
+ return -ETIME;
+ }
+ }
+
+ return entry_setup_frames;
+}
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -1206,24 +1314,42 @@ unsupported:
return false;
}
-void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static bool _psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int psr_setup_time;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int entry_setup_frames;
/*
* Current PSR panels don't work reliably with VRR enabled
* So if VRR is enabled, do not enable PSR.
*/
if (crtc_state->vrr.enable)
- return;
+ return false;
if (!CAN_PSR(intel_dp))
- return;
+ return false;
+
+ entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
+
+ if (entry_setup_frames >= 0) {
+ intel_dp->psr.entry_setup_frames = entry_setup_frames;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup timing not met\n");
+ return false;
+ }
+
+ return true;
+}
+
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
@@ -1242,23 +1368,14 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
- if (psr_setup_time < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
- intel_dp->psr_dpcd[1]);
- return;
- }
+ if (CAN_PANEL_REPLAY(intel_dp))
+ crtc_state->has_panel_replay = true;
+ else
+ crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
- if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
- adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: PSR setup time (%d us) too long\n",
- psr_setup_time);
+ if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
return;
- }
- crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
@@ -1279,18 +1396,23 @@ void intel_psr_get_config(struct intel_encoder *encoder,
return;
intel_dp = &dig_port->dp;
- if (!CAN_PSR(intel_dp))
+ if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
if (!intel_dp->psr.enabled)
goto unlock;
- /*
- * Not possible to read EDP_PSR/PSR2_CTL registers as it is
- * enabled/disabled because of frontbuffer tracking and others.
- */
- pipe_config->has_psr = true;
+ if (intel_dp->psr.panel_replay_enabled) {
+ pipe_config->has_panel_replay = true;
+ } else {
+ /*
+ * Not possible to read EDP_PSR/PSR2_CTL registers as it is
+ * enabled/disabled because of frontbuffer tracking and others.
+ */
+ pipe_config->has_psr = true;
+ }
+
pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
@@ -1327,8 +1449,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&intel_dp->psr.lock);
- /* psr1 and psr2 are mutually exclusive.*/
- if (intel_dp->psr.psr2_enabled)
+ /* psr1, psr2 and panel-replay are mutually exclusive. */
+ if (intel_dp->psr.panel_replay_enabled)
+ dg2_activate_panel_replay(intel_dp);
+ else if (intel_dp->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
@@ -1452,12 +1576,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* All supported adlp panels have 1-based X granularity, this may
* cause issues if non-supported panels are used.
*/
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
- else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
+ if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ 0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
@@ -1508,6 +1630,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
intel_dp->psr.busy_frontbuffer_bits = 0;
intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
@@ -1523,8 +1646,12 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ if (intel_dp->psr.panel_replay_enabled)
+ drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
+ else
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
@@ -1553,7 +1680,10 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
return;
}
- if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.panel_replay_enabled) {
+ intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
+ TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
+ } else if (intel_dp->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
@@ -1602,8 +1732,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (!intel_dp->psr.enabled)
return;
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ if (intel_dp->psr.panel_replay_enabled)
+ drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -1636,6 +1769,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
+ intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.psr2_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
@@ -2207,7 +2341,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!crtc_state->has_psr)
+ if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2693,9 +2827,12 @@ void intel_psr_init(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!HAS_PSR(dev_priv))
+ if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
return;
+ if (!intel_dp_is_edp(intel_dp))
+ intel_psr_init_dpcd(intel_dp);
+
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms have an instance of PSR registers per transcoder but
@@ -2711,7 +2848,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
- intel_dp->psr.source_support = true;
+ if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
+ intel_dp->psr.source_panel_replay_support = true;
+ else
+ intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
@@ -2728,12 +2868,19 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
{
struct drm_dp_aux *aux = &intel_dp->aux;
int ret;
+ unsigned int offset;
- ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ offset = intel_dp->psr.panel_replay_enabled ?
+ DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
+
+ ret = drm_dp_dpcd_readb(aux, offset, status);
if (ret != 1)
return ret;
- ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ offset = intel_dp->psr.panel_replay_enabled ?
+ DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
+
+ ret = drm_dp_dpcd_readb(aux, offset, error_status);
if (ret != 1)
return ret;
@@ -2954,7 +3101,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
status = live_status[status_val];
}
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+ seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
@@ -2967,18 +3114,22 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
bool enabled;
u32 val;
- seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ seq_printf(m, "Sink support: PSR = %s",
+ str_yes_no(psr->sink_support));
+
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_puts(m, "\n");
+ seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
- if (!psr->sink_support)
+ if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
- if (psr->enabled)
+ if (psr->panel_replay_enabled)
+ status = "Panel Replay Enabled";
+ else if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
status = "disabled";
@@ -2991,14 +3142,17 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
goto unlock;
}
- if (psr->psr2_enabled) {
+ if (psr->panel_replay_enabled) {
+ val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+ enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
+ } else if (psr->psr2_enabled) {
val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
@@ -3136,6 +3290,16 @@ void intel_psr_debugfs_register(struct drm_i915_private *i915)
i915, &i915_edp_psr_status_fops);
}
+static const char *psr_mode_str(struct intel_dp *intel_dp)
+{
+ if (intel_dp->psr.panel_replay_enabled)
+ return "PANEL-REPLAY";
+ else if (intel_dp->psr.enabled)
+ return "PSR";
+
+ return "unknown";
+}
+
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
@@ -3150,12 +3314,19 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"reserved",
"sink internal error",
};
+ static const char * const panel_replay_status[] = {
+ "Sink device frame is locked to the Source device",
+ "Sink device is coasting, using the VTotal target",
+ "Sink device is governing the frame rate (frame rate unlock is granted)",
+ "Sink device in the process of re-locking with the Source device",
+ };
const char *str;
int ret;
u8 status, error_status;
+ u32 idx;
- if (!CAN_PSR(intel_dp)) {
- seq_puts(m, "PSR Unsupported\n");
+ if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
+ seq_puts(m, "PSR/Panel-Replay Unsupported\n");
return -ENODEV;
}
@@ -3166,15 +3337,20 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
if (ret)
return ret;
- status &= DP_PSR_SINK_STATE_MASK;
- if (status < ARRAY_SIZE(sink_status))
- str = sink_status[status];
- else
- str = "unknown";
+ str = "unknown";
+ if (intel_dp->psr.panel_replay_enabled) {
+ idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
+ if (idx < ARRAY_SIZE(panel_replay_status))
+ str = panel_replay_status[idx];
+ } else if (intel_dp->psr.enabled) {
+ idx = status & DP_PSR_SINK_STATE_MASK;
+ if (idx < ARRAY_SIZE(sink_status))
+ str = sink_status[idx];
+ }
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
+ seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
- seq_printf(m, "Sink PSR error status: 0x%x", error_status);
+ seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
@@ -3183,11 +3359,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
else
seq_puts(m, "\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- seq_puts(m, "\tPSR RFB storage error\n");
+ seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
+ seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_LINK_CRC_ERROR)
- seq_puts(m, "\tPSR Link CRC error\n");
+ seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
return ret;
}
@@ -3207,13 +3383,16 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
- return;
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
+ if (!(HAS_DP20(i915) &&
+ connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
+ return;
+ }
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(i915))
+ if (HAS_PSR(i915) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index bf35f42df6bc..6a1f4573852b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,6 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
+bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index d39951383c92..efe4306b37e0 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -35,6 +35,8 @@
#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3)
#define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20)
#define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x))
+#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK REG_GENMASK(17, 16)
+#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) REG_FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x))
#define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12)
#define EDP_PSR_TP_MASK REG_BIT(11)
#define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0)
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 543cdc46aa1d..600c815e37e4 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -34,9 +34,6 @@
* These qp tables are as per the C model
* and it has the rows pointing to bpps which increment
* in steps of 0.5
- * We do not support fractional bpps as of today,
- * hence we would skip the fractional bpps during
- * our references for qp calclulations.
*/
static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index a636f42ceae5..bcb4959df70d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -1787,17 +1788,28 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
-static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_disable_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
intel_sdvo_set_audio_state(intel_sdvo, 0);
}
-static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_enable_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
const u8 *eld = crtc_state->eld;
+ if (!crtc_state->has_audio)
+ return;
+
intel_sdvo_set_audio_state(intel_sdvo, 0);
intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
@@ -1818,8 +1830,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- if (old_crtc_state->has_audio)
- intel_sdvo_disable_audio(intel_sdvo);
+ encoder->audio_disable(encoder, old_crtc_state, conn_state);
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
@@ -1913,8 +1924,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
- if (pipe_config->has_audio)
- intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
+ encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -3390,6 +3400,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
}
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->audio_enable = intel_sdvo_enable_audio;
+ intel_encoder->audio_disable = intel_sdvo_disable_audio;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1fb16510f750..d7b440c8caef 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,6 +48,11 @@
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
+static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
+{
+ return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A';
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1636,7 +1641,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(pipe, sprite));
+ "sprite %c", sprite_name(dev_priv, pipe, sprite));
kfree(modifiers);
if (ret)
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 6757dbae9ee5..5f2fb702e367 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -77,8 +77,8 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
+ int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel);
int bpc = vdsc_cfg->bits_per_component;
- int bpp = vdsc_cfg->bits_per_pixel >> 4;
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
int first_line_bpg_offset;
@@ -148,7 +148,13 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
static const s8 ofs_und8[] = {
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
-
+ /*
+ * For 420 format since bits_per_pixel (bpp) is set to target bpp * 2,
+ * QP table values for target bpp 4.0 to 4.4375 (rounded to 4.0) are
+ * actually for bpp 8 to 8.875 (rounded to 4.0 * 2 i.e. 8).
+ * Similarly values for target bpp 4.5 to 4.9375 (rounded to 4.5)
+ * are for bpp 9 to 9.875 (rounded to 4.5 * 2 i.e. 9), and so on.
+ */
bpp_i = bpp - 8;
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
@@ -178,6 +184,9 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
}
} else {
+ /* fractional bpp part * 10000 (for precision up to 4 decimal places) */
+ int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel);
+
static const s8 ofs_und6[] = {
0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
@@ -191,7 +200,14 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
- bpp_i = (2 * (bpp - 6));
+ /*
+ * QP table rows have values in increment of 0.5.
+ * So 6.0 bpp to 6.4375 will have index 0, 6.5 to 6.9375 will have index 1,
+ * and so on.
+ * 0.5 fractional part with 4 decimal precision becomes 5000
+ */
+ bpp_i = 2 * (bpp - 6) + (fractional_bits < 5000 ? 0 : 1);
+
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
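A worked example of the index computation above, assuming to_bpp_int() returns the integer part of the x16 fixed-point bpp: for a compressed bpp of 7.5, bpp is 7 and fractional_bits is 5000, so bpp_i = 2 * (7 - 6) + 1 = 3, selecting the 7.5 bpp entry of tables whose rows advance in 0.5 bpp steps.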
@@ -248,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
- u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int err;
int ret;
@@ -279,8 +295,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
- /* Gen 11 only supports integral values of bpp */
- vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_pixel = pipe_config->dsc.compressed_bpp_x16;
/*
* According to DSC 1.2 specs in Section 4.1 if native_420 is set
@@ -874,7 +889,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
if (vdsc_cfg->native_420)
vdsc_cfg->bits_per_pixel >>= 1;
- crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+ crtc_state->dsc.compressed_bpp_x16 = vdsc_cfg->bits_per_pixel;
/* PPS 2 */
pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 2);
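The compressed bpp is now carried as an x16 fixed-point value (compressed_bpp_x16) rather than a plain integer. A minimal sketch of that convention, assuming to_bpp_int() is the usual shift-by-four helper:

	/* Illustrative only: 10.5 bpp expressed in x16 fixed point. */
	int bpp_x16 = 10 * 16 + 8;	/* 10.5 * 16 = 168 */
	int int_part = bpp_x16 >> 4;	/* to_bpp_int() -> 10 */
	int frac_16ths = bpp_x16 & 0xf;	/* 8, i.e. 8/16 = 0.5 */

Fractional bpps therefore stay exact in 1/16 steps, which is why the PPS readout above assigns bits_per_pixel to compressed_bpp_x16 directly instead of truncating it with '>> 4'.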
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 245a64332cc7..f5c77a018e10 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -21,7 +21,6 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
-#include "gt/intel_gt.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
@@ -1007,7 +1006,8 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* The DPT object contains only one vma, so the VMA's offset
* within the DPT is always 0.
*/
- drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start);
+ drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
+ plane_state->dpt_vma->node.start);
drm_WARN_ON(&i915->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1855,16 +1855,19 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
}
}
-static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+static void check_protection(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- return intel_pxp_key_check(i915->pxp, obj, false) == 0;
-}
+ if (DISPLAY_VER(i915) < 11)
+ return;
-static bool pxp_is_borked(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+ plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
+ plane_state->force_black = i915_gem_object_is_protected(obj) &&
+ !plane_state->decrypt;
}
static int skl_plane_check(struct intel_crtc_state *crtc_state,
@@ -1911,10 +1914,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (DISPLAY_VER(dev_priv) >= 11) {
- plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
- plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
- }
+ check_protection(plane_state);
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
@@ -2489,7 +2489,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
- if (!dev_priv->params.enable_dpt &&
+ if (!dev_priv->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) {
drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n");
goto error;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 99b8ccdc3dfa..56588d6e24ae 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -412,7 +412,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->params.enable_sagv)
+ if (!i915->display.params.enable_sagv)
return false;
if (DISPLAY_VER(i915) >= 12)
@@ -3702,7 +3702,8 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
};
seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
- seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv));
+ seq_printf(m, "SAGV modparam: %s\n",
+ str_enabled_disabled(i915->display.params.enable_sagv));
seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 55da627a8b8d..bda49734ca33 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -561,6 +561,12 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
+static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+{
+ return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+}
+
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -570,7 +576,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -589,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
@@ -627,8 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
temp = intel_de_read(dev_priv, port_ctrl);
@@ -664,8 +669,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
@@ -955,9 +959,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 683fd8d3151c..ccc077b74d2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
#include "display/intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index e5e870b6f186..9fbf14867a2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -89,6 +89,7 @@ i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
if (!front) {
RCU_INIT_POINTER(obj->frontbuffer, NULL);
+ drm_gem_object_put(intel_bo_to_drm_bo(obj));
} else if (rcu_access_pointer(obj->frontbuffer)) {
cur = rcu_dereference_protected(obj->frontbuffer, true);
kref_get(&cur->ref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index d68675925b79..1d97c435a015 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -10,6 +10,7 @@
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ed32bf5b1546..ba1186fc524f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -982,8 +982,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
err:
i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
- intel_gt_release_all(i915);
-
return ret;
}
@@ -1002,15 +1000,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
return 0;
}
-void intel_gt_release_all(struct drm_i915_private *i915)
-{
- struct intel_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, i915, id)
- i915->gt[id] = NULL;
-}
-
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 970bedf6b78a..e1f13735f530 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -167,6 +167,20 @@ void intel_gt_release_all(struct drm_i915_private *i915);
(id__)++) \
for_each_if(((gt__) = (i915__)->gt[(id__)]))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, gt__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (gt__)->engine[(id__)])
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
+ (tmp__) ? \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ 0;)
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
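With the engine iterators now defined in intel_gt.h, callers only need that header instead of i915_drv.h. A minimal usage sketch (types and fields as in the existing code):

	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_engine_mask_t tmp;

	for_each_engine(engine, gt, id)
		drm_info(&gt->i915->drm, "%s\n", engine->name);

	for_each_engine_masked(engine, gt, gt->info.engine_mask, tmp)
		drm_info(&gt->i915->drm, "%s\n", engine->name);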
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
index 8f9b874fdc9c..3aa1d014c14d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
@@ -6,8 +6,8 @@
#include <drm/drm_print.h>
-#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
+#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 05f9348b7a9d..d4a3f3e093b0 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -3047,7 +3047,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- u32 per_ctx_start[CACHELINE_DWORDS] = {0};
+ u32 per_ctx_start[CACHELINE_DWORDS] = {};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 835c3fde8a20..313efdabee57 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -56,7 +56,7 @@ static const struct pixel_format bdw_pixel_formats[] = {
{DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
/* non-supported format has bpp default to 0 */
- {0, 0, NULL},
+ {}
};
static const struct pixel_format skl_pixel_formats[] = {
@@ -76,7 +76,7 @@ static const struct pixel_format skl_pixel_formats[] = {
{DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
/* non-supported format has bpp default to 0 */
- {0, 0, NULL},
+ {}
};
static int bdw_format_to_drm(int format)
@@ -293,7 +293,7 @@ static const struct cursor_mode_format cursor_pixel_formats[] = {
{DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
/* non-supported format has bpp default to 0 */
- {0, 0, 0, 0, NULL},
+ {}
};
static int cursor_mode_to_drm(int mode)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a9f7fa9b90bd..90f6c1ece57d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -538,7 +538,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
- struct dpll clock = {0};
+ struct dpll clock = {};
u32 temp;
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
@@ -2576,7 +2576,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e9b79c2c37d8..bfe92d2402ea 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
+#include "display/intel_display_params.h"
+
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
@@ -67,13 +69,13 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
- intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
+ intel_display_params_dump(i915, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 8a0e2c745e1f..2a1faf403965 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -231,16 +231,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->display.audio.mutex);
- mutex_init(&dev_priv->display.wm.wm_mutex);
- mutex_init(&dev_priv->display.pps.mutex);
- mutex_init(&dev_priv->display.hdcp.hdcp_mutex);
-
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -782,7 +776,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = i915_driver_mmio_probe(i915);
if (ret < 0)
- goto out_tiles_cleanup;
+ goto out_runtime_pm_put;
ret = i915_driver_hw_probe(i915);
if (ret < 0)
@@ -842,8 +836,6 @@ out_cleanup_hw:
i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
i915_driver_mmio_release(i915);
-out_tiles_cleanup:
- intel_gt_release_all(i915);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_late_release(i915);
@@ -909,6 +901,8 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
+
+ intel_display_device_remove(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dd452c220df7..861567362abd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -396,20 +396,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
-/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, gt__, id__) \
- for ((id__) = 0; \
- (id__) < I915_NUM_ENGINES; \
- (id__)++) \
- for_each_if ((engine__) = (gt__)->engine[(id__)])
-
-/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
- for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
- (tmp__) ? \
- ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
- 0;)
-
#define rb_to_uabi_engine(rb) \
rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
@@ -418,11 +404,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
(engine__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define for_each_uabi_class_engine(engine__, class__, i915__) \
- for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
- (engine__) && (engine__)->uabi_class == (class__); \
- (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)
@@ -575,6 +556,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
+#define IS_LUNARLAKE(i915) 0
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c166ad5e187a..92758b6b41f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1306,8 +1306,6 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
-
- spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b4e31e59c799..0971f4976324 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -660,6 +660,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
i915_params_dump(params, &p);
+ intel_display_params_dump(m->i915, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -1027,6 +1028,7 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
+ intel_display_params_free(&error->display_params);
}
static void cleanup_uc(struct intel_uc_coredump *uc)
@@ -1988,6 +1990,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->suspend_count = i915->suspend_count;
i915_params_copy(&error->params, &i915->params);
+ intel_display_params_copy(&error->display_params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -2174,7 +2177,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
+ pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 9f5971f5e980..4ce227f7e1e1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -15,6 +15,7 @@
#include <drm/drm_mm.h>
#include "display/intel_display_device.h"
+#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@@ -214,6 +215,7 @@ struct i915_gpu_coredump {
struct intel_display_runtime_info display_runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
+ struct intel_display_params display_params;
struct intel_overlay_error_state *overlay;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 036c4c3ed6ed..de43048543e8 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -67,33 +67,9 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(enable_dc, int, 0400,
- "Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
- "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
-
-i915_param_named_unsafe(enable_fbc, int, 0400,
- "Enable frame buffer compression for power savings "
- "(default: -1 (use per-chip default))");
-
-i915_param_named_unsafe(lvds_channel_mode, int, 0400,
- "Specify LVDS channel mode "
- "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-i915_param_named_unsafe(panel_use_ssc, int, 0400,
- "Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: auto from VBT)");
-
-i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
- "Override/Ignore selection of SDVO panel mode in the VBT "
- "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
-i915_param_named_unsafe(vbt_firmware, charp, 0400,
- "Load VBT from specified file under /lib/firmware");
-
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
i915_param_named(error_capture, bool, 0400,
"Record the GPU state following a hang. "
@@ -106,55 +82,10 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0400,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_psr, int, 0400,
- "Enable PSR "
- "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
- "Default: -1 (use per-chip default)");
-
-i915_param_named(psr_safest_params, bool, 0400,
- "Replace PSR VBT parameters by the safest and not optimal ones. This "
- "is helpful to detect if PSR issues are related to bad values set in "
- " VBT. (0=use VBT parameters, 1=use safest parameters)");
-
-i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
- "Enable PSR2 selective fetch "
- "(0=disabled, 1=enabled) "
- "Default: 0");
-
-i915_param_named_unsafe(enable_sagv, bool, 0600,
- "Enable system agent voltage/frequency scaling (SAGV) (default: true)");
-
i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe options for specified supported devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
-i915_param_named_unsafe(disable_power_well, int, 0400,
- "Disable display power wells when possible "
- "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-
-i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)");
-
-i915_param_named_unsafe(enable_dpt, bool, 0400,
- "Enable display page table (DPT) (default: true)");
-
-i915_param_named_unsafe(load_detect_test, bool, 0400,
- "Force-enable the VGA load detect code for testing (default:false). "
- "For developers only.");
-
-i915_param_named_unsafe(force_reset_modeset_test, bool, 0400,
- "Force a modeset during gpu reset for testing (default:false). "
- "For developers only.");
-
-i915_param_named_unsafe(invert_brightness, int, 0400,
- "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-
-i915_param_named(disable_display, bool, 0400,
- "Disable display (default: false)");
-
i915_param_named(memtest, bool, 0400,
"Perform a read/write test of all device memory on module load (default: off)");
@@ -162,19 +93,6 @@ i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
-/* Special case writable file */
-i915_param_named(verbose_state_checks, bool, 0600,
- "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-
-i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
- "Force enable atomic functionality on platforms that don't have full support yet.");
-
-/* WA to get away with the default setting in VBT for early platforms.Will be removed */
-i915_param_named_unsafe(edp_vswing, int, 0400,
- "Ignore/Override vswing pre-emph table selection from VBT "
- "(0=use value from vbt [default], 1=low power swing(200mV),"
- "2=default swing(400mV))");
-
i915_param_named_unsafe(enable_guc, int, 0400,
"Enable GuC load for GuC submission and/or HuC load. "
"Required functionality can be selected using bitmask values. "
@@ -196,18 +114,11 @@ i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
-i915_param_named_unsafe(enable_dp_mst, bool, 0400,
- "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, int, 0400,
- "Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
-
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d5194b039aab..1315d7fac850 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -46,21 +46,7 @@ struct drm_printer;
* debugfs file
*/
#define I915_PARAMS_FOR_EACH(param) \
- param(char *, vbt_firmware, NULL, 0400) \
param(int, modeset, -1, 0400) \
- param(int, lvds_channel_mode, 0, 0400) \
- param(int, panel_use_ssc, -1, 0600) \
- param(int, vbt_sdvo_panel_type, -1, 0400) \
- param(int, enable_dc, -1, 0400) \
- param(int, enable_fbc, -1, 0600) \
- param(int, enable_psr, -1, 0600) \
- param(bool, enable_dpt, true, 0400) \
- param(bool, psr_safest_params, false, 0400) \
- param(bool, enable_psr2_sel_fetch, true, 0400) \
- param(bool, enable_sagv, true, 0600) \
- param(int, disable_power_well, -1, 0400) \
- param(int, enable_ips, 1, 0600) \
- param(int, invert_brightness, 0, 0600) \
param(int, enable_guc, -1, 0400) \
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
@@ -69,23 +55,15 @@ struct drm_printer;
param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
- param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
param(unsigned int, inject_probe_failure, 0, 0) \
- param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
param(unsigned int, lmem_size, 0, 0400) \
param(unsigned int, lmem_bar_size, 0, 0400) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
- param(bool, load_detect_test, false, 0600) \
- param(bool, force_reset_modeset_test, false, 0600) \
param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 0600 : 0) \
- param(bool, disable_display, false, 0400) \
- param(bool, verbose_state_checks, true, 0) \
- param(bool, nuclear_pageflip, false, 0400) \
- param(bool, enable_dp_mst, true, 0600) \
param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
#define MEMBER(T, member, ...) T member;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 135e8d8dbdf0..27dc903f0553 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -195,8 +195,6 @@
#define DPIO_SFR_BYPASS (1 << 1)
#define DPIO_CMNRST (1 << 0)
-#define DPIO_PHY(pipe) ((pipe) >> 1)
-
/*
* Per pipe/PLL DPIO regs
*/
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c61066498bf2..f98577967b7f 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,7 +40,7 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index f79cda7a2503..be43614c73fd 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -11,8 +11,6 @@
#include "intel_wakeref.h"
-#include "i915_utils.h"
-
struct device;
struct drm_i915_private;
struct drm_printer;
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 03ea75cd84dd..4f98aa8a861e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -24,6 +24,8 @@
#include "../i915_selftest.h"
+#include "gt/intel_gt.h"
+
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
unsigned int num_ranges,
bool is_watertight)
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index f32e9f78770a..40874ebfb64c 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -33,18 +33,22 @@ int intel_gmch_bridge_setup(struct drm_i915_private *i915)
i915->gmch.pdev);
}
+static int mchbar_reg(struct drm_i915_private *i915)
+{
+ return GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+}
+
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
- int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
if (GRAPHICS_VER(i915) >= 4)
- pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
- pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, &temp_hi);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
@@ -68,10 +72,10 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
}
if (GRAPHICS_VER(i915) >= 4)
- pci_write_config_dword(i915->gmch.pdev, reg + 4,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4,
upper_32_bits(i915->gmch.mch_res.start));
- pci_write_config_dword(i915->gmch.pdev, reg,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
lower_32_bits(i915->gmch.mch_res.start));
return 0;
}
@@ -79,7 +83,6 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
/* Setup MCHBAR if possible, return true if we should disable it again */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
- int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -92,7 +95,7 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915)
pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
enabled = temp & 1;
}
@@ -110,15 +113,13 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915)
pci_write_config_dword(i915->gmch.pdev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
- pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), temp | 1);
}
}
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
- int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
if (i915->gmch.mchbar_need_disable) {
if (IS_I915G(i915) || IS_I915GM(i915)) {
u32 deven_val;
@@ -131,10 +132,10 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915)
} else {
u32 mchbar_val;
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915),
&mchbar_val);
mchbar_val &= ~1;
- pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
mchbar_val);
}
}
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index b98dec3ad817..ffa195560d0d 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -166,23 +166,6 @@ u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
return val;
}
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRWRDA_NP, reg, &val);
-}
-
u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
@@ -227,9 +210,9 @@ static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy p
return IOSF_PORT_DPIO;
}
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 port = vlv_dpio_phy_iosf_port(i915, phy);
u32 val = 0;
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
@@ -239,16 +222,16 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
* so ideally we should check the register offset instead...
*/
drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
+ "DPIO PHY%d read reg 0x%x == 0x%x\n",
+ phy, reg, val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val)
+ enum dpio_phy phy, int reg, u32 val)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 port = vlv_dpio_phy_iosf_port(i915, phy);
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index 9ce283d96b80..c20cf41b2d39 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -11,7 +11,7 @@
#include "vlv_sideband_reg.h"
-enum pipe;
+enum dpio_phy;
struct drm_i915_private;
enum {
@@ -26,9 +26,6 @@ enum {
};
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg);
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val);
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
static inline void vlv_bunit_get(struct drm_i915_private *i915)
@@ -75,9 +72,9 @@ static inline void vlv_dpio_get(struct drm_i915_private *i915)
vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
}
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg);
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg);
void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val);
+ enum dpio_phy phy, int reg, u32 val);
static inline void vlv_dpio_put(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
new file mode 100644
index 000000000000..3bfa2ac212dc
--- /dev/null
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+# Copyright (c) 2023 Imagination Technologies Ltd.
+
+config DRM_POWERVR
+ tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics"
+ depends on ARM64
+ depends on DRM
+ depends on PM
+ select DRM_EXEC
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_SCHED
+ select DRM_GPUVM
+ select FW_LOADER
+ help
+ Choose this option if you have a system that has an Imagination
+ Technologies PowerVR (Series 6 or later) or IMG GPU.
+
+ If "M" is selected, the module will be called powervr.
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
new file mode 100644
index 000000000000..ec6db8e9b403
--- /dev/null
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+# Copyright (c) 2023 Imagination Technologies Ltd.
+
+subdir-ccflags-y := -I$(srctree)/$(src)
+
+powervr-y := \
+ pvr_ccb.o \
+ pvr_cccb.o \
+ pvr_context.o \
+ pvr_device.o \
+ pvr_device_info.o \
+ pvr_drv.o \
+ pvr_free_list.o \
+ pvr_fw.o \
+ pvr_fw_meta.o \
+ pvr_fw_mips.o \
+ pvr_fw_startstop.o \
+ pvr_fw_trace.o \
+ pvr_gem.o \
+ pvr_hwrt.o \
+ pvr_job.o \
+ pvr_mmu.o \
+ pvr_params.o \
+ pvr_power.o \
+ pvr_queue.o \
+ pvr_stream.o \
+ pvr_stream_defs.o \
+ pvr_sync.o \
+ pvr_vm.o \
+ pvr_vm_mips.o
+
+powervr-$(CONFIG_DEBUG_FS) += \
+ pvr_debugfs.o
+
+obj-$(CONFIG_DRM_POWERVR) += powervr.o
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c
new file mode 100644
index 000000000000..4deeac7ed40a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_ccb.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_free_list.h"
+#include "pvr_fw.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+
+#include <drm/drm_managed.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */
+#define RESERVE_SLOT_MIN_RETRIES 10
+
+static void
+ccb_ctrl_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr;
+ struct pvr_ccb *pvr_ccb = priv;
+
+ ctrl->write_offset = 0;
+ ctrl->read_offset = 0;
+ ctrl->wrap_mask = pvr_ccb->num_cmds - 1;
+ ctrl->cmd_size = pvr_ccb->cmd_size;
+}
+
+/**
+ * pvr_ccb_init() - Initialise a CCB
+ * @pvr_dev: Device pointer.
+ * @pvr_ccb: Pointer to CCB structure to initialise.
+ * @num_cmds_log2: Log2 of number of commands in this CCB.
+ * @cmd_size: Command size for this CCB.
+ *
+ * Return:
+ * * Zero on success, or
+ * * Any error code returned by pvr_fw_object_create_and_map().
+ */
+static int
+pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
+ u32 num_cmds_log2, size_t cmd_size)
+{
+ u32 num_cmds = 1 << num_cmds_log2;
+ u32 ccb_size = num_cmds * cmd_size;
+ int err;
+
+ pvr_ccb->num_cmds = num_cmds;
+ pvr_ccb->cmd_size = cmd_size;
+
+ err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock);
+ if (err)
+ return err;
+
+ /*
+ * Map CCB and control structure as uncached, so we don't have to flush
+ * CPU cache repeatedly when polling for space.
+ */
+ pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj);
+ if (IS_ERR(pvr_ccb->ctrl))
+ return PTR_ERR(pvr_ccb->ctrl);
+
+ pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &pvr_ccb->ccb_obj);
+ if (IS_ERR(pvr_ccb->ccb)) {
+ err = PTR_ERR(pvr_ccb->ccb);
+ goto err_free_ctrl;
+ }
+
+ pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
+ pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);
+
+ WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0);
+ WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0);
+ WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1);
+ WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size);
+
+ return 0;
+
+err_free_ctrl:
+ pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
+
+ return err;
+}
+
+/**
+ * pvr_ccb_fini() - Release CCB structure
+ * @pvr_ccb: CCB to release.
+ */
+void
+pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
+{
+ pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj);
+ pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
+}
+
+/**
+ * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
+ * @pvr_ccb: CCB to test.
+ * @write_offset: Address to store number of next available slot. May be %NULL.
+ *
+ * Caller must hold @pvr_ccb->lock.
+ *
+ * Return:
+ * * %true if a slot is available, or
+ * * %false if no slot is available.
+ */
+static __always_inline bool
+pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
+{
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+ u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask);
+
+ lockdep_assert_held(&pvr_ccb->lock);
+
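+ /*
+ * The CCB is full when advancing write_offset would make it equal to
+ * read_offset, so one slot is always left unused (this is why
+ * pvr_kccb_capacity() is slot_count - 1).
+ */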
+ if (READ_ONCE(ctrl->read_offset) != next_write_offset) {
+ if (write_offset)
+ *write_offset = next_write_offset;
+ return true;
+ }
+
+ return false;
+}
+
+static void
+process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
+{
+ switch (cmd->cmd_type) {
+ case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+ pvr_power_reset(pvr_dev, false);
+ break;
+
+ case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+ pvr_free_list_process_reconstruct_req(pvr_dev,
+ &cmd->cmd_data.cmd_freelists_reconstruction);
+ break;
+
+ case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW:
+ pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
+ break;
+
+ default:
+ drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n",
+ cmd->cmd_type);
+ break;
+ }
+}
+
+/**
+ * pvr_fwccb_process() - Process any pending FWCCB commands
+ * @pvr_dev: Target PowerVR device
+ */
+void pvr_fwccb_process(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
+ u32 read_offset;
+
+ mutex_lock(&pvr_dev->fwccb.lock);
+
+ while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) {
+ struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset];
+
+ WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask));
+
+ /* Drop FWCCB lock while we process command. */
+ mutex_unlock(&pvr_dev->fwccb.lock);
+
+ process_fwccb_command(pvr_dev, &cmd);
+
+ mutex_lock(&pvr_dev->fwccb.lock);
+ }
+
+ mutex_unlock(&pvr_dev->fwccb.lock);
+}
+
+/**
+ * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots.
+ * @pvr_dev: Target PowerVR device
+ *
+ * Return:
+ * * The maximum number of active slots.
+ */
+static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev)
+{
+ /* Capacity is the number of slots minus one to cope with the wrapping
+ * mechanisms. If we were to use all slots, we might end up with
+ * read_offset == write_offset, which the FW considers as a KCCB-is-empty
+ * condition.
+ */
+ return pvr_dev->kccb.slot_count - 1;
+}
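+
+/*
+ * For example (size assumed for illustration), a KCCB created with
+ * num_cmds_log2 == 4 has 16 slots but a usable capacity of 15: filling all 16
+ * would make write_offset equal read_offset, which the FW treats as an empty
+ * ring.
+ */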
+
+/**
+ * pvr_kccb_used_slot_count_locked() - Get the number of used slots
+ * @pvr_dev: Device pointer.
+ *
+ * KCCB lock must be held.
+ *
+ * Return:
+ * * The number of slots currently used.
+ */
+static u32
+pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev)
+{
+ struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+ u32 wr_offset = READ_ONCE(ctrl->write_offset);
+ u32 rd_offset = READ_ONCE(ctrl->read_offset);
+ u32 used_count;
+
+ lockdep_assert_held(&pvr_ccb->lock);
+
+ if (wr_offset >= rd_offset)
+ used_count = wr_offset - rd_offset;
+ else
+ used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset;
+
+ return used_count;
+}
+
+/**
+ * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref
+ * held and a slot pre-reserved
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ */
+void
+pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot)
+{
+ struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
+ struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+ u32 old_write_offset;
+ u32 new_write_offset;
+
+ WARN_ON(pvr_dev->lost);
+
+ mutex_lock(&pvr_ccb->lock);
+
+ if (WARN_ON(!pvr_dev->kccb.reserved_count))
+ goto out_unlock;
+
+ old_write_offset = READ_ONCE(ctrl->write_offset);
+
+ /* We reserved the slot, we should have one available. */
+ if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset)))
+ goto out_unlock;
+
+ memcpy(&kccb[old_write_offset], cmd,
+ sizeof(struct rogue_fwif_kccb_cmd));
+ if (kccb_slot) {
+ *kccb_slot = old_write_offset;
+ /* Clear return status for this slot. */
+ WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset],
+ ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
+ }
+ mb(); /* Ensure the command is written before updating the write offset. */
+ WRITE_ONCE(ctrl->write_offset, new_write_offset);
+ pvr_dev->kccb.reserved_count--;
+
+ /* Kick MTS */
+ pvr_fw_mts_schedule(pvr_dev,
+ PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);
+
+out_unlock:
+ mutex_unlock(&pvr_ccb->lock);
+}
+
+/**
+ * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot
+ * @pvr_dev: Device pointer.
+ *
+ * Return:
+ * * true if a KCCB slot was reserved, or
+ * * false otherwise.
+ */
+static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev)
+{
+ bool reserved = false;
+ u32 used_count;
+
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+
+ used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
+ if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) {
+ pvr_dev->kccb.reserved_count++;
+ reserved = true;
+ }
+
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+
+ return reserved;
+}
+
+/**
+ * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously
+ * @pvr_dev: Device pointer.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of
+ * %RESERVE_SLOT_MIN_RETRIES retries.
+ */
+static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
+{
+ unsigned long start_timestamp = jiffies;
+ bool reserved = false;
+ u32 retries = 0;
+
+ while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT ||
+ retries < RESERVE_SLOT_MIN_RETRIES) {
+ reserved = pvr_kccb_try_reserve_slot(pvr_dev);
+ if (reserved)
+ break;
+
+ usleep_range(1, 50);
+
+ if (retries < U32_MAX)
+ retries++;
+ }
+
+ return reserved ? 0 : -EBUSY;
+}
+
+/**
+ * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ *
+ * Returns:
+ * * Zero on success, or
+ * * -EBUSY on timeout while waiting for a free KCCB slot.
+ */
+int
+pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot)
+{
+ int err;
+
+ err = pvr_kccb_reserve_slot_sync(pvr_dev);
+ if (err)
+ return err;
+
+ pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot);
+ return 0;
+}
+
+/**
+ * pvr_kccb_send_cmd() - Send command to the KCCB
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ *
+ * Returns:
+ * * Zero on success, or
+ * * -EBUSY on timeout while waiting for a free KCCB slot.
+ */
+int
+pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot)
+{
+ int err;
+
+ err = pvr_power_get(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot);
+
+ pvr_power_put(pvr_dev);
+
+ return err;
+}
+
+/**
+ * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
+ * @pvr_dev: Device pointer.
+ * @slot_nr: KCCB slot to wait on.
+ * @timeout: Timeout length (in jiffies).
+ * @rtn_out: Location to store KCCB command result. May be %NULL.
+ *
+ * Returns:
+ * * Zero on success, or
+ * * -ETIMEDOUT on timeout.
+ */
+int
+pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
+ u32 timeout, u32 *rtn_out)
+{
+ int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) &
+ ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);
+
+ if (ret && rtn_out)
+ *rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]);
+
+ return ret ? 0 : -ETIMEDOUT;
+}
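+
+/*
+ * Typical usage (illustrative sketch; command setup and error handling
+ * elided, variable names hypothetical):
+ *
+ *   struct rogue_fwif_kccb_cmd cmd = { ... };
+ *   u32 slot, rtn;
+ *   int err;
+ *
+ *   err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot);
+ *   if (!err)
+ *           err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, &rtn);
+ */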
+
+/**
+ * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle
+ * @pvr_dev: Device pointer
+ *
+ * Returns:
+ * * %true if the KCCB is idle (contains no commands), or
+ * * %false if the KCCB contains pending commands.
+ */
+bool
+pvr_kccb_is_idle(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl;
+ bool idle;
+
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+
+ idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset));
+
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+
+ return idle;
+}
+
+static const char *
+pvr_kccb_fence_get_driver_name(struct dma_fence *f)
+{
+ return PVR_DRIVER_NAME;
+}
+
+static const char *
+pvr_kccb_fence_get_timeline_name(struct dma_fence *f)
+{
+ return "kccb";
+}
+
+static const struct dma_fence_ops pvr_kccb_fence_ops = {
+ .get_driver_name = pvr_kccb_fence_get_driver_name,
+ .get_timeline_name = pvr_kccb_fence_get_timeline_name,
+};
+
+/**
+ * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot
+ */
+struct pvr_kccb_fence {
+ /** @base: Base dma_fence object. */
+ struct dma_fence base;
+
+ /** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */
+ struct list_head node;
+};
+
+/**
+ * pvr_kccb_wake_up_waiters() - Check the KCCB waiters
+ * @pvr_dev: Target PowerVR device
+ *
+ * Signal as many KCCB fences as we have slots available.
+ */
+void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev)
+{
+ struct pvr_kccb_fence *fence, *tmp_fence;
+ u32 used_count, available_count;
+
+ /* Wake up those waiting for KCCB slot execution. */
+ wake_up_all(&pvr_dev->kccb.rtn_q);
+
+ /* Then iterate over all KCCB fences and signal as many as we can. */
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+ used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
+
+ if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev)))
+ goto out_unlock;
+
+ available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count;
+ list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
+ if (!available_count)
+ break;
+
+ list_del(&fence->node);
+ pvr_dev->kccb.reserved_count++;
+ available_count--;
+ dma_fence_signal(&fence->base);
+ dma_fence_put(&fence->base);
+ }
+
+out_unlock:
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+}
+
+/**
+ * pvr_kccb_fini() - Cleanup device KCCB
+ * @pvr_dev: Target PowerVR device
+ */
+void pvr_kccb_fini(struct pvr_device *pvr_dev)
+{
+ pvr_ccb_fini(&pvr_dev->kccb.ccb);
+ WARN_ON(!list_empty(&pvr_dev->kccb.waiters));
+ WARN_ON(pvr_dev->kccb.reserved_count);
+}
+
+/**
+ * pvr_kccb_init() - Initialise device KCCB
+ * @pvr_dev: Target PowerVR device
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_ccb_init().
+ */
+int
+pvr_kccb_init(struct pvr_device *pvr_dev)
+{
+ pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
+ INIT_LIST_HEAD(&pvr_dev->kccb.waiters);
+ pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1);
+ spin_lock_init(&pvr_dev->kccb.fence_ctx.lock);
+
+ return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb,
+ ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
+ sizeof(struct rogue_fwif_kccb_cmd));
+}
+
+/**
+ * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object
+ *
+ * Return:
+ * * NULL if the allocation fails, or
+ * * A valid dma_fence pointer otherwise.
+ */
+struct dma_fence *pvr_kccb_fence_alloc(void)
+{
+ struct pvr_kccb_fence *kccb_fence;
+
+ kccb_fence = kzalloc(sizeof(*kccb_fence), GFP_KERNEL);
+ if (!kccb_fence)
+ return NULL;
+
+ return &kccb_fence->base;
+}
+
+/**
+ * pvr_kccb_fence_put() - Drop a KCCB fence reference
+ * @fence: The fence to drop the reference on.
+ *
+ * If the fence hasn't been initialized yet, dma_fence_free() is called. This
+ * way we have a single function taking care of both cases.
+ */
+void pvr_kccb_fence_put(struct dma_fence *fence)
+{
+ if (!fence)
+ return;
+
+ if (!fence->ops) {
+ dma_fence_free(fence);
+ } else {
+ WARN_ON(fence->ops != &pvr_kccb_fence_ops);
+ dma_fence_put(fence);
+ }
+}
+
+/**
+ * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use
+ * @pvr_dev: Target PowerVR device
+ * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc()
+ *
+ * Try to reserve a KCCB slot. If no slot is available, initialize the fence
+ * object and queue it on the waiters list.
+ *
+ * If %NULL is returned, the slot has been reserved. In that case, @f is freed
+ * and must not be accessed after that point.
+ *
+ * Return:
+ * * NULL if a slot was available directly, or
+ * * A valid dma_fence object to wait on if no slot was available.
+ */
+struct dma_fence *
+pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f)
+{
+ struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base);
+ struct dma_fence *out_fence = NULL;
+ u32 used_count;
+
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+
+ used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
+ if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) {
+ dma_fence_init(&fence->base, &pvr_kccb_fence_ops,
+ &pvr_dev->kccb.fence_ctx.lock,
+ pvr_dev->kccb.fence_ctx.id,
+ atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno));
+ out_fence = dma_fence_get(&fence->base);
+ list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
+ } else {
+ pvr_kccb_fence_put(f);
+ pvr_dev->kccb.reserved_count++;
+ }
+
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+
+ return out_fence;
+}
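+
+/*
+ * Illustrative reservation flow (sketch only; error handling elided, cmd is
+ * hypothetical, and a PM reference is assumed to be held). If the command is
+ * never sent, pvr_kccb_release_slot() must be called instead.
+ *
+ *   struct dma_fence *f = pvr_kccb_fence_alloc();
+ *   struct dma_fence *done;
+ *
+ *   done = pvr_kccb_reserve_slot(pvr_dev, f);
+ *   if (done) {
+ *           dma_fence_wait(done, false);
+ *           dma_fence_put(done);
+ *   }
+ *   pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd, NULL);
+ */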
+
+/**
+ * pvr_kccb_release_slot() - Release a KCCB slot reserved with
+ * pvr_kccb_reserve_slot()
+ * @pvr_dev: Target PowerVR device
+ *
+ * Should only be called if something failed after the
+ * pvr_kccb_reserve_slot() call and you know you won't call
+ * pvr_kccb_send_cmd_reserved_powered().
+ */
+void pvr_kccb_release_slot(struct pvr_device *pvr_dev)
+{
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+ if (!WARN_ON(!pvr_dev->kccb.reserved_count))
+ pvr_dev->kccb.reserved_count--;
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+}
+
+/**
+ * pvr_fwccb_init() - Initialise device FWCCB
+ * @pvr_dev: Target PowerVR device
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_ccb_init().
+ */
+int
+pvr_fwccb_init(struct pvr_device *pvr_dev)
+{
+ return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
+ ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
+ sizeof(struct rogue_fwif_fwccb_cmd));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.h b/drivers/gpu/drm/imagination/pvr_ccb.h
new file mode 100644
index 000000000000..4c8aef31eeb0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_ccb.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CCB_H
+#define PVR_CCB_H
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+struct pvr_ccb {
+ /** @ctrl_obj: FW object representing CCB control structure. */
+ struct pvr_fw_object *ctrl_obj;
+ /** @ccb_obj: FW object representing CCB. */
+ struct pvr_fw_object *ccb_obj;
+
+ /** @ctrl_fw_addr: FW virtual address of CCB control structure. */
+ u32 ctrl_fw_addr;
+ /** @ccb_fw_addr: FW virtual address of CCB. */
+ u32 ccb_fw_addr;
+
+ /** @num_cmds: Number of commands in this CCB. */
+ u32 num_cmds;
+
+ /** @cmd_size: Size of each command in this CCB, in bytes. */
+ u32 cmd_size;
+
+ /** @lock: Mutex protecting @ctrl and @ccb. */
+ struct mutex lock;
+ /**
+ * @ctrl: Kernel mapping of CCB control structure. @lock must be held
+ * when accessing.
+ */
+ struct rogue_fwif_ccb_ctl *ctrl;
+ /** @ccb: Kernel mapping of CCB. @lock must be held when accessing. */
+ void *ccb;
+};
+
+int pvr_kccb_init(struct pvr_device *pvr_dev);
+void pvr_kccb_fini(struct pvr_device *pvr_dev);
+int pvr_fwccb_init(struct pvr_device *pvr_dev);
+void pvr_ccb_fini(struct pvr_ccb *ccb);
+
+void pvr_fwccb_process(struct pvr_device *pvr_dev);
+
+struct dma_fence *pvr_kccb_fence_alloc(void);
+void pvr_kccb_fence_put(struct dma_fence *fence);
+struct dma_fence *
+pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f);
+void pvr_kccb_release_slot(struct pvr_device *pvr_dev);
+int pvr_kccb_send_cmd(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd, u32 *kccb_slot);
+int pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot);
+void pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot);
+int pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr, u32 timeout,
+ u32 *rtn_out);
+bool pvr_kccb_is_idle(struct pvr_device *pvr_dev);
+void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev);
+
+#endif /* PVR_CCB_H */
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.c b/drivers/gpu/drm/imagination/pvr_cccb.c
new file mode 100644
index 000000000000..4fabab41bea7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_cccb.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+static __always_inline u32
+get_ccb_space(u32 w_off, u32 r_off, u32 ccb_size)
+{
+ return (((r_off) - (w_off)) + ((ccb_size) - 1)) & ((ccb_size) - 1);
+}
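+
+/*
+ * Worked example (values chosen for illustration): with ccb_size == 16,
+ * w_off == 14 and r_off == 3, the result is ((3 - 14) + 15) & 15 == 4 free
+ * bytes. One byte is always kept unused so the write offset never catches up
+ * with the read offset.
+ */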
+
+static void
+cccb_ctrl_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_cccb_ctl *ctrl = cpu_ptr;
+ struct pvr_cccb *pvr_cccb = priv;
+
+ WRITE_ONCE(ctrl->write_offset, 0);
+ WRITE_ONCE(ctrl->read_offset, 0);
+ WRITE_ONCE(ctrl->dep_offset, 0);
+ WRITE_ONCE(ctrl->wrap_mask, pvr_cccb->wrap_mask);
+}
+
+/**
+ * pvr_cccb_init() - Initialise a Client CCB
+ * @pvr_dev: Device pointer.
+ * @pvr_cccb: Pointer to Client CCB structure to initialise.
+ * @size_log2: Log2 size of Client CCB in bytes.
+ * @name: Name of owner of Client CCB. Used for fence context.
+ *
+ * Return:
+ * * Zero on success, or
+ * * Any error code returned by pvr_fw_object_create_and_map().
+ */
+int
+pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *pvr_cccb,
+ u32 size_log2, const char *name)
+{
+ size_t size = 1 << size_log2;
+ int err;
+
+ pvr_cccb->size = size;
+ pvr_cccb->write_offset = 0;
+ pvr_cccb->wrap_mask = size - 1;
+
+ /*
+ * Map CCCB and control structure as uncached, so we don't have to flush
+ * CPU cache repeatedly when polling for space.
+ */
+ pvr_cccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_cccb->ctrl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ cccb_ctrl_init, pvr_cccb,
+ &pvr_cccb->ctrl_obj);
+ if (IS_ERR(pvr_cccb->ctrl))
+ return PTR_ERR(pvr_cccb->ctrl);
+
+ pvr_cccb->cccb = pvr_fw_object_create_and_map(pvr_dev, size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &pvr_cccb->cccb_obj);
+ if (IS_ERR(pvr_cccb->cccb)) {
+ err = PTR_ERR(pvr_cccb->cccb);
+ goto err_free_ctrl;
+ }
+
+ pvr_fw_object_get_fw_addr(pvr_cccb->ctrl_obj, &pvr_cccb->ctrl_fw_addr);
+ pvr_fw_object_get_fw_addr(pvr_cccb->cccb_obj, &pvr_cccb->cccb_fw_addr);
+
+ return 0;
+
+err_free_ctrl:
+ pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj);
+
+ return err;
+}
+
+/**
+ * pvr_cccb_fini() - Release Client CCB structure
+ * @pvr_cccb: Client CCB to release.
+ */
+void
+pvr_cccb_fini(struct pvr_cccb *pvr_cccb)
+{
+ pvr_fw_object_unmap_and_destroy(pvr_cccb->cccb_obj);
+ pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj);
+}
+
+/**
+ * pvr_cccb_cmdseq_fits() - Check if a command sequence fits in the CCCB
+ * @pvr_cccb: Target Client CCB.
+ * @size: Size of the command sequence.
+ *
+ * Check if a command sequence fits in the CCCB we have at hand.
+ *
+ * Return:
+ * * true if the command sequence fits in the CCCB, or
+ * * false otherwise.
+ */
+bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size)
+{
+ struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+ u32 read_offset, remaining;
+ bool fits = false;
+
+ read_offset = READ_ONCE(ctrl->read_offset);
+ remaining = pvr_cccb->size - pvr_cccb->write_offset;
+
+ /* Always ensure we have enough room for a padding command at the end of the CCCB.
+ * If our command sequence does not fit, reserve the remaining space for a padding
+ * command.
+ */
+ if (size + PADDING_COMMAND_SIZE > remaining)
+ size += remaining;
+
+ if (get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size) >= size)
+ fits = true;
+
+ return fits;
+}
+
+/**
+ * pvr_cccb_write_command_with_header() - Write a command + command header to a
+ * Client CCB
+ * @pvr_cccb: Target Client CCB.
+ * @cmd_type: Client CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
+ * @cmd_size: Size of command in bytes.
+ * @cmd_data: Pointer to command to write.
+ * @ext_job_ref: External job reference.
+ * @int_job_ref: Internal job reference.
+ *
+ * Caller must make sure there's enough space in CCCB to queue this command. This
+ * can be done by calling pvr_cccb_cmdseq_fits().
+ *
+ * This function is not protected by any lock. The caller must ensure there's
+ * no concurrent caller, which should be guaranteed by the drm_sched model (job
+ * submission is serialized in drm_sched_main()).
+ */
+void
+pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, u32 cmd_type, u32 cmd_size,
+ void *cmd_data, u32 ext_job_ref, u32 int_job_ref)
+{
+ u32 sz_with_hdr = pvr_cccb_get_size_of_cmd_with_hdr(cmd_size);
+ struct rogue_fwif_ccb_cmd_header cmd_header = {
+ .cmd_type = cmd_type,
+ .cmd_size = ALIGN(cmd_size, 8),
+ .ext_job_ref = ext_job_ref,
+ .int_job_ref = int_job_ref,
+ };
+ struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+ u32 remaining = pvr_cccb->size - pvr_cccb->write_offset;
+ u32 required_size, cccb_space, read_offset;
+
+ /*
+ * Always ensure we have enough room for a padding command at the end of
+ * the CCCB.
+ */
+ if (remaining < sz_with_hdr + PADDING_COMMAND_SIZE) {
+ /*
+ * Command would need to wrap, so we need to pad the remainder
+ * of the CCCB.
+ */
+ required_size = sz_with_hdr + remaining;
+ } else {
+ required_size = sz_with_hdr;
+ }
+
+ read_offset = READ_ONCE(ctrl->read_offset);
+ cccb_space = get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size);
+ if (WARN_ON(cccb_space < required_size))
+ return;
+
+ if (required_size != sz_with_hdr) {
+ /* Add padding command */
+ struct rogue_fwif_ccb_cmd_header pad_cmd = {
+ .cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_PADDING,
+ .cmd_size = remaining - sizeof(pad_cmd),
+ };
+
+ memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &pad_cmd, sizeof(pad_cmd));
+ pvr_cccb->write_offset = 0;
+ }
+
+ memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &cmd_header, sizeof(cmd_header));
+ memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset + sizeof(cmd_header)], cmd_data, cmd_size);
+ pvr_cccb->write_offset += sz_with_hdr;
+}
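+
+/*
+ * Illustrative wrap case (sizes assumed, not taken from the FW interface):
+ * in a 4096-byte CCCB with write_offset == 4064, a command needing 48 bytes
+ * including its header cannot fit in the 32 remaining bytes while still
+ * leaving room for a padding command, so a padding command filling those 32
+ * bytes is written, write_offset wraps to 0 and the real command is written
+ * at the start of the CCCB.
+ */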
+
+static void fill_cmd_kick_data(struct pvr_cccb *cccb, u32 ctx_fw_addr,
+ struct pvr_hwrt_data *hwrt,
+ struct rogue_fwif_kccb_cmd_kick_data *k)
+{
+ k->context_fw_addr = ctx_fw_addr;
+ k->client_woff_update = cccb->write_offset;
+ k->client_wrap_mask_update = cccb->wrap_mask;
+
+ if (hwrt) {
+ u32 cleanup_state_offset = offsetof(struct rogue_fwif_hwrtdata, cleanup_state);
+
+ pvr_fw_object_get_fw_addr_offset(hwrt->fw_obj, cleanup_state_offset,
+ &k->cleanup_ctl_fw_addr[k->num_cleanup_ctl++]);
+ }
+}
+
+/**
+ * pvr_cccb_send_kccb_kick() - Send KCCB kick to trigger command processing
+ * @pvr_dev: Device pointer.
+ * @pvr_cccb: Pointer to CCCB to process.
+ * @cctx_fw_addr: FW virtual address for context owning this Client CCB.
+ * @hwrt: HWRT data set associated with this kick. May be %NULL.
+ *
+ * You must call pvr_kccb_reserve_slot() and wait for the returned fence to
+ * signal (if this function didn't return NULL) before calling
+ * pvr_cccb_send_kccb_kick().
+ */
+void
+pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
+ struct pvr_hwrt_data *hwrt)
+{
+ struct rogue_fwif_kccb_cmd cmd_kick = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_KICK,
+ };
+
+ fill_cmd_kick_data(pvr_cccb, cctx_fw_addr, hwrt, &cmd_kick.cmd_data.cmd_kick_data);
+
+ /* Make sure the writes to the CCCB are flushed before sending the KICK. */
+ wmb();
+
+ pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL);
+}
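+
+/*
+ * Sketch of the expected submission order (illustrative only, variable names
+ * hypothetical; assumes a KCCB slot has been reserved and the device is
+ * powered):
+ *
+ *   if (pvr_cccb_cmdseq_fits(cccb, cmd_seq_size)) {
+ *           pvr_cccb_write_command_with_header(cccb, cmd_type, cmd_size,
+ *                                              cmd_data, ext_ref, int_ref);
+ *           pvr_cccb_send_kccb_kick(pvr_dev, cccb, ctx_fw_addr, hwrt);
+ *   }
+ */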
+
+void
+pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *geom_cccb,
+ struct pvr_cccb *frag_cccb,
+ u32 geom_ctx_fw_addr,
+ u32 frag_ctx_fw_addr,
+ struct pvr_hwrt_data *hwrt,
+ bool frag_is_pr)
+{
+ struct rogue_fwif_kccb_cmd cmd_kick = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK,
+ };
+
+ fill_cmd_kick_data(geom_cccb, geom_ctx_fw_addr, hwrt,
+ &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.geom_cmd_kick_data);
+
+ /* If this is a partial-render job, we don't attach resources to cleanup-ctl array,
+ * because the resources are already retained by the geometry job.
+ */
+ fill_cmd_kick_data(frag_cccb, frag_ctx_fw_addr, frag_is_pr ? NULL : hwrt,
+ &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.frag_cmd_kick_data);
+
+ /* Make sure the writes to the CCCB are flushed before sending the KICK. */
+ wmb();
+
+ pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h
new file mode 100644
index 000000000000..943fe8f2c963
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_cccb.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CCCB_H
+#define PVR_CCCB_H
+
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_shared.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define PADDING_COMMAND_SIZE sizeof(struct rogue_fwif_ccb_cmd_header)
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declaration from pvr_hwrt.h. */
+struct pvr_hwrt_data;
+
+struct pvr_cccb {
+ /** @ctrl_obj: FW object representing CCCB control structure. */
+ struct pvr_fw_object *ctrl_obj;
+
+ /** @cccb_obj: FW object representing CCCB. */
+ struct pvr_fw_object *cccb_obj;
+
+ /**
+ * @ctrl: Kernel mapping of CCCB control structure. Access must be
+ * serialized by the caller (job submission is serialized by drm_sched).
+ */
+ struct rogue_fwif_cccb_ctl *ctrl;
+
+ /**
+ * @cccb: Kernel mapping of CCCB. Access must be serialized by the
+ * caller.
+ */
+ u8 *cccb;
+
+ /** @ctrl_fw_addr: FW virtual address of CCCB control structure. */
+ u32 ctrl_fw_addr;
+ /** @cccb_fw_addr: FW virtual address of CCCB. */
+ u32 cccb_fw_addr;
+
+ /** @size: Size of CCCB in bytes. */
+ size_t size;
+
+ /** @write_offset: CCCB write offset. */
+ u32 write_offset;
+
+ /** @wrap_mask: CCCB wrap mask. */
+ u32 wrap_mask;
+};
+
+int pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *cccb,
+ u32 size_log2, const char *name);
+void pvr_cccb_fini(struct pvr_cccb *cccb);
+
+void pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb,
+ u32 cmd_type, u32 cmd_size, void *cmd_data,
+ u32 ext_job_ref, u32 int_job_ref);
+void pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
+ struct pvr_hwrt_data *hwrt);
+void pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *geom_cccb,
+ struct pvr_cccb *frag_cccb,
+ u32 geom_ctx_fw_addr,
+ u32 frag_ctx_fw_addr,
+ struct pvr_hwrt_data *hwrt,
+ bool frag_is_pr);
+bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size);
+
+/**
+ * pvr_cccb_get_size_of_cmd_with_hdr() - Get the size of a command and its header.
+ * @cmd_size: Command size.
+ *
+ * Returns the size of the command and its header.
+ */
+static __always_inline u32
+pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size)
+{
+ WARN_ON(!IS_ALIGNED(cmd_size, 8));
+ return sizeof(struct rogue_fwif_ccb_cmd_header) + ALIGN(cmd_size, 8);
+}
+
+/**
+ * pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB.
+ * @pvr_cccb: Target Client CCB.
+ * @size: Command sequence size.
+ *
+ * Returns:
+ * * true if the CCCB is big enough to contain the command sequence, or
+ * * false otherwise.
+ */
+static __always_inline bool
+pvr_cccb_cmdseq_can_fit(struct pvr_cccb *pvr_cccb, size_t size)
+{
+ /* We divide the capacity by two to simplify our CCCB fencing logic:
+ * we want to be sure that, no matter what we had queued before, we
+ * are able to either queue our command sequence at the end or add a
+ * padding command and queue the command sequence at the beginning
+ * of the CCCB. If the command sequence size is bigger than half the
+ * CCCB capacity, we'd have to queue the padding command and make sure
+ * the FW is done processing it before queueing our command sequence.
+ */
+ return size + PADDING_COMMAND_SIZE <= pvr_cccb->size / 2;
+}
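+
+/*
+ * For example (size chosen for illustration): with a 64 KiB Client CCB, this
+ * check accepts command sequences up to 32 KiB minus PADDING_COMMAND_SIZE,
+ * regardless of how full the ring currently is.
+ */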
+
+#endif /* PVR_CCCB_H */
diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c
new file mode 100644
index 000000000000..eded5e955cc0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_cccb.h"
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_job.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_resetframework.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_auth.h>
+#include <drm/drm_managed.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+static int
+remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
+ enum pvr_context_priority *priority_out)
+{
+ switch (uapi_priority) {
+ case DRM_PVR_CTX_PRIORITY_LOW:
+ *priority_out = PVR_CTX_PRIORITY_LOW;
+ break;
+ case DRM_PVR_CTX_PRIORITY_NORMAL:
+ *priority_out = PVR_CTX_PRIORITY_MEDIUM;
+ break;
+ case DRM_PVR_CTX_PRIORITY_HIGH:
+ if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
+ return -EACCES;
+ *priority_out = PVR_CTX_PRIORITY_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_fw_obj_size(enum drm_pvr_ctx_type type)
+{
+ switch (type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ return sizeof(struct rogue_fwif_fwrendercontext);
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ return sizeof(struct rogue_fwif_fwcomputecontext);
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ return sizeof(struct rogue_fwif_fwtransfercontext);
+ }
+
+ return -EINVAL;
+}
+
+static int
+process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ u64 stream_user_ptr, u32 stream_size, void *dest)
+{
+ void *stream;
+ int err;
+
+ stream = kzalloc(stream_size, GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
+ if (err)
+ goto err_free;
+
+ kfree(stream);
+
+ return 0;
+
+err_free:
+ kfree(stream);
+
+ return err;
+}
+
+static int init_render_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
+ struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;
+
+ if (!args->static_context_state_len)
+ return -EINVAL;
+
+ static_rendercontext_state = &fw_render_context->static_render_context_state;
+
+ /* Copy static render context state from userspace. */
+ return process_static_context_state(ctx->pvr_dev,
+ &pvr_static_render_context_state_stream,
+ args->static_context_state,
+ args->static_context_state_len,
+ &static_rendercontext_state->ctxswitch_regs[0]);
+}
+
+static int init_compute_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
+ struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;
+
+ if (!args->static_context_state_len)
+ return -EINVAL;
+
+ ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;
+
+ /* Copy static compute context state from userspace. */
+ return process_static_context_state(ctx->pvr_dev,
+ &pvr_static_compute_context_state_stream,
+ args->static_context_state,
+ args->static_context_state_len,
+ ctxswitch_regs);
+}
+
+static int init_transfer_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ if (args->static_context_state_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int init_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ return init_render_fw_objs(ctx, args, fw_ctx_map);
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ return init_compute_fw_objs(ctx, args, fw_ctx_map);
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ return init_transfer_fw_objs(ctx, args, fw_ctx_map);
+ }
+
+ return -EINVAL;
+}
+
+static void
+ctx_fw_data_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_context *ctx = priv;
+
+ memcpy(cpu_ptr, ctx->data, ctx->data_size);
+}
+
+/**
+ * pvr_context_destroy_queues() - Destroy all queues attached to a context.
+ * @ctx: Context to destroy queues on.
+ *
+ * Should be called when the last reference to a context object is dropped.
+ * It releases all resources attached to the queues bound to this context.
+ */
+static void pvr_context_destroy_queues(struct pvr_context *ctx)
+{
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ pvr_queue_destroy(ctx->queues.fragment);
+ pvr_queue_destroy(ctx->queues.geometry);
+ break;
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ pvr_queue_destroy(ctx->queues.compute);
+ break;
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ pvr_queue_destroy(ctx->queues.transfer);
+ break;
+ }
+}
+
+/**
+ * pvr_context_create_queues() - Create all queues attached to a context.
+ * @ctx: Context to create queues on.
+ * @args: Context creation arguments passed by userspace.
+ * @fw_ctx_map: CPU mapping of the FW context object.
+ *
+ * Return:
+ * * 0 on success, or
+ * * A negative error code otherwise.
+ */
+static int pvr_context_create_queues(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ int err;
+
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.geometry)) {
+ err = PTR_ERR(ctx->queues.geometry);
+ ctx->queues.geometry = NULL;
+ goto err_destroy_queues;
+ }
+
+ ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.fragment)) {
+ err = PTR_ERR(ctx->queues.fragment);
+ ctx->queues.fragment = NULL;
+ goto err_destroy_queues;
+ }
+ return 0;
+
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.compute)) {
+ err = PTR_ERR(ctx->queues.compute);
+ ctx->queues.compute = NULL;
+ goto err_destroy_queues;
+ }
+ return 0;
+
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.transfer)) {
+ err = PTR_ERR(ctx->queues.transfer);
+ ctx->queues.transfer = NULL;
+ goto err_destroy_queues;
+ }
+ return 0;
+ }
+
+ return -EINVAL;
+
+err_destroy_queues:
+ pvr_context_destroy_queues(ctx);
+ return err;
+}
+
+/**
+ * pvr_context_kill_queues() - Kill queues attached to context.
+ * @ctx: Context to kill queues on.
+ *
+ * Killing the queues makes them unusable for future jobs, while still giving
+ * currently submitted jobs a chance to finish. Queue resources will stay
+ * around until pvr_context_destroy_queues() is called.
+ */
+static void pvr_context_kill_queues(struct pvr_context *ctx)
+{
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ pvr_queue_kill(ctx->queues.fragment);
+ pvr_queue_kill(ctx->queues.geometry);
+ break;
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ pvr_queue_kill(ctx->queues.compute);
+ break;
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ pvr_queue_kill(ctx->queues.transfer);
+ break;
+ }
+}
+
+/**
+ * pvr_context_create() - Create a context.
+ * @pvr_file: File to attach the created context to.
+ * @args: Context creation arguments.
+ *
+ * Return:
+ * * 0 on success, or
+ * * A negative error code on failure.
+ */
+int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
+{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ struct pvr_context *ctx;
+ int ctx_size;
+ int err;
+
+ /* Context creation flags are currently unused and must be zero. */
+ if (args->flags)
+ return -EINVAL;
+
+ ctx_size = get_fw_obj_size(args->type);
+ if (ctx_size < 0)
+ return ctx_size;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->data_size = ctx_size;
+ ctx->type = args->type;
+ ctx->flags = args->flags;
+ ctx->pvr_dev = pvr_dev;
+ kref_init(&ctx->ref_count);
+
+ err = remap_priority(pvr_file, args->priority, &ctx->priority);
+ if (err)
+ goto err_free_ctx;
+
+ ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (IS_ERR(ctx->vm_ctx)) {
+ err = PTR_ERR(ctx->vm_ctx);
+ goto err_free_ctx;
+ }
+
+ ctx->data = kzalloc(ctx_size, GFP_KERNEL);
+ if (!ctx->data) {
+ err = -ENOMEM;
+ goto err_put_vm;
+ }
+
+ err = pvr_context_create_queues(ctx, args, ctx->data);
+ if (err)
+ goto err_free_ctx_data;
+
+ err = init_fw_objs(ctx, args, ctx->data);
+ if (err)
+ goto err_destroy_queues;
+
+ err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ ctx_fw_data_init, ctx, &ctx->fw_obj);
+ if (err)
+ goto err_destroy_queues;
+
+ err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_destroy_fw_obj;
+
+ err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err) {
+ /*
+ * It's possible that another thread could have taken a reference on the context at
+ * this point as it is in the ctx_ids xarray. Therefore instead of directly
+ * destroying the context, drop a reference instead.
+ */
+ pvr_context_put(ctx);
+ return err;
+ }
+
+ return 0;
+
+err_destroy_fw_obj:
+ pvr_fw_object_destroy(ctx->fw_obj);
+
+err_destroy_queues:
+ pvr_context_destroy_queues(ctx);
+
+err_free_ctx_data:
+ kfree(ctx->data);
+
+err_put_vm:
+ pvr_vm_context_put(ctx->vm_ctx);
+
+err_free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+static void
+pvr_context_release(struct kref *ref_count)
+{
+ struct pvr_context *ctx =
+ container_of(ref_count, struct pvr_context, ref_count);
+ struct pvr_device *pvr_dev = ctx->pvr_dev;
+
+ xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
+ pvr_context_destroy_queues(ctx);
+ pvr_fw_object_destroy(ctx->fw_obj);
+ kfree(ctx->data);
+ pvr_vm_context_put(ctx->vm_ctx);
+ kfree(ctx);
+}
+
+/**
+ * pvr_context_put() - Release reference on context
+ * @ctx: Target context.
+ */
+void
+pvr_context_put(struct pvr_context *ctx)
+{
+ if (ctx)
+ kref_put(&ctx->ref_count, pvr_context_release);
+}
+
+/**
+ * pvr_context_destroy() - Destroy context
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Userspace context handle.
+ *
+ * Removes context from context list and drops initial reference. Context will
+ * then be destroyed once all outstanding references are dropped.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if context not in context list.
+ */
+int
+pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);
+
+ if (!ctx)
+ return -EINVAL;
+
+ /* Make sure nothing can be queued to the queues after that point. */
+ pvr_context_kill_queues(ctx);
+
+ /* Release the reference held by the handle set. */
+ pvr_context_put(ctx);
+
+ return 0;
+}
+
+/**
+ * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all contexts associated with @pvr_file from the device context list and drops initial
+ * references. Contexts will then be destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_context *ctx;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->ctx_handles, handle, ctx)
+ pvr_context_destroy(pvr_file, handle);
+}
+
+/**
+ * pvr_context_device_init() - Device level initialization for queue related resources.
+ * @pvr_dev: The device to initialize.
+ */
+void pvr_context_device_init(struct pvr_device *pvr_dev)
+{
+ xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+}
+
+/**
+ * pvr_context_device_fini() - Device level cleanup for queue related resources.
+ * @pvr_dev: The device to cleanup.
+ */
+void pvr_context_device_fini(struct pvr_device *pvr_dev)
+{
+ WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
+ xa_destroy(&pvr_dev->ctx_ids);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
new file mode 100644
index 000000000000..0c7b97dfa6ba
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CONTEXT_H
+#define PVR_CONTEXT_H
+
+#include <drm/gpu_scheduler.h>
+
+#include <linux/compiler_attributes.h>
+#include <linux/dma-fence.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+#include "pvr_queue.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+enum pvr_context_priority {
+ PVR_CTX_PRIORITY_LOW = 0,
+ PVR_CTX_PRIORITY_MEDIUM,
+ PVR_CTX_PRIORITY_HIGH,
+};
+
+/**
+ * struct pvr_context - Context data
+ */
+struct pvr_context {
+ /** @ref_count: Refcount for context. */
+ struct kref ref_count;
+
+ /** @pvr_dev: Pointer to owning device. */
+ struct pvr_device *pvr_dev;
+
+ /** @vm_ctx: Pointer to associated VM context. */
+ struct pvr_vm_context *vm_ctx;
+
+ /** @type: Type of context. */
+ enum drm_pvr_ctx_type type;
+
+ /** @flags: Context flags. */
+ u32 flags;
+
+ /** @priority: Context priority. */
+ enum pvr_context_priority priority;
+
+ /** @fw_obj: FW object representing FW-side context data. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @data: Pointer to local copy of FW context data. */
+ void *data;
+
+ /** @data_size: Size of FW context data, in bytes. */
+ u32 data_size;
+
+ /** @ctx_id: FW context ID. */
+ u32 ctx_id;
+
+ /**
+ * @faulty: Set to 1 when the context queues had unfinished jobs when
+ * a GPU reset happened.
+ *
+ * In that case, the context is in an inconsistent state and can't be
+ * used anymore.
+ */
+ atomic_t faulty;
+
+ /** @queues: Union containing all kind of queues. */
+ union {
+ struct {
+ /** @geometry: Geometry queue. */
+ struct pvr_queue *geometry;
+
+ /** @fragment: Fragment queue. */
+ struct pvr_queue *fragment;
+ };
+
+ /** @compute: Compute queue. */
+ struct pvr_queue *compute;
+
+ /** @transfer: Transfer queue. */
+ struct pvr_queue *transfer;
+ } queues;
+};
+
+static __always_inline struct pvr_queue *
+pvr_context_get_queue_for_job(struct pvr_context *ctx, enum drm_pvr_job_type type)
+{
+ switch (type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL;
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL;
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL;
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * pvr_context_get() - Take additional reference on context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ * * The requested context on success, or
+ * * %NULL if no context pointer passed.
+ */
+static __always_inline struct pvr_context *
+pvr_context_get(struct pvr_context *ctx)
+{
+ if (ctx)
+ kref_get(&ctx->ref_count);
+
+ return ctx;
+}
+
+/**
+ * pvr_context_lookup() - Lookup context pointer from handle and file.
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Context handle.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ * * The requested context on success, or
+ * * %NULL on failure (context does not exist, or does not belong to @pvr_file).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_context *ctx;
+
+ /* Take the array lock to protect against context removal. */
+ xa_lock(&pvr_file->ctx_handles);
+ ctx = pvr_context_get(xa_load(&pvr_file->ctx_handles, handle));
+ xa_unlock(&pvr_file->ctx_handles);
+
+ return ctx;
+}
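+
+/*
+ * Typical lookup pattern (illustrative sketch, names hypothetical):
+ *
+ *   struct pvr_context *ctx = pvr_context_lookup(pvr_file, handle);
+ *
+ *   if (!ctx)
+ *           return -EINVAL;
+ *   ... use ctx ...
+ *   pvr_context_put(ctx);
+ */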
+
+/**
+ * pvr_context_lookup_id() - Lookup context pointer from ID.
+ * @pvr_dev: Device pointer.
+ * @id: FW context ID.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ * * The requested context on success, or
+ * * %NULL on failure (context does not exist).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+ struct pvr_context *ctx;
+
+ /* Take the array lock to protect against context removal. */
+ xa_lock(&pvr_dev->ctx_ids);
+
+ /* Contexts are removed from the ctx_ids set in the context release path,
+ * meaning the ref_count reached zero before they get removed. We need
+ * to make sure we're not trying to acquire a context that's being
+ * destroyed.
+ */
+ ctx = xa_load(&pvr_dev->ctx_ids, id);
+ if (ctx && !kref_get_unless_zero(&ctx->ref_count))
+ ctx = NULL;
+
+ xa_unlock(&pvr_dev->ctx_ids);
+
+ return ctx;
+}
+
+static __always_inline u32
+pvr_context_get_fw_addr(struct pvr_context *ctx)
+{
+ u32 ctx_fw_addr = 0;
+
+ pvr_fw_object_get_fw_addr(ctx->fw_obj, &ctx_fw_addr);
+
+ return ctx_fw_addr;
+}
+
+void pvr_context_put(struct pvr_context *ctx);
+
+int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args);
+
+int pvr_context_destroy(struct pvr_file *pvr_file, u32 handle);
+
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file);
+
+void pvr_context_device_init(struct pvr_device *pvr_dev);
+
+void pvr_context_device_fini(struct pvr_device *pvr_dev);
+
+#endif /* PVR_CONTEXT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
new file mode 100644
index 000000000000..6b77c9b4bde8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_debugfs.h"
+
+#include "pvr_device.h"
+#include "pvr_fw_trace.h"
+#include "pvr_params.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+static const struct pvr_debugfs_entry pvr_debugfs_entries[] = {
+ {"pvr_params", pvr_params_debugfs_init},
+ {"pvr_fw", pvr_fw_trace_debugfs_init},
+};
+
+void
+pvr_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm_dev = minor->dev;
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct dentry *root = minor->debugfs_root;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(entry->name, root);
+ if (IS_ERR(dir)) {
+ drm_warn(drm_dev,
+ "failed to create debugfs dir '%s' (err=%d)",
+ entry->name, (int)PTR_ERR(dir));
+ continue;
+ }
+
+ entry->init(pvr_dev, dir);
+ }
+}
+
+/*
+ * Since all entries are created under &drm_minor->debugfs_root, there's no
+ * need for a pvr_debugfs_fini() as DRM will clean up everything under its root
+ * automatically.
+ */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.h b/drivers/gpu/drm/imagination/pvr_debugfs.h
new file mode 100644
index 000000000000..ebacbd13b84a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEBUGFS_H
+#define PVR_DEBUGFS_H
+
+/* Forward declaration from <drm/drm_drv.h>. */
+struct drm_minor;
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+struct pvr_debugfs_entry {
+ const char *name;
+ void (*init)(struct pvr_device *pvr_dev, struct dentry *dir);
+};
+
+void pvr_debugfs_init(struct drm_minor *minor);
+#else /* defined(CONFIG_DEBUG_FS) */
+#include <linux/compiler_attributes.h>
+
+static __always_inline void pvr_debugfs_init(struct drm_minor *minor) {}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_DEBUGFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
new file mode 100644
index 000000000000..1704c0268589
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+
+#include "pvr_fw.h"
+#include "pvr_params.h"
+#include "pvr_power.h"
+#include "pvr_queue.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_stream.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_print.h>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/compiler_attributes.h>
+#include <linux/compiler_types.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Major number for the supported version of the firmware. */
+#define PVR_FW_VERSION_MAJOR 1
+
+/**
+ * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
+ * control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->regs.
+ *
+ * This method of mapping the device control registers into memory ensures that
+ * they are unmapped when the driver is detached (i.e. no explicit cleanup is
+ * required).
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by devm_platform_ioremap_resource().
+ */
+static int
+pvr_device_reg_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+ struct resource *regs_resource;
+ void __iomem *regs;
+
+ pvr_dev->regs_resource = NULL;
+ pvr_dev->regs = NULL;
+
+ regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
+ if (IS_ERR(regs))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
+ "failed to ioremap gpu registers\n");
+
+ pvr_dev->regs = regs;
+ pvr_dev->regs_resource = regs_resource;
+
+ return 0;
+}
+
+/**
+ * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
+ * struct pvr_device->mem_clk.
+ *
+ * Three clocks are required by the PowerVR device: core, sys and mem. On
+ * return, this function guarantees that the clocks are in one of the following
+ * states:
+ *
+ * * All successfully initialized,
+ * * Core errored, sys and mem uninitialized,
+ * * Core deinitialized, sys errored, mem uninitialized, or
+ * * Core and sys deinitialized, mem errored.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by devm_clk_get(), or
+ * * Any error returned by devm_clk_get_optional().
+ */
+static int pvr_device_clk_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct clk *core_clk;
+ struct clk *sys_clk;
+ struct clk *mem_clk;
+
+ core_clk = devm_clk_get(drm_dev->dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
+ "failed to get core clock\n");
+
+ sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
+ if (IS_ERR(sys_clk))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
+ "failed to get sys clock\n");
+
+ mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
+ if (IS_ERR(mem_clk))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
+ "failed to get mem clock\n");
+
+ pvr_dev->core_clk = core_clk;
+ pvr_dev->sys_clk = sys_clk;
+ pvr_dev->mem_clk = mem_clk;
+
+ return 0;
+}
+
+/**
+ * pvr_device_process_active_queues() - Process all queue related events.
+ * @pvr_dev: PowerVR device to check
+ *
+ * This is called any time we receive a FW event. It iterates over all
+ * active queues and calls pvr_queue_process() on them.
+ */
+static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
+{
+ struct pvr_queue *queue, *tmp_queue;
+ LIST_HEAD(active_queues);
+
+ mutex_lock(&pvr_dev->queues.lock);
+
+ /* Move all active queues to a temporary list. Queues that remain
+ * active after we're done processing them are re-inserted to
+ * the queues.active list by pvr_queue_process().
+ */
+ list_splice_init(&pvr_dev->queues.active, &active_queues);
+
+ list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
+ pvr_queue_process(queue);
+
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
+{
+ struct pvr_device *pvr_dev = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* We are in the threaded handler, so we can keep dequeuing events until
+ * none are left. This should reduce the number of interrupts when the GPU
+ * is receiving a massive amount of short jobs.
+ */
+ while (pvr_fw_irq_pending(pvr_dev)) {
+ pvr_fw_irq_clear(pvr_dev);
+
+ if (pvr_dev->fw_dev.booted) {
+ pvr_fwccb_process(pvr_dev);
+ pvr_kccb_wake_up_waiters(pvr_dev);
+ pvr_device_process_active_queues(pvr_dev);
+ }
+
+ pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* Unmask FW irqs before returning, so new interrupts can be received. */
+ pvr_fw_irq_enable(pvr_dev);
+ return ret;
+}
+
+static irqreturn_t pvr_device_irq_handler(int irq, void *data)
+{
+ struct pvr_device *pvr_dev = data;
+
+ if (!pvr_fw_irq_pending(pvr_dev))
+ return IRQ_NONE; /* Spurious IRQ - ignore. */
+
+ /* Mask the FW interrupts before waking up the thread. Will be unmasked
+ * when the thread handler is done processing events.
+ */
+ pvr_fw_irq_disable(pvr_dev);
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error returned by platform_get_irq(), or
+ * * Any error returned by request_threaded_irq().
+ */
+static int
+pvr_device_irq_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+
+ init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+
+ pvr_dev->irq = platform_get_irq(plat_dev, 0);
+ if (pvr_dev->irq < 0)
+ return pvr_dev->irq;
+
+ /* Clear any pending events before requesting the IRQ line. */
+ pvr_fw_irq_clear(pvr_dev);
+ pvr_fw_irq_enable(pvr_dev);
+
+ return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
+ pvr_device_irq_thread_handler,
+ IRQF_SHARED, "gpu", pvr_dev);
+}
+
+/**
+ * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_irq_fini(struct pvr_device *pvr_dev)
+{
+ free_irq(pvr_dev->irq, pvr_dev);
+}
+
+/**
+ * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
+ * @pvr_dev: Target PowerVR device.
+ * @base: First part of the filename.
+ * @major: Major version number.
+ *
+ * A PowerVR firmware filename consists of three parts separated by underscores
+ * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
+ * of @base, the second part is the hardware version string derived from @pvr_dev
+ * and the final part is the firmware version number constructed from @major with
+ * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
+ *
+ * The returned string will have been slab allocated and must be freed with
+ * kfree().
+ *
+ * Return:
+ * * The constructed filename on success, or
+ * * Any error returned by kasprintf().
+ */
+static char *
+pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
+ u8 major)
+{
+ struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+
+ return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
+ gpu_id->v, gpu_id->n, gpu_id->c, major);
+}
+
+static void
+pvr_release_firmware(void *data)
+{
+ struct pvr_device *pvr_dev = data;
+
+ release_firmware(pvr_dev->fw_dev.firmware);
+}
+
+/**
+ * pvr_request_firmware() - Load firmware for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * See pvr_build_firmware_filename() for details on firmware file naming.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_build_firmware_filename(), or
+ * * Any error returned by request_firmware().
+ */
+static int
+pvr_request_firmware(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = &pvr_dev->base;
+ char *filename;
+ const struct firmware *fw;
+ int err;
+
+ filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
+ PVR_FW_VERSION_MAJOR);
+ if (!filename)
+ return -ENOMEM;
+
+ /*
+ * request_firmware() takes its own copy of the filename, so we can
+ * free ours before returning.
+ */
+ err = request_firmware(&fw, filename, pvr_dev->base.dev);
+ if (err) {
+ drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
+ filename, err);
+ goto err_free_filename;
+ }
+
+ drm_info(drm_dev, "loaded firmware %s\n", filename);
+ kfree(filename);
+
+ pvr_dev->fw_dev.firmware = fw;
+
+ return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);
+
+err_free_filename:
+ kfree(filename);
+
+ return err;
+}
+
+/**
+ * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device.gpu_id.
+ */
+static void
+pvr_load_gpu_id(struct pvr_device *pvr_dev)
+{
+ struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+ u64 bvnc;
+
+ /*
+ * Try reading the BVNC using the newer (cleaner) method first. If the
+ * B value is zero, fall back to the older method.
+ */
+ bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);
+
+ gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
+ if (gpu_id->b != 0) {
+ gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
+ gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
+ gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
+ } else {
+ u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
+ u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
+ u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);
+
+ gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
+ gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
+ gpu_id->n = FIELD_GET(0xFF00, core_id_config);
+ gpu_id->c = FIELD_GET(0x00FF, core_id_config);
+ }
+}
+
+/**
+ * pvr_set_dma_info() - Set PowerVR device DMA information
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets the DMA mask and max segment size for the PowerVR device.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by PVR_FEATURE_VALUE(), or
+ * * Any error returned by dma_set_mask().
+ */
+static int
+pvr_set_dma_info(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u16 phys_bus_width;
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+ if (err) {
+ drm_err(drm_dev, "Failed to get device physical bus width\n");
+ return err;
+ }
+
+ err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
+ if (err) {
+ drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
+ return err;
+ }
+
+ dma_set_max_seg_size(drm_dev->dev, UINT_MAX);
+
+ return 0;
+}
+
+/**
+ * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * The following steps are taken to ensure the device is ready:
+ *
+ * 1. Read the hardware version information from control registers,
+ * 2. Initialise the hardware feature information,
+ * 3. Setup the device DMA information,
+ * 4. Setup the device-scoped memory context, and
+ * 5. Load firmware into the device.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the firmware processor is not supported,
+ * * Any error returned by pvr_request_firmware(),
+ * * Any error returned by pvr_fw_validate_init_device_info(),
+ * * Any error returned by pvr_set_dma_info(),
+ * * Any error returned by pvr_vm_create_context(), or
+ * * Any error returned by pvr_fw_init().
+ */
+static int
+pvr_device_gpu_init(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ pvr_load_gpu_id(pvr_dev);
+
+ err = pvr_request_firmware(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_fw_validate_init_device_info(pvr_dev);
+ if (err)
+ return err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, meta))
+ pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
+ else if (PVR_HAS_FEATURE(pvr_dev, mips))
+ pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
+ else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
+ pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
+ else
+ return -EINVAL;
+
+ pvr_stream_create_musthave_masks(pvr_dev);
+
+ err = pvr_set_dma_info(pvr_dev);
+ if (err)
+ return err;
+
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
+ if (IS_ERR(pvr_dev->kernel_vm_ctx))
+ return PTR_ERR(pvr_dev->kernel_vm_ctx);
+ }
+
+ err = pvr_fw_init(pvr_dev);
+ if (err)
+ goto err_vm_ctx_put;
+
+ return 0;
+
+err_vm_ctx_put:
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
+ pvr_dev->kernel_vm_ctx = NULL;
+ }
+
+ return err;
+}
+
+/**
+ * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_gpu_fini(struct pvr_device *pvr_dev)
+{
+ pvr_fw_fini(pvr_dev);
+
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
+ pvr_dev->kernel_vm_ctx = NULL;
+ }
+}
+
+/**
+ * pvr_device_init() - Initialize a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * If this function returns successfully, the device will have been fully
+ * initialized. Otherwise, any parts of the device initialized before an error
+ * occurs will be de-initialized before returning.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_device_params_init(),
+ * * Any error returned by pvr_device_clk_init(),
+ * * Any error returned by pm_runtime_resume_and_get(),
+ * * Any error returned by pvr_device_reg_init(),
+ * * Any error returned by pvr_device_gpu_init(), or
+ * * Any error returned by pvr_device_irq_init().
+ */
+int
+pvr_device_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct device *dev = drm_dev->dev;
+ int err;
+
+ /*
+ * Setup device parameters. We do this first in case other steps
+ * depend on them.
+ */
+ err = pvr_device_params_init(&pvr_dev->params);
+ if (err)
+ return err;
+
+ /* Enable and initialize clocks required for the device to operate. */
+ err = pvr_device_clk_init(pvr_dev);
+ if (err)
+ return err;
+
+ /* Explicitly power the GPU so we can access control registers before the FW is booted. */
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ /* Map the control registers into memory. */
+ err = pvr_device_reg_init(pvr_dev);
+ if (err)
+ goto err_pm_runtime_put;
+
+ /* Perform GPU-specific initialization steps. */
+ err = pvr_device_gpu_init(pvr_dev);
+ if (err)
+ goto err_pm_runtime_put;
+
+ err = pvr_device_irq_init(pvr_dev);
+ if (err)
+ goto err_device_gpu_fini;
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_device_gpu_fini:
+ pvr_device_gpu_fini(pvr_dev);
+
+err_pm_runtime_put:
+ pm_runtime_put_sync_suspend(dev);
+
+ return err;
+}
+
+/**
+ * pvr_device_fini() - Deinitialize a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_device_fini(struct pvr_device *pvr_dev)
+{
+ /*
+ * Deinitialization stages are performed in reverse order compared to
+ * the initialization stages in pvr_device_init().
+ */
+ pvr_device_irq_fini(pvr_dev);
+ pvr_device_gpu_fini(pvr_dev);
+}
+
+bool
+pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
+{
+ switch (quirk) {
+ case 47217:
+ return PVR_HAS_QUIRK(pvr_dev, 47217);
+ case 48545:
+ return PVR_HAS_QUIRK(pvr_dev, 48545);
+ case 49927:
+ return PVR_HAS_QUIRK(pvr_dev, 49927);
+ case 51764:
+ return PVR_HAS_QUIRK(pvr_dev, 51764);
+ case 62269:
+ return PVR_HAS_QUIRK(pvr_dev, 62269);
+ default:
+ return false;
+ }
+}
+
+bool
+pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
+{
+ switch (enhancement) {
+ case 35421:
+ return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
+ case 42064:
+ return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
+ default:
+ return false;
+ }
+}
+
+/**
+ * pvr_device_has_feature() - Look up device feature based on feature definition
+ * @pvr_dev: Device pointer.
+ * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
+ *
+ * Returns:
+ * * %true if feature is present on device, or
+ * * %false if feature is not present on device.
+ */
+bool
+pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
+{
+ switch (feature) {
+ case PVR_FEATURE_CLUSTER_GROUPING:
+ return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);
+
+ case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
+ return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);
+
+ case PVR_FEATURE_FB_CDC_V4:
+ return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);
+
+ case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
+ return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);
+
+ case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
+ return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);
+
+ case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
+ return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);
+
+ case PVR_FEATURE_TESSELLATION:
+ return PVR_HAS_FEATURE(pvr_dev, tessellation);
+
+ case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
+ return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);
+
+ case PVR_FEATURE_VDM_DRAWINDIRECT:
+ return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);
+
+ case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
+ return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);
+
+ case PVR_FEATURE_ZLS_SUBTILE:
+ return PVR_HAS_FEATURE(pvr_dev, zls_subtile);
+
+ /* Derived features. */
+ case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
+ u8 cdm_control_stream_format = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
+ return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
+ }
+
+ case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
+ if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
+ u8 fbcdc_algorithm = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
+ return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
+ }
+ return false;
+
+ default:
+ WARN(true, "Looking up undefined feature %u\n", feature);
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
new file mode 100644
index 000000000000..2ca7e535799f
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -0,0 +1,721 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEVICE_H
+#define PVR_DEVICE_H
+
+#include "pvr_ccb.h"
+#include "pvr_device_info.h"
+#include "pvr_fw.h"
+#include "pvr_params.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mm.h>
+
+#include <linux/bits.h>
+#include <linux/compiler_attributes.h>
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+
+/* Forward declaration from <linux/clk.h>. */
+struct clk;
+
+/* Forward declaration from <linux/firmware.h>. */
+struct firmware;
+
+/**
+ * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
+ * @b: Branch ID.
+ * @v: Version ID.
+ * @n: Number of scalable units.
+ * @c: Config ID.
+ */
+struct pvr_gpu_id {
+ u16 b, v, n, c;
+};
+
+/**
+ * struct pvr_fw_version - Firmware version information
+ * @major: Major version number.
+ * @minor: Minor version number.
+ */
+struct pvr_fw_version {
+ u16 major, minor;
+};
+
+/**
+ * struct pvr_device - powervr-specific wrapper for &struct drm_device
+ */
+struct pvr_device {
+ /**
+ * @base: The underlying &struct drm_device.
+ *
+ * Do not access this member directly, instead call
+ * from_pvr_device().
+ */
+ struct drm_device base;
+
+ /** @gpu_id: GPU ID detected at runtime. */
+ struct pvr_gpu_id gpu_id;
+
+ /**
+ * @features: Hardware feature information.
+ *
+ * Do not access this member directly, instead use PVR_HAS_FEATURE()
+ * or PVR_FEATURE_VALUE() macros.
+ */
+ struct pvr_device_features features;
+
+ /**
+ * @quirks: Hardware quirk information.
+ *
+ * Do not access this member directly, instead use PVR_HAS_QUIRK().
+ */
+ struct pvr_device_quirks quirks;
+
+ /**
+ * @enhancements: Hardware enhancement information.
+ *
+ * Do not access this member directly, instead use
+ * PVR_HAS_ENHANCEMENT().
+ */
+ struct pvr_device_enhancements enhancements;
+
+ /** @fw_version: Firmware version detected at runtime. */
+ struct pvr_fw_version fw_version;
+
+ /** @regs_resource: Resource representing device control registers. */
+ struct resource *regs_resource;
+
+ /**
+ * @regs: Device control registers.
+ *
+ * These are mapped into memory when the device is initialized; this
+ * pointer refers to that mapping.
+ */
+ void __iomem *regs;
+
+ /**
+ * @core_clk: General core clock.
+ *
+ * This is the primary clock used by the entire GPU core.
+ */
+ struct clk *core_clk;
+
+ /**
+ * @sys_clk: Optional system bus clock.
+ *
+ * This may be used on some platforms to provide an independent clock to the SoC Interface
+ * (SOCIF). If present, this needs to be enabled/disabled together with @core_clk.
+ */
+ struct clk *sys_clk;
+
+ /**
+ * @mem_clk: Optional memory clock.
+ *
+ * This may be used on some platforms to provide an independent clock to the Memory
+ * Interface (MEMIF). If present, this needs to be enabled/disabled together with @core_clk.
+ */
+ struct clk *mem_clk;
+
+ /** @irq: IRQ number. */
+ int irq;
+
+ /** @fwccb: Firmware CCB. */
+ struct pvr_ccb fwccb;
+
+ /**
+ * @kernel_vm_ctx: Virtual memory context used for kernel mappings.
+ *
+ * This is used for mappings in the firmware address region when a META firmware processor
+ * is in use.
+ *
+ * When a MIPS firmware processor is in use, this will be %NULL.
+ */
+ struct pvr_vm_context *kernel_vm_ctx;
+
+ /** @fw_dev: Firmware related data. */
+ struct pvr_fw_device fw_dev;
+
+ /**
+ * @params: Device-specific parameters.
+ *
+ * The values of these parameters are initialized from the
+ * defaults specified as module parameters. They may be
+ * modified at runtime via debugfs (if enabled).
+ */
+ struct pvr_device_params params;
+
+ /** @stream_musthave_quirks: Bit array of "must-have" quirks for stream commands. */
+ u32 stream_musthave_quirks[PVR_STREAM_TYPE_MAX][PVR_STREAM_EXTHDR_TYPE_MAX];
+
+ /**
+ * @mmu_flush_cache_flags: Records which MMU caches require flushing
+ * before submitting the next job.
+ */
+ atomic_t mmu_flush_cache_flags;
+
+ /**
+ * @ctx_ids: Array of contexts belonging to this device. Array members
+ * are of type "struct pvr_context *".
+ *
+ * This array is used to allocate IDs used by the firmware.
+ */
+ struct xarray ctx_ids;
+
+ /**
+ * @free_list_ids: Array of free lists belonging to this device. Array members
+ * are of type "struct pvr_free_list *".
+ *
+ * This array is used to allocate IDs used by the firmware.
+ */
+ struct xarray free_list_ids;
+
+ /**
+ * @job_ids: Array of jobs belonging to this device. Array members
+ * are of type "struct pvr_job *".
+ */
+ struct xarray job_ids;
+
+ /**
+ * @queues: Queue-related fields.
+ */
+ struct {
+ /** @active: Active queue list. */
+ struct list_head active;
+
+ /** @idle: Idle queue list. */
+ struct list_head idle;
+
+ /** @lock: Lock protecting access to the active/idle lists. */
+ struct mutex lock;
+ } queues;
+
+ /**
+ * @watchdog: Watchdog for communications with firmware.
+ */
+ struct {
+ /** @work: Work item for watchdog callback. */
+ struct delayed_work work;
+
+ /**
+ * @old_kccb_cmds_executed: KCCB command execution count at last
+ * watchdog poll.
+ */
+ u32 old_kccb_cmds_executed;
+
+ /**
+ * @kccb_stall_count: Number of watchdog polls KCCB has been
+ * stalled for.
+ */
+ u32 kccb_stall_count;
+ } watchdog;
+
+ /**
+ * @kccb: Circular buffer for communications with firmware.
+ */
+ struct {
+ /** @ccb: Kernel CCB. */
+ struct pvr_ccb ccb;
+
+ /** @rtn_q: Waitqueue for KCCB command return waiters. */
+ wait_queue_head_t rtn_q;
+
+ /** @rtn_obj: Object representing KCCB return slots. */
+ struct pvr_fw_object *rtn_obj;
+
+ /**
+ * @rtn: Pointer to CPU mapping of KCCB return slots. Must be
+ * accessed by READ_ONCE()/WRITE_ONCE().
+ */
+ u32 *rtn;
+
+ /** @slot_count: Total number of KCCB slots available. */
+ u32 slot_count;
+
+ /** @reserved_count: Number of KCCB slots reserved for future use. */
+ u32 reserved_count;
+
+ /**
+ * @waiters: List of KCCB slot waiters.
+ */
+ struct list_head waiters;
+
+ /** @fence_ctx: KCCB fence context. */
+ struct {
+ /** @id: KCCB fence context ID allocated with dma_fence_context_alloc(). */
+ u64 id;
+
+ /** @seqno: Sequence number incremented each time a fence is created. */
+ atomic_t seqno;
+
+ /**
+ * @lock: Lock used to synchronize access to fences allocated by this
+ * context.
+ */
+ spinlock_t lock;
+ } fence_ctx;
+ } kccb;
+
+ /**
+ * @lost: %true if the device has been lost.
+ *
+ * This variable is set if the device has become irretrievably unavailable, e.g. if the
+ * firmware processor has stopped responding and can not be revived via a hard reset.
+ */
+ bool lost;
+
+ /**
+ * @reset_sem: Reset semaphore.
+ *
+ * GPU reset code will lock this for writing. Any code that submits commands to the firmware
+ * that isn't in an IRQ handler or on the scheduler workqueue must lock this for reading.
+ * Once this has been successfully locked, &pvr_dev->lost _must_ be checked, and -%EIO must
+ * be returned if it is set.
+ */
+ struct rw_semaphore reset_sem;
+
+ /** @sched_wq: Workqueue for schedulers. */
+ struct workqueue_struct *sched_wq;
+};
+
+/**
+ * struct pvr_file - powervr-specific data to be assigned to &struct
+ * drm_file.driver_priv
+ */
+struct pvr_file {
+ /**
+ * @file: A reference to the parent &struct drm_file.
+ *
+ * Do not access this member directly, instead call from_pvr_file().
+ */
+ struct drm_file *file;
+
+ /**
+ * @pvr_dev: A reference to the powervr-specific wrapper for the
+ * associated device. Saves on repeated calls to to_pvr_device().
+ */
+ struct pvr_device *pvr_dev;
+
+ /**
+ * @ctx_handles: Array of contexts belonging to this file. Array members
+ * are of type "struct pvr_context *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray ctx_handles;
+
+ /**
+ * @free_list_handles: Array of free lists belonging to this file. Array
+ * members are of type "struct pvr_free_list *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray free_list_handles;
+
+ /**
+ * @hwrt_handles: Array of HWRT datasets belonging to this file. Array
+ * members are of type "struct pvr_hwrt_dataset *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray hwrt_handles;
+
+ /**
+ * @vm_ctx_handles: Array of VM contexts belonging to this file. Array
+ * members are of type "struct pvr_vm_context *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray vm_ctx_handles;
+};
+
+/**
+ * PVR_HAS_FEATURE() - Tests whether a PowerVR device has a given feature
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @feature: [IN] Hardware feature name.
+ *
+ * Feature names are derived from those found in &struct pvr_device_features by
+ * dropping the 'has_' prefix, which is applied by this macro.
+ *
+ * Return:
+ * * true if the named feature is present in the hardware
+ * * false if the named feature is not present in the hardware
+ */
+#define PVR_HAS_FEATURE(pvr_dev, feature) ((pvr_dev)->features.has_##feature)
+
+/**
+ * PVR_FEATURE_VALUE() - Gets a PowerVR device feature value
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @feature: [IN] Feature name.
+ * @value_out: [OUT] Feature value.
+ *
+ * This macro will get a feature value for those features that have values.
+ * If the feature is not present, nothing will be stored to @value_out.
+ *
+ * Feature names are derived from those found in &struct pvr_device_features by
+ * dropping the 'has_' prefix.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if the named feature is not present in the hardware
+ */
+#define PVR_FEATURE_VALUE(pvr_dev, feature, value_out) \
+ ({ \
+ struct pvr_device *_pvr_dev = pvr_dev; \
+ int _ret = -EINVAL; \
+ if (_pvr_dev->features.has_##feature) { \
+ *(value_out) = _pvr_dev->features.feature; \
+ _ret = 0; \
+ } \
+ _ret; \
+ })
+
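+/*
+ * Example use of PVR_FEATURE_VALUE() (mirrors pvr_set_dma_info() in
+ * pvr_device.c):
+ *
+ *   u16 phys_bus_width;
+ *   int err;
+ *
+ *   err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+ *   if (err)
+ *           return err;
+ */
+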
+/**
+ * PVR_HAS_QUIRK() - Tests whether a physical device has a given quirk
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @quirk: [IN] Hardware quirk number.
+ *
+ * Quirk numbers are derived from those found in &struct pvr_device_quirks by
+ * dropping the 'has_brn' prefix, which is applied by this macro.
+ *
+ * Returns:
+ * * true if the quirk is present in the hardware, or
+ * * false if the quirk is not present in the hardware.
+ */
+#define PVR_HAS_QUIRK(pvr_dev, quirk) ((pvr_dev)->quirks.has_brn##quirk)
+
+/**
+ * PVR_HAS_ENHANCEMENT() - Tests whether a physical device has a given
+ * enhancement
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @enhancement: [IN] Hardware enhancement number.
+ *
+ * Enhancement numbers are derived from those found in
+ * &struct pvr_device_enhancements by dropping the 'has_ern' prefix, which is
+ * applied by this macro.
+ *
+ * Returns:
+ * * true if the enhancement is present in the hardware, or
+ * * false if the enhancement is not present in the hardware.
+ */
+#define PVR_HAS_ENHANCEMENT(pvr_dev, enhancement) ((pvr_dev)->enhancements.has_ern##enhancement)
+
+#define from_pvr_device(pvr_dev) (&(pvr_dev)->base)
+
+#define to_pvr_device(drm_dev) container_of_const(drm_dev, struct pvr_device, base)
+
+#define from_pvr_file(pvr_file) ((pvr_file)->file)
+
+#define to_pvr_file(file) ((file)->driver_priv)
+
+/**
+ * PVR_PACKED_BVNC() - Packs B, V, N and C values into a 64-bit unsigned integer
+ * @b: Branch ID.
+ * @v: Version ID.
+ * @n: Number of scalable units.
+ * @c: Config ID.
+ *
+ * The packed layout is as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ *
+ * pvr_gpu_id_to_packed_bvnc() should be used instead of this macro when a
+ * &struct pvr_gpu_id is available in order to ensure proper type checking.
+ *
+ * Return: Packed BVNC.
+ */
+/* clang-format off */
+#define PVR_PACKED_BVNC(b, v, n, c) \
+ ((((u64)(b) & GENMASK_ULL(15, 0)) << 48) | \
+ (((u64)(v) & GENMASK_ULL(15, 0)) << 32) | \
+ (((u64)(n) & GENMASK_ULL(15, 0)) << 16) | \
+ (((u64)(c) & GENMASK_ULL(15, 0)) << 0))
+/* clang-format on */
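+/* For example, PVR_PACKED_BVNC(4, 40, 2, 51) packs to 0x0004002800020033. */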
+
+/**
+ * pvr_gpu_id_to_packed_bvnc() - Packs B, V, N and C values into a 64-bit
+ * unsigned integer
+ * @gpu_id: GPU ID.
+ *
+ * The packed layout is as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ *
+ * This should be used in preference to PVR_PACKED_BVNC() when a &struct
+ * pvr_gpu_id is available in order to ensure proper type checking.
+ *
+ * Return: Packed BVNC.
+ */
+static __always_inline u64
+pvr_gpu_id_to_packed_bvnc(struct pvr_gpu_id *gpu_id)
+{
+ return PVR_PACKED_BVNC(gpu_id->b, gpu_id->v, gpu_id->n, gpu_id->c);
+}
+
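+/* Inverse of pvr_gpu_id_to_packed_bvnc(): unpack a packed 64-bit BVNC value. */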
+static __always_inline void
+packed_bvnc_to_pvr_gpu_id(u64 bvnc, struct pvr_gpu_id *gpu_id)
+{
+ gpu_id->b = (bvnc & GENMASK_ULL(63, 48)) >> 48;
+ gpu_id->v = (bvnc & GENMASK_ULL(47, 32)) >> 32;
+ gpu_id->n = (bvnc & GENMASK_ULL(31, 16)) >> 16;
+ gpu_id->c = bvnc & GENMASK_ULL(15, 0);
+}
+
+int pvr_device_init(struct pvr_device *pvr_dev);
+void pvr_device_fini(struct pvr_device *pvr_dev);
+void pvr_device_reset(struct pvr_device *pvr_dev);
+
+bool
+pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk);
+bool
+pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement);
+bool
+pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature);
+
+/**
+ * PVR_CR_FIELD_GET() - Extract a single field from a PowerVR control register
+ * @val: Value of the target register.
+ * @field: Field specifier, as defined in "pvr_rogue_cr_defs.h".
+ *
+ * Return: The extracted field.
+ */
+#define PVR_CR_FIELD_GET(val, field) FIELD_GET(~ROGUE_CR_##field##_CLRMSK, val)
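+/* Used as e.g. PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID) in pvr_load_gpu_id(). */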
+
+/**
+ * pvr_cr_read32() - Read a 32-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * Return: The value of the requested register.
+ */
+static __always_inline u32
+pvr_cr_read32(struct pvr_device *pvr_dev, u32 reg)
+{
+ return ioread32(pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_read64() - Read a 64-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * Return: The value of the requested register.
+ */
+static __always_inline u64
+pvr_cr_read64(struct pvr_device *pvr_dev, u32 reg)
+{
+ return ioread64(pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_write32() - Write to a 32-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ */
+static __always_inline void
+pvr_cr_write32(struct pvr_device *pvr_dev, u32 reg, u32 val)
+{
+ iowrite32(val, pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_write64() - Write to a 64-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ */
+static __always_inline void
+pvr_cr_write64(struct pvr_device *pvr_dev, u32 reg, u64 val)
+{
+ iowrite64(val, pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_poll_reg32() - Wait for a 32-bit register to match a given value by
+ * polling
+ * @pvr_dev: Target PowerVR device.
+ * @reg_addr: Address of register.
+ * @reg_value: Expected register value (after masking).
+ * @reg_mask: Mask of bits valid for comparison with @reg_value.
+ * @timeout_usec: Timeout length, in us.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ETIMEDOUT on timeout.
+ */
+static __always_inline int
+pvr_cr_poll_reg32(struct pvr_device *pvr_dev, u32 reg_addr, u32 reg_value,
+ u32 reg_mask, u64 timeout_usec)
+{
+ u32 value;
+
+ return readl_poll_timeout(pvr_dev->regs + reg_addr, value,
+ (value & reg_mask) == reg_value, 0, timeout_usec);
+}
+
+/**
+ * pvr_cr_poll_reg64() - Wait for a 64-bit register to match a given value by
+ * polling
+ * @pvr_dev: Target PowerVR device.
+ * @reg_addr: Address of register.
+ * @reg_value: Expected register value (after masking).
+ * @reg_mask: Mask of bits valid for comparison with @reg_value.
+ * @timeout_usec: Timeout length, in us.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ETIMEDOUT on timeout.
+ */
+static __always_inline int
+pvr_cr_poll_reg64(struct pvr_device *pvr_dev, u32 reg_addr, u64 reg_value,
+ u64 reg_mask, u64 timeout_usec)
+{
+ u64 value;
+
+ return readq_poll_timeout(pvr_dev->regs + reg_addr, value,
+ (value & reg_mask) == reg_value, 0, timeout_usec);
+}
+
+/**
+ * pvr_round_up_to_cacheline_size() - Round up a provided size to be cacheline
+ * aligned
+ * @pvr_dev: Target PowerVR device.
+ * @size: Initial size, in bytes.
+ *
+ * Returns:
+ * * Size aligned to cacheline size.
+ */
+static __always_inline size_t
+pvr_round_up_to_cacheline_size(struct pvr_device *pvr_dev, size_t size)
+{
+ u16 slc_cacheline_size_bits = 0;
+ u16 slc_cacheline_size_bytes;
+
+ WARN_ON(!PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_bits));
+ PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits,
+ &slc_cacheline_size_bits);
+ slc_cacheline_size_bytes = slc_cacheline_size_bits / 8;
+
+ return round_up(size, slc_cacheline_size_bytes);
+}
+
+/**
+ * DOC: IOCTL validation helpers
+ *
+ * To validate the constraints imposed on IOCTL argument structs, a collection
+ * of macros and helper functions exist in ``pvr_device.h``.
+ *
+ * Of the current helpers, it should only be necessary to call
+ * PVR_IOCTL_UNION_PADDING_CHECK() directly. This macro should be used once in
+ * every code path which extracts a union member from a struct passed from
+ * userspace.
+ */
+
+/**
+ * pvr_ioctl_union_padding_check() - Validate that the implicit padding between
+ * the end of a union member and the end of the union itself is zeroed.
+ * @instance: Pointer to the instance of the struct to validate.
+ * @union_offset: Offset into the type of @instance of the target union. Must
+ * be 64-bit aligned.
+ * @union_size: Size of the target union in the type of @instance. Must be
+ * 64-bit aligned.
+ * @member_size: Size of the target member in the target union specified by
+ * @union_offset and @union_size. It is assumed that the offset of the target
+ * member is zero relative to @union_offset. Must be 64-bit aligned.
+ *
+ * You probably want to use PVR_IOCTL_UNION_PADDING_CHECK() instead of calling
+ * this function directly, since that macro abstracts away much of the setup,
+ * and also provides some static validation. See its docs for details.
+ *
+ * Return:
+ * * %true if every byte between the end of the used member of the union and
+ * the end of that union is zeroed, or
+ * * %false otherwise.
+ */
+static __always_inline bool
+pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
+ size_t union_size, size_t member_size)
+{
+ /*
+ * void pointer arithmetic is technically illegal - cast to a byte
+ * pointer so this addition works safely.
+ */
+ void *padding_start = ((u8 *)instance) + union_offset + member_size;
+ size_t padding_size = union_size - member_size;
+
+ return !memchr_inv(padding_start, 0, padding_size);
+}
+
+/**
+ * PVR_STATIC_ASSERT_64BIT_ALIGNED() - Inline assertion for 64-bit alignment.
+ * @static_expr_: Target expression to evaluate.
+ *
+ * If @static_expr_ does not evaluate to a constant integer which would be a
+ * 64-bit aligned address (i.e. a multiple of 8), compilation will fail.
+ *
+ * Return:
+ * The value of @static_expr_.
+ */
+#define PVR_STATIC_ASSERT_64BIT_ALIGNED(static_expr_) \
+ ({ \
+ static_assert(((static_expr_) & (sizeof(u64) - 1)) == 0); \
+ (static_expr_); \
+ })
+
+/**
+ * PVR_IOCTL_UNION_PADDING_CHECK() - Validate that the implicit padding between
+ * the end of a union member and the end of the union itself is zeroed.
+ * @struct_instance_: An expression which evaluates to a pointer to a UAPI data
+ * struct.
+ * @union_: The name of the union member of @struct_instance_ to check. If the
+ * union member is nested within the type of @struct_instance_, this may
+ * contain the member access operator (".").
+ * @member_: The name of the member of @union_ to assess.
+ *
+ * This is a wrapper around pvr_ioctl_union_padding_check() which performs
+ * alignment checks and simplifies things for the caller.
+ *
+ * Return:
+ * * %true if every byte in @struct_instance_ between the end of @member_ and
+ * the end of @union_ is zeroed, or
+ * * %false otherwise.
+ */
+#define PVR_IOCTL_UNION_PADDING_CHECK(struct_instance_, union_, member_) \
+ ({ \
+ typeof(struct_instance_) __instance = (struct_instance_); \
+ size_t __union_offset = PVR_STATIC_ASSERT_64BIT_ALIGNED( \
+ offsetof(typeof(*__instance), union_)); \
+ size_t __union_size = PVR_STATIC_ASSERT_64BIT_ALIGNED( \
+ sizeof(__instance->union_)); \
+ size_t __member_size = PVR_STATIC_ASSERT_64BIT_ALIGNED( \
+ sizeof(__instance->union_.member_)); \
+ pvr_ioctl_union_padding_check(__instance, __union_offset, \
+ __union_size, __member_size); \
+ })
+
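+/*
+ * Illustrative use only (hypothetical struct and member names): for an ioctl
+ * argument struct with a union member 'data' and a used member 'geom' inside
+ * it, a handler would check the trailing padding with:
+ *
+ *   if (!PVR_IOCTL_UNION_PADDING_CHECK(args, data, geom))
+ *           return -EINVAL;
+ */
+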
+#define PVR_FW_PROCESSOR_TYPE_META 0
+#define PVR_FW_PROCESSOR_TYPE_MIPS 1
+#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+
+#endif /* PVR_DEVICE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c
new file mode 100644
index 000000000000..d3301cde7d11
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+#include "pvr_rogue_fwif_dev_info.h"
+
+#include <drm/drm_print.h>
+
+#include <linux/bits.h>
+#include <linux/minmax.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#define QUIRK_MAPPING(quirk) \
+ [PVR_FW_HAS_BRN_##quirk] = offsetof(struct pvr_device, quirks.has_brn##quirk)
+
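+/*
+ * Maps each firmware quirk bit index (PVR_FW_HAS_BRN_*) to the offset of the
+ * corresponding has_brn* flag within struct pvr_device, so the flag can be
+ * set directly when walking the firmware-provided bitmask.
+ */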
+static const uintptr_t quirks_mapping[] = {
+ QUIRK_MAPPING(44079),
+ QUIRK_MAPPING(47217),
+ QUIRK_MAPPING(48492),
+ QUIRK_MAPPING(48545),
+ QUIRK_MAPPING(49927),
+ QUIRK_MAPPING(50767),
+ QUIRK_MAPPING(51764),
+ QUIRK_MAPPING(62269),
+ QUIRK_MAPPING(63142),
+ QUIRK_MAPPING(63553),
+ QUIRK_MAPPING(66011),
+ QUIRK_MAPPING(71242),
+};
+
+#undef QUIRK_MAPPING
+
+#define ENHANCEMENT_MAPPING(enhancement) \
+ [PVR_FW_HAS_ERN_##enhancement] = offsetof(struct pvr_device, \
+ enhancements.has_ern##enhancement)
+
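+/* As quirks_mapping above, but for the has_ern* enhancement flags. */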
+static const uintptr_t enhancements_mapping[] = {
+ ENHANCEMENT_MAPPING(35421),
+ ENHANCEMENT_MAPPING(38020),
+ ENHANCEMENT_MAPPING(38748),
+ ENHANCEMENT_MAPPING(42064),
+ ENHANCEMENT_MAPPING(42290),
+ ENHANCEMENT_MAPPING(42606),
+ ENHANCEMENT_MAPPING(47025),
+ ENHANCEMENT_MAPPING(57596),
+};
+
+#undef ENHANCEMENT_MAPPING
+
+static void pvr_device_info_set_common(struct pvr_device *pvr_dev, const u64 *bitmask,
+ u32 bitmask_size, const uintptr_t *mapping, u32 mapping_max)
+{
+ const u32 mapping_max_size = (mapping_max + 63) >> 6;
+ const u32 nr_bits = min(bitmask_size * 64, mapping_max);
+
+ /* Warn if any unsupported values in the bitmask. */
+ if (bitmask_size > mapping_max_size) {
+ if (mapping == quirks_mapping)
+ drm_warn(from_pvr_device(pvr_dev), "Unsupported quirks in firmware image");
+ else
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported enhancements in firmware image");
+ } else if (bitmask_size == mapping_max_size && (mapping_max & 63)) {
+ u64 invalid_mask = ~0ull << (mapping_max & 63);
+
+ if (bitmask[bitmask_size - 1] & invalid_mask) {
+ if (mapping == quirks_mapping)
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported quirks in firmware image");
+ else
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported enhancements in firmware image");
+ }
+ }
+
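+ /* Set the flag for every supported bit present in the bitmask. */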
+ for (u32 i = 0; i < nr_bits; i++) {
+ if (bitmask[i >> 6] & BIT_ULL(i & 63))
+ *(bool *)((u8 *)pvr_dev + mapping[i]) = true;
+ }
+}
+
+/**
+ * pvr_device_info_set_quirks() - Set device quirks from device information in firmware
+ * @pvr_dev: Device pointer.
+ * @quirks: Pointer to quirks mask in device information.
+ * @quirks_size: Size of quirks mask, in u64s.
+ */
+void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *quirks, u32 quirks_size)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(quirks_mapping) != PVR_FW_HAS_BRN_MAX);
+
+ pvr_device_info_set_common(pvr_dev, quirks, quirks_size, quirks_mapping,
+ ARRAY_SIZE(quirks_mapping));
+}
+
+/**
+ * pvr_device_info_set_enhancements() - Set device enhancements from device information in firmware
+ * @pvr_dev: Device pointer.
+ * @enhancements: Pointer to enhancements mask in device information.
+ * @enhancements_size: Size of enhancements mask, in u64s.
+ */
+void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *enhancements,
+ u32 enhancements_size)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(enhancements_mapping) != PVR_FW_HAS_ERN_MAX);
+
+ pvr_device_info_set_common(pvr_dev, enhancements, enhancements_size,
+ enhancements_mapping, ARRAY_SIZE(enhancements_mapping));
+}
+
+#define FEATURE_MAPPING(fw_feature, feature) \
+ [PVR_FW_HAS_FEATURE_##fw_feature] = { \
+ .flag_offset = offsetof(struct pvr_device, features.has_##feature), \
+ .value_offset = 0 \
+ }
+
+#define FEATURE_MAPPING_VALUE(fw_feature, feature) \
+ [PVR_FW_HAS_FEATURE_##fw_feature] = { \
+ .flag_offset = offsetof(struct pvr_device, features.has_##feature), \
+ .value_offset = offsetof(struct pvr_device, features.feature) \
+ }
+
+static const struct {
+ uintptr_t flag_offset;
+ uintptr_t value_offset;
+} features_mapping[] = {
+ FEATURE_MAPPING(AXI_ACELITE, axi_acelite),
+ FEATURE_MAPPING_VALUE(CDM_CONTROL_STREAM_FORMAT, cdm_control_stream_format),
+ FEATURE_MAPPING(CLUSTER_GROUPING, cluster_grouping),
+ FEATURE_MAPPING_VALUE(COMMON_STORE_SIZE_IN_DWORDS, common_store_size_in_dwords),
+ FEATURE_MAPPING(COMPUTE, compute),
+ FEATURE_MAPPING(COMPUTE_MORTON_CAPABLE, compute_morton_capable),
+ FEATURE_MAPPING(COMPUTE_OVERLAP, compute_overlap),
+ FEATURE_MAPPING(COREID_PER_OS, coreid_per_os),
+ FEATURE_MAPPING(DYNAMIC_DUST_POWER, dynamic_dust_power),
+ FEATURE_MAPPING_VALUE(ECC_RAMS, ecc_rams),
+ FEATURE_MAPPING_VALUE(FBCDC, fbcdc),
+ FEATURE_MAPPING_VALUE(FBCDC_ALGORITHM, fbcdc_algorithm),
+ FEATURE_MAPPING_VALUE(FBCDC_ARCHITECTURE, fbcdc_architecture),
+ FEATURE_MAPPING_VALUE(FBC_MAX_DEFAULT_DESCRIPTORS, fbc_max_default_descriptors),
+ FEATURE_MAPPING_VALUE(FBC_MAX_LARGE_DESCRIPTORS, fbc_max_large_descriptors),
+ FEATURE_MAPPING(FB_CDC_V4, fb_cdc_v4),
+ FEATURE_MAPPING(GPU_MULTICORE_SUPPORT, gpu_multicore_support),
+ FEATURE_MAPPING(GPU_VIRTUALISATION, gpu_virtualisation),
+ FEATURE_MAPPING(GS_RTA_SUPPORT, gs_rta_support),
+ FEATURE_MAPPING(IRQ_PER_OS, irq_per_os),
+ FEATURE_MAPPING_VALUE(ISP_MAX_TILES_IN_FLIGHT, isp_max_tiles_in_flight),
+ FEATURE_MAPPING_VALUE(ISP_SAMPLES_PER_PIXEL, isp_samples_per_pixel),
+ FEATURE_MAPPING(ISP_ZLS_D24_S8_PACKING_OGL_MODE, isp_zls_d24_s8_packing_ogl_mode),
+ FEATURE_MAPPING_VALUE(LAYOUT_MARS, layout_mars),
+ FEATURE_MAPPING_VALUE(MAX_PARTITIONS, max_partitions),
+ FEATURE_MAPPING_VALUE(META, meta),
+ FEATURE_MAPPING_VALUE(META_COREMEM_SIZE, meta_coremem_size),
+ FEATURE_MAPPING(MIPS, mips),
+ FEATURE_MAPPING_VALUE(NUM_CLUSTERS, num_clusters),
+ FEATURE_MAPPING_VALUE(NUM_ISP_IPP_PIPES, num_isp_ipp_pipes),
+ FEATURE_MAPPING_VALUE(NUM_OSIDS, num_osids),
+ FEATURE_MAPPING_VALUE(NUM_RASTER_PIPES, num_raster_pipes),
+ FEATURE_MAPPING(PBE2_IN_XE, pbe2_in_xe),
+ FEATURE_MAPPING(PBVNC_COREID_REG, pbvnc_coreid_reg),
+ FEATURE_MAPPING(PERFBUS, perfbus),
+ FEATURE_MAPPING(PERF_COUNTER_BATCH, perf_counter_batch),
+ FEATURE_MAPPING_VALUE(PHYS_BUS_WIDTH, phys_bus_width),
+ FEATURE_MAPPING(RISCV_FW_PROCESSOR, riscv_fw_processor),
+ FEATURE_MAPPING(ROGUEXE, roguexe),
+ FEATURE_MAPPING(S7_TOP_INFRASTRUCTURE, s7_top_infrastructure),
+ FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT, simple_internal_parameter_format),
+ FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT_V2, simple_internal_parameter_format_v2),
+ FEATURE_MAPPING_VALUE(SIMPLE_PARAMETER_FORMAT_VERSION, simple_parameter_format_version),
+ FEATURE_MAPPING_VALUE(SLC_BANKS, slc_banks),
+ FEATURE_MAPPING_VALUE(SLC_CACHE_LINE_SIZE_BITS, slc_cache_line_size_bits),
+ FEATURE_MAPPING(SLC_SIZE_CONFIGURABLE, slc_size_configurable),
+ FEATURE_MAPPING_VALUE(SLC_SIZE_IN_KILOBYTES, slc_size_in_kilobytes),
+ FEATURE_MAPPING(SOC_TIMER, soc_timer),
+ FEATURE_MAPPING(SYS_BUS_SECURE_RESET, sys_bus_secure_reset),
+ FEATURE_MAPPING(TESSELLATION, tessellation),
+ FEATURE_MAPPING(TILE_REGION_PROTECTION, tile_region_protection),
+ FEATURE_MAPPING_VALUE(TILE_SIZE_X, tile_size_x),
+ FEATURE_MAPPING_VALUE(TILE_SIZE_Y, tile_size_y),
+ FEATURE_MAPPING(TLA, tla),
+ FEATURE_MAPPING(TPU_CEM_DATAMASTER_GLOBAL_REGISTERS, tpu_cem_datamaster_global_registers),
+ FEATURE_MAPPING(TPU_DM_GLOBAL_REGISTERS, tpu_dm_global_registers),
+ FEATURE_MAPPING(TPU_FILTERING_MODE_CONTROL, tpu_filtering_mode_control),
+ FEATURE_MAPPING_VALUE(USC_MIN_OUTPUT_REGISTERS_PER_PIX, usc_min_output_registers_per_pix),
+ FEATURE_MAPPING(VDM_DRAWINDIRECT, vdm_drawindirect),
+ FEATURE_MAPPING(VDM_OBJECT_LEVEL_LLS, vdm_object_level_lls),
+ FEATURE_MAPPING_VALUE(VIRTUAL_ADDRESS_SPACE_BITS, virtual_address_space_bits),
+ FEATURE_MAPPING(WATCHDOG_TIMER, watchdog_timer),
+ FEATURE_MAPPING(WORKGROUP_PROTECTION, workgroup_protection),
+ FEATURE_MAPPING_VALUE(XE_ARCHITECTURE, xe_architecture),
+ FEATURE_MAPPING(XE_MEMORY_HIERARCHY, xe_memory_hierarchy),
+ FEATURE_MAPPING(XE_TPU2, xe_tpu2),
+ FEATURE_MAPPING_VALUE(XPU_MAX_REGBANKS_ADDR_WIDTH, xpu_max_regbanks_addr_width),
+ FEATURE_MAPPING_VALUE(XPU_MAX_SLAVES, xpu_max_slaves),
+ FEATURE_MAPPING_VALUE(XPU_REGISTER_BROADCAST, xpu_register_broadcast),
+ FEATURE_MAPPING(XT_TOP_INFRASTRUCTURE, xt_top_infrastructure),
+ FEATURE_MAPPING(ZLS_SUBTILE, zls_subtile),
+};
+
+#undef FEATURE_MAPPING_VALUE
+#undef FEATURE_MAPPING
+
+/**
+ * pvr_device_info_set_features() - Set device features from device information in firmware
+ * @pvr_dev: Device pointer.
+ * @features: Pointer to features mask in device information.
+ * @features_size: Size of features mask, in u64s.
+ * @feature_param_size: Size of feature parameters, in u64s.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL on malformed stream.
+ */
+int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size,
+ u32 feature_param_size)
+{
+ const u32 mapping_max = ARRAY_SIZE(features_mapping);
+ const u32 mapping_max_size = (mapping_max + 63) >> 6;
+ const u32 nr_bits = min(features_size * 64, mapping_max);
+ const u64 *feature_params = features + features_size;
+ u32 param_idx = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(features_mapping) != PVR_FW_HAS_FEATURE_MAX);
+
+ /* Verify no unsupported values in the bitmask. */
+ if (features_size > mapping_max_size) {
+ drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image");
+ } else if (features_size == mapping_max_size &&
+ ((mapping_max & 63) != 0)) {
+ u64 invalid_mask = ~0ull << (mapping_max & 63);
+
+ if (features[features_size - 1] & invalid_mask)
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported features in firmware image");
+ }
+
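+ /*
+ * Feature values are packed immediately after the feature bitmask, in
+ * bit order; each set feature bit that carries a value consumes the
+ * next entry from feature_params.
+ */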
+ for (u32 i = 0; i < nr_bits; i++) {
+ if (features[i >> 6] & BIT_ULL(i & 63)) {
+ *(bool *)((u8 *)pvr_dev + features_mapping[i].flag_offset) = true;
+
+ if (features_mapping[i].value_offset) {
+ if (param_idx >= feature_param_size)
+ return -EINVAL;
+
+ *(u64 *)((u8 *)pvr_dev + features_mapping[i].value_offset) =
+ feature_params[param_idx];
+ param_idx++;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.h b/drivers/gpu/drm/imagination/pvr_device_info.h
new file mode 100644
index 000000000000..f61fb988b553
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEVICE_INFO_H
+#define PVR_DEVICE_INFO_H
+
+#include <linux/types.h>
+
+struct pvr_device;
+
+/*
+ * struct pvr_device_features - Hardware feature information
+ */
+struct pvr_device_features {
+ bool has_axi_acelite;
+ bool has_cdm_control_stream_format;
+ bool has_cluster_grouping;
+ bool has_common_store_size_in_dwords;
+ bool has_compute;
+ bool has_compute_morton_capable;
+ bool has_compute_overlap;
+ bool has_coreid_per_os;
+ bool has_dynamic_dust_power;
+ bool has_ecc_rams;
+ bool has_fb_cdc_v4;
+ bool has_fbc_max_default_descriptors;
+ bool has_fbc_max_large_descriptors;
+ bool has_fbcdc;
+ bool has_fbcdc_algorithm;
+ bool has_fbcdc_architecture;
+ bool has_gpu_multicore_support;
+ bool has_gpu_virtualisation;
+ bool has_gs_rta_support;
+ bool has_irq_per_os;
+ bool has_isp_max_tiles_in_flight;
+ bool has_isp_samples_per_pixel;
+ bool has_isp_zls_d24_s8_packing_ogl_mode;
+ bool has_layout_mars;
+ bool has_max_partitions;
+ bool has_meta;
+ bool has_meta_coremem_size;
+ bool has_mips;
+ bool has_num_clusters;
+ bool has_num_isp_ipp_pipes;
+ bool has_num_osids;
+ bool has_num_raster_pipes;
+ bool has_pbe2_in_xe;
+ bool has_pbvnc_coreid_reg;
+ bool has_perfbus;
+ bool has_perf_counter_batch;
+ bool has_phys_bus_width;
+ bool has_riscv_fw_processor;
+ bool has_roguexe;
+ bool has_s7_top_infrastructure;
+ bool has_simple_internal_parameter_format;
+ bool has_simple_internal_parameter_format_v2;
+ bool has_simple_parameter_format_version;
+ bool has_slc_banks;
+ bool has_slc_cache_line_size_bits;
+ bool has_slc_size_configurable;
+ bool has_slc_size_in_kilobytes;
+ bool has_soc_timer;
+ bool has_sys_bus_secure_reset;
+ bool has_tessellation;
+ bool has_tile_region_protection;
+ bool has_tile_size_x;
+ bool has_tile_size_y;
+ bool has_tla;
+ bool has_tpu_cem_datamaster_global_registers;
+ bool has_tpu_dm_global_registers;
+ bool has_tpu_filtering_mode_control;
+ bool has_usc_min_output_registers_per_pix;
+ bool has_vdm_drawindirect;
+ bool has_vdm_object_level_lls;
+ bool has_virtual_address_space_bits;
+ bool has_watchdog_timer;
+ bool has_workgroup_protection;
+ bool has_xe_architecture;
+ bool has_xe_memory_hierarchy;
+ bool has_xe_tpu2;
+ bool has_xpu_max_regbanks_addr_width;
+ bool has_xpu_max_slaves;
+ bool has_xpu_register_broadcast;
+ bool has_xt_top_infrastructure;
+ bool has_zls_subtile;
+
+ u64 cdm_control_stream_format;
+ u64 common_store_size_in_dwords;
+ u64 ecc_rams;
+ u64 fbc_max_default_descriptors;
+ u64 fbc_max_large_descriptors;
+ u64 fbcdc;
+ u64 fbcdc_algorithm;
+ u64 fbcdc_architecture;
+ u64 isp_max_tiles_in_flight;
+ u64 isp_samples_per_pixel;
+ u64 layout_mars;
+ u64 max_partitions;
+ u64 meta;
+ u64 meta_coremem_size;
+ u64 num_clusters;
+ u64 num_isp_ipp_pipes;
+ u64 num_osids;
+ u64 num_raster_pipes;
+ u64 phys_bus_width;
+ u64 simple_parameter_format_version;
+ u64 slc_banks;
+ u64 slc_cache_line_size_bits;
+ u64 slc_size_in_kilobytes;
+ u64 tile_size_x;
+ u64 tile_size_y;
+ u64 usc_min_output_registers_per_pix;
+ u64 virtual_address_space_bits;
+ u64 xe_architecture;
+ u64 xpu_max_regbanks_addr_width;
+ u64 xpu_max_slaves;
+ u64 xpu_register_broadcast;
+};
+
+/*
+ * struct pvr_device_quirks - Hardware quirk information
+ */
+struct pvr_device_quirks {
+ bool has_brn44079;
+ bool has_brn47217;
+ bool has_brn48492;
+ bool has_brn48545;
+ bool has_brn49927;
+ bool has_brn50767;
+ bool has_brn51764;
+ bool has_brn62269;
+ bool has_brn63142;
+ bool has_brn63553;
+ bool has_brn66011;
+ bool has_brn71242;
+};
+
+/*
+ * struct pvr_device_enhancements - Hardware enhancement information
+ */
+struct pvr_device_enhancements {
+ bool has_ern35421;
+ bool has_ern38020;
+ bool has_ern38748;
+ bool has_ern42064;
+ bool has_ern42290;
+ bool has_ern42606;
+ bool has_ern47025;
+ bool has_ern57596;
+};
+
+void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *bitmask,
+ u32 bitmask_len);
+void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *bitmask,
+ u32 bitmask_len);
+int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size,
+ u32 feature_param_size);
+
+/*
+ * Meta cores
+ *
+ * These are the values for the 'meta' feature when the feature is present
+ * (as per &struct pvr_device_features).
+ */
+#define PVR_META_MTP218 (1)
+#define PVR_META_MTP219 (2)
+#define PVR_META_LTP218 (3)
+#define PVR_META_LTP217 (4)
+
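+/* Feature lookup values accepted by pvr_device_has_feature(). */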
+enum {
+ PVR_FEATURE_CDM_USER_MODE_QUEUE,
+ PVR_FEATURE_CLUSTER_GROUPING,
+ PVR_FEATURE_COMPUTE_MORTON_CAPABLE,
+ PVR_FEATURE_FB_CDC_V4,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT,
+ PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
+ PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP,
+ PVR_FEATURE_S7_TOP_INFRASTRUCTURE,
+ PVR_FEATURE_TESSELLATION,
+ PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS,
+ PVR_FEATURE_VDM_DRAWINDIRECT,
+ PVR_FEATURE_VDM_OBJECT_LEVEL_LLS,
+ PVR_FEATURE_ZLS_SUBTILE,
+};
+
+#endif /* PVR_DEVICE_INFO_H */
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
new file mode 100644
index 000000000000..5c3b2d58d766
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -0,0 +1,1501 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_debugfs.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_mmu.h"
+#include "pvr_power.h"
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_shared.h"
+#include "pvr_vm.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/xarray.h>
+
+/**
+ * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
+ *
+ * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
+ *
+ * * AXE-1-16M (found in Texas Instruments AM62)
+ */
+
+/**
+ * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
+ * @drm_dev: [IN] Target DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_bo_args.
+ * @file: [IN] DRM file-private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
+ * or wider than &typedef size_t,
+ * * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
+ * reserved or undefined are set,
+ * * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
+ * zero,
+ * * Any error encountered while creating the object (see
+ * pvr_gem_object_create()), or
+ * * Any error encountered while transferring ownership of the object into a
+ * userspace-accessible handle (see pvr_gem_object_into_handle()).
+ */
+static int
+pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_bo_args *args = raw_args;
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct pvr_file *pvr_file = to_pvr_file(file);
+
+ struct pvr_gem_object *pvr_obj;
+ size_t sanitized_size;
+
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ /* All padding fields must be zeroed. */
+ if (args->_padding_c != 0) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ /*
+ * On 64-bit platforms (our primary target), size_t is a u64. However,
+ * on other architectures we have to check for overflow when casting
+ * down to size_t from u64.
+ *
+ * We also disallow zero-sized allocations, and reserved (kernel-only)
+ * flags.
+ */
+ if (args->size > SIZE_MAX || args->size == 0 || args->flags &
+ ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ sanitized_size = (size_t)args->size;
+
+ /*
+ * Create a buffer object and transfer ownership to a userspace-
+ * accessible handle.
+ */
+ pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
+ if (IS_ERR(pvr_obj)) {
+ err = PTR_ERR(pvr_obj);
+ goto err_drm_dev_exit;
+ }
+
+ /* This function will not modify &args->handle unless it succeeds. */
+ err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
+ if (err)
+ goto err_destroy_obj;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_destroy_obj:
+ /*
+ * GEM objects are refcounted, so there is no explicit destructor
+ * function. Instead, we release the singular reference we currently
+ * hold on the object and let GEM take care of the rest.
+ */
+ pvr_gem_object_put(pvr_obj);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
+ * used when calling mmap() from userspace to map the given GEM buffer object
+ * @drm_dev: [IN] DRM device (unused).
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
+ *
+ * This IOCTL does *not* perform an mmap. See the docs on
+ * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%ENOENT if the handle does not reference a valid GEM buffer object,
+ * * -%EINVAL if any padding fields in &struct
+ * drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
+ * * Any error returned by drm_gem_create_mmap_offset().
+ */
+static int
+pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gem_object *gem_obj;
+ int idx;
+ int ret;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ /* All padding fields must be zeroed. */
+ if (args->_padding_4 != 0) {
+ ret = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ /*
+ * Obtain a kernel reference to the buffer object. This reference is
+ * counted and must be manually dropped before returning. If a buffer
+ * object cannot be found for the specified handle, return -%ENOENT (No
+ * such file or directory).
+ */
+ pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+ if (!pvr_obj) {
+ ret = -ENOENT;
+ goto err_drm_dev_exit;
+ }
+
+ gem_obj = gem_from_pvr_gem(pvr_obj);
+
+ /*
+ * Allocate a fake offset which can be used in userspace calls to mmap
+ * on the DRM device file. If this fails, return the error code. This
+ * operation is idempotent.
+ */
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret != 0) {
+ /* Drop our reference to the buffer object. */
+ drm_gem_object_put(gem_obj);
+ goto err_drm_dev_exit;
+ }
+
+ /*
+ * Read out the fake offset allocated by the earlier call to
+ * drm_gem_create_mmap_offset.
+ */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+ /* Drop our reference to the buffer object. */
+ pvr_gem_object_put(pvr_obj);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
+static __always_inline u64
+pvr_fw_version_packed(u32 major, u32 minor)
+{
+ return ((u64)major << 32) | minor;
+}
+
+static u32
+rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
+{
+ u32 max_partitions = 0;
+ u32 tile_size_x = 0;
+ u32 tile_size_y = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
+ PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
+ PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);
+
+ if (tile_size_x == 16 && tile_size_y == 16) {
+ u32 usc_min_output_registers_per_pix = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
+ &usc_min_output_registers_per_pix);
+
+ return tile_size_x * tile_size_y * max_partitions *
+ usc_min_output_registers_per_pix;
+ }
+
+ return max_partitions * 1024;
+}
+
+static u32
+rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
+{
+ u32 common_store_size_in_dwords = 512 * 4 * 4;
+ u32 alloc_region_size;
+
+ PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);
+
+ alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
+ rogue_get_common_store_partition_space_size(pvr_dev);
+
+ if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
+ u32 common_store_split_point = (768U * 4U * 4U);
+
+ return min(common_store_split_point - (256U * 4U), alloc_region_size);
+ }
+
+ return alloc_region_size;
+}
+
+static inline u32
+rogue_get_num_phantoms(struct pvr_device *pvr_dev)
+{
+ u32 num_clusters = 1;
+
+ PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);
+
+ return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
+}
+
+static inline u32
+rogue_get_max_coeffs(struct pvr_device *pvr_dev)
+{
+ u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
+ u32 pending_allocation_shared_regs = 2U * 1024U;
+ u32 pending_allocation_coeff_regs = 0U;
+ u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
+ u32 tiles_in_flight = 0;
+ u32 max_coeff_pixel_portion;
+
+ PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
+ max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
+ max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;
+
+ /*
+ * Compute tasks on cores with BRN48492 and without compute overlap may lock
+ * up without two additional lines of coeffs.
+ */
+ if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
+ pending_allocation_coeff_regs = 2U * 1024U;
+
+ if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
+ pending_allocation_shared_regs = 0;
+
+ if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
+ max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;
+
+ return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
+ (max_coeff_pixel_portion + max_coeff_additional_portion +
+ pending_allocation_shared_regs);
+}
+
+static inline u32
+rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
+{
+ u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);
+
+ if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
+ !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
+ /* Driver must not use the 2 reserved lines. */
+ available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
+ }
+
+ /*
+ * The maximum amount of local memory available to a kernel is the minimum
+ * of the total number of coefficient registers available and the max common
+ * store allocation size which can be made by the CDM.
+ *
+ * If any coeff lines are reserved for tessellation or pixel then we need to
+ * subtract those too.
+ */
+ return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
+}
+
+/**
+ * pvr_dev_query_gpu_info_get() - Fetch GPU information for userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_gpu_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ * * 0 on success, or if size is requested using a NULL pointer, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_gpu_info gpu_info = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
+ return 0;
+ }
+
+ gpu_info.gpu_id =
+ pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
+ gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);
+
+ err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
+ if (err < 0)
+ return err;
+
+ if (args->size > sizeof(gpu_info))
+ args->size = sizeof(gpu_info);
+ return 0;
+}
+
+/**
+ * pvr_dev_query_runtime_info_get() - Fetch runtime information for userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_runtime_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ * * 0 on success, or if size is requested using a NULL pointer, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_runtime_info runtime_info = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
+ return 0;
+ }
+
+ runtime_info.free_list_min_pages =
+ pvr_get_free_list_min_pages(pvr_dev);
+ runtime_info.free_list_max_pages =
+ ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
+ runtime_info.common_store_alloc_region_size =
+ rogue_get_common_store_alloc_region_size(pvr_dev);
+ runtime_info.common_store_partition_space_size =
+ rogue_get_common_store_partition_space_size(pvr_dev);
+ runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
+ runtime_info.cdm_max_local_mem_size_regs =
+ rogue_get_cdm_max_local_mem_size_regs(pvr_dev);
+
+ err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
+ if (err < 0)
+ return err;
+
+ if (args->size > sizeof(runtime_info))
+ args->size = sizeof(runtime_info);
+ return 0;
+}
+
+/**
+ * pvr_dev_query_quirks_get() - Unpack the array of quirks at the address given
+ * in a struct drm_pvr_dev_query_quirks, or get the amount of space required
+ * for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_quirks.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of quirks copied or, if
+ * either pointer is NULL, the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ * * 0 on success, or if size/count is requested using a NULL pointer, or
+ * * -%EINVAL if args contained non-zero reserved fields, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ /*
+ * @FIXME - hardcoding of numbers here is intended as an
+ * intermediate step so the UAPI can be fixed, but requires a
+ * refactor in the future to store them in a more appropriate
+ * location
+ */
+ static const u32 umd_quirks_musthave[] = {
+ 47217,
+ 49927,
+ 62269,
+ };
+ static const u32 umd_quirks[] = {
+ 48545,
+ 51764,
+ };
+ struct drm_pvr_dev_query_quirks query;
+ u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
+ size_t out_musthave_count = 0;
+ size_t out_count = 0;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_quirks);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+ if (err < 0)
+ return err;
+ if (query._padding_c)
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
+ if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
+ out[out_count++] = umd_quirks_musthave[i];
+ out_musthave_count++;
+ }
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
+ if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
+ out[out_count++] = umd_quirks[i];
+ }
+
+ if (!query.quirks)
+ goto copy_out;
+ if (query.count < out_count)
+ return -E2BIG;
+
+ if (copy_to_user(u64_to_user_ptr(query.quirks), out,
+ out_count * sizeof(u32))) {
+ return -EFAULT;
+ }
+
+ query.musthave_count = out_musthave_count;
+
+copy_out:
+ query.count = out_count;
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+/**
+ * pvr_dev_query_enhancements_get() - Unpack the array of enhancements at the
+ * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
+ * of space required for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_enhancements.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of enhancements copied or, if
+ * either pointer is NULL, the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ * * 0 on success, or if size/count is requested using a NULL pointer, or
+ * * -%EINVAL if args contained non-zero reserved fields, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ /*
+ * @FIXME - hardcoding of numbers here is intended as an
+ * intermediate step so the UAPI can be fixed, but requires a
+ * refactor in the future to store them in a more appropriate
+ * location
+ */
+ const u32 umd_enhancements[] = {
+ 35421,
+ 42064,
+ };
+ struct drm_pvr_dev_query_enhancements query;
+ u32 out[ARRAY_SIZE(umd_enhancements)];
+ size_t out_idx = 0;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_enhancements);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+ if (err < 0)
+ return err;
+ if (query._padding_a)
+ return -EINVAL;
+ if (query._padding_c)
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
+ if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
+ out[out_idx++] = umd_enhancements[i];
+ }
+
+ if (!query.enhancements)
+ goto copy_out;
+ if (query.count < out_idx)
+ return -E2BIG;
+
+ if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
+ out_idx * sizeof(u32))) {
+ return -EFAULT;
+ }
+
+copy_out:
+ query.count = out_idx;
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_dev_query() - IOCTL to copy information about a device
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_dev_query_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
+ * If the given receiving struct pointer is NULL, or the indicated size is too
+ * small, the expected size of the struct type will be returned in the size
+ * argument field.
+ *
+ * Return:
+ * * 0 on success or when fetching the size with args->pointer == NULL, or
+ * * -%E2BIG if the indicated size of the receiving struct is less than is
+ * required to contain the copied data, or
+ * * -%EINVAL if the indicated struct type is unknown, or
+ * * -%ENOMEM if local memory could not be allocated, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct drm_pvr_ioctl_dev_query_args *args = raw_args;
+ int idx;
+ int ret = -EINVAL;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ switch ((enum drm_pvr_dev_query)args->type) {
+ case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
+ ret = pvr_dev_query_gpu_info_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
+ ret = pvr_dev_query_runtime_info_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_QUIRKS_GET:
+ ret = pvr_dev_query_quirks_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
+ ret = pvr_dev_query_enhancements_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
+ ret = pvr_heap_info_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
+ ret = pvr_static_data_areas_get(pvr_dev, args);
+ break;
+ }
+
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
+/**
+ * pvr_ioctl_create_context() - IOCTL to create a context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if provided arguments are invalid, or
+ * * -%EFAULT if arguments can't be copied from userspace, or
+ * * Any error returned by pvr_create_render_context().
+ */
+static int
+pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_context_args *args = raw_args;
+ struct pvr_file *pvr_file = file->driver_priv;
+ int idx;
+ int ret;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ ret = pvr_context_create(pvr_file, args);
+
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
+/**
+ * pvr_ioctl_destroy_context() - IOCTL to destroy a context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if context not in context list.
+ */
+static int
+pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
+ struct pvr_file *pvr_file = file->driver_priv;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ return pvr_context_destroy(pvr_file, args->handle);
+}
+
+/**
+ * pvr_ioctl_create_free_list() - IOCTL to create a free list
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_free_list_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_free_list_create().
+ */
+static int
+pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_free_list *free_list;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ free_list = pvr_free_list_create(pvr_file, args);
+ if (IS_ERR(free_list)) {
+ err = PTR_ERR(free_list);
+ goto err_drm_dev_exit;
+ }
+
+ /* Allocate object handle for userspace. */
+ err = xa_alloc(&pvr_file->free_list_handles,
+ &args->handle,
+ free_list,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_cleanup;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_cleanup:
+ pvr_free_list_put(free_list);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_free_list_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if free list not in object list.
+ */
+static int
+pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_free_list *free_list;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
+ if (!free_list)
+ return -EINVAL;
+
+ pvr_free_list_put(free_list);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_hwrt_dataset_create().
+ */
+static int
+pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_hwrt_dataset *hwrt;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ hwrt = pvr_hwrt_dataset_create(pvr_file, args);
+ if (IS_ERR(hwrt)) {
+ err = PTR_ERR(hwrt);
+ goto err_drm_dev_exit;
+ }
+
+ /* Allocate object handle for userspace. */
+ err = xa_alloc(&pvr_file->hwrt_handles,
+ &args->handle,
+ hwrt,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_cleanup;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_cleanup:
+ pvr_hwrt_dataset_put(hwrt);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if HWRT dataset not in object list.
+ */
+static int
+pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_hwrt_dataset *hwrt;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
+ if (!hwrt)
+ return -EINVAL;
+
+ pvr_hwrt_dataset_put(hwrt);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_vm_create_context().
+ */
+static int
+pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ if (args->_padding_4) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
+ if (IS_ERR(vm_ctx)) {
+ err = PTR_ERR(vm_ctx);
+ goto err_drm_dev_exit;
+ }
+
+ /* Allocate object handle for userspace. */
+ err = xa_alloc(&pvr_file->vm_ctx_handles,
+ &args->handle,
+ vm_ctx,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_cleanup;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_cleanup:
+ pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if VM context not in object list.
+ */
+static int
+pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
+ if (!vm_ctx)
+ return -EINVAL;
+
+ pvr_vm_context_put(vm_ctx);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_vm_map_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if &drm_pvr_ioctl_vm_op_map_args.flags is not zero,
+ * * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_op_map_args.offset
+ * and &drm_pvr_ioctl_vm_op_map_args.size are not valid or do not fall
+ * within the buffer object specified by
+ * &drm_pvr_ioctl_vm_op_map_args.handle,
+ * * -%EINVAL if the bounds specified by
+ * &drm_pvr_ioctl_vm_op_map_args.device_addr and
+ * &drm_pvr_ioctl_vm_op_map_args.size do not form a valid device-virtual
+ * address range which falls entirely within a single heap, or
+ * * -%ENOENT if &drm_pvr_ioctl_vm_op_map_args.handle does not refer to a
+ * valid PowerVR buffer object.
+ */
+static int
+pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct drm_pvr_ioctl_vm_map_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+
+ struct pvr_gem_object *pvr_obj;
+ size_t pvr_obj_size;
+
+ u64 offset_plus_size;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ /* Initial validation of args. */
+ if (args->_padding_14) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ if (args->flags != 0 ||
+ check_add_overflow(args->offset, args->size, &offset_plus_size) ||
+ !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (!vm_ctx) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+ if (!pvr_obj) {
+ err = -ENOENT;
+ goto err_put_vm_context;
+ }
+
+ pvr_obj_size = pvr_gem_object_size(pvr_obj);
+
+ /*
+ * Validate offset and size args. The alignment of these will be
+ * checked when mapping; for now just check that they're within valid
+ * bounds
+ */
+ if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
+ err = -EINVAL;
+ goto err_put_pvr_object;
+ }
+
+ err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
+ args->device_addr, args->size);
+ if (err)
+ goto err_put_pvr_object;
+
+ /*
+ * In order to set up the mapping, we needed a reference to &pvr_obj.
+ * However, pvr_vm_map() obtains and stores its own reference, so we
+ * must release ours before returning.
+ */
+
+err_put_pvr_object:
+ pvr_gem_object_put(pvr_obj);
+
+err_put_vm_context:
+ pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_vm_unmap_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if &drm_pvr_ioctl_vm_op_unmap_args.device_addr is not a valid
+ * device page-aligned device-virtual address, or
+ * * -%ENOENT if there is currently no PowerVR buffer object mapped at
+ * &drm_pvr_ioctl_vm_op_unmap_args.device_addr.
+ */
+static int
+pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+ int err;
+
+ /* Initial validation of args. */
+ if (args->_padding_4)
+ return -EINVAL;
+
+ vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (!vm_ctx)
+ return -EINVAL;
+
+ err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);
+
+ pvr_vm_context_put(vm_ctx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_submit_job_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if arguments are invalid.
+ */
+static int
+pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ err = pvr_submit_jobs(pvr_dev, pvr_file, args);
+
+ drm_dev_exit(idx);
+
+ return err;
+}
+
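+/*
+ * Note on the user object helpers below: userspace passes an explicit stride
+ * so that structs can grow over time. On the way in, a user struct shorter
+ * than the kernel struct is zero-extended; on the way out, the copy is
+ * truncated to the smaller of the two sizes and any remaining userspace bytes
+ * are cleared. See the PVR_UOBJ_* macros in pvr_drv.h.
+ */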
+int
+pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
+{
+ if (usr_stride < min_stride)
+ return -EINVAL;
+
+ return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
+}
+
+int
+pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
+{
+ if (usr_stride < min_stride)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
+ return -EFAULT;
+
+ if (usr_stride > obj_size &&
+ clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
+{
+ int ret = 0;
+ void *out_alloc;
+
+ if (in->stride < min_stride)
+ return -EINVAL;
+
+ if (!in->count)
+ return 0;
+
+ out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
+ if (!out_alloc)
+ return -ENOMEM;
+
+ if (obj_size == in->stride) {
+ if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
+ (unsigned long)obj_size * in->count))
+ ret = -EFAULT;
+ } else {
+ void __user *in_ptr = u64_to_user_ptr(in->array);
+ void *out_ptr = out_alloc;
+
+ for (u32 i = 0; i < in->count; i++) {
+ ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
+ if (ret)
+ break;
+
+ out_ptr += obj_size;
+ in_ptr += in->stride;
+ }
+ }
+
+ if (ret) {
+ kvfree(out_alloc);
+ return ret;
+ }
+
+ *out = out_alloc;
+ return 0;
+}
+
+int
+pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+ const void *in)
+{
+ if (out->stride < min_stride)
+ return -EINVAL;
+
+ if (!out->count)
+ return 0;
+
+ if (obj_size == out->stride) {
+ if (copy_to_user(u64_to_user_ptr(out->array), in,
+ (unsigned long)obj_size * out->count))
+ return -EFAULT;
+ } else {
+ u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
+ void __user *out_ptr = u64_to_user_ptr(out->array);
+ const void *in_ptr = in;
+
+ for (u32 i = 0; i < out->count; i++) {
+ if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
+ return -EFAULT;
+
+ out_ptr += out->stride;
+ in_ptr += obj_size;
+ }
+
+ if (out->stride > obj_size &&
+ clear_user(u64_to_user_ptr(out->array + obj_size),
+ out->stride - obj_size)) {
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+#define DRM_PVR_IOCTL(_name, _func, _flags) \
+ DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)
+
+/* clang-format off */
+
+static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
+ DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
+};
+
+/* clang-format on */
+
+#undef DRM_PVR_IOCTL
+
+/**
+ * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
+ * @drm_dev: [IN] DRM device.
+ * @file: [IN] DRM file private data.
+ *
+ * Allocates powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%ENOMEM if the allocation of a &struct pvr_file fails, or
+ * * Any error returned by pvr_memory_context_init().
+ */
+static int
+pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct pvr_file *pvr_file;
+
+ pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
+ if (!pvr_file)
+ return -ENOMEM;
+
+ /*
+ * Store reference to base DRM file private data for use by
+ * from_pvr_file.
+ */
+ pvr_file->file = file;
+
+ /*
+ * Store reference to powervr-specific outer device struct in file
+ * private data for convenient access.
+ */
+ pvr_file->pvr_dev = pvr_dev;
+
+ xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);
+
+ /*
+ * Store reference to powervr-specific file private data in DRM file
+ * private data.
+ */
+ file->driver_priv = pvr_file;
+
+ return 0;
+}
+
+/**
+ * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
+ * drm_file is closed.
+ * @drm_dev: [IN] DRM device (unused).
+ * @file: [IN] DRM file private data.
+ *
+ * Frees powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ */
+static void
+pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
+ struct drm_file *file)
+{
+ struct pvr_file *pvr_file = to_pvr_file(file);
+
+ /* Kill remaining contexts. */
+ pvr_destroy_contexts_for_file(pvr_file);
+
+ /* Drop references on any remaining objects. */
+ pvr_destroy_free_lists_for_file(pvr_file);
+ pvr_destroy_hwrt_datasets_for_file(pvr_file);
+ pvr_destroy_vm_contexts_for_file(pvr_file);
+
+ kfree(pvr_file);
+ file->driver_priv = NULL;
+}
+
+DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);
+
+static struct drm_driver pvr_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .open = pvr_drm_driver_open,
+ .postclose = pvr_drm_driver_postclose,
+ .ioctls = pvr_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
+ .fops = &pvr_drm_driver_fops,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = pvr_debugfs_init,
+#endif
+
+ .name = PVR_DRIVER_NAME,
+ .desc = PVR_DRIVER_DESC,
+ .date = PVR_DRIVER_DATE,
+ .major = PVR_DRIVER_MAJOR,
+ .minor = PVR_DRIVER_MINOR,
+ .patchlevel = PVR_DRIVER_PATCHLEVEL,
+
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+ .gem_create_object = pvr_gem_create_object,
+};
+
+static int
+pvr_probe(struct platform_device *plat_dev)
+{
+ struct pvr_device *pvr_dev;
+ struct drm_device *drm_dev;
+ int err;
+
+ pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
+ struct pvr_device, base);
+ if (IS_ERR(pvr_dev))
+ return PTR_ERR(pvr_dev);
+
+ drm_dev = &pvr_dev->base;
+
+ platform_set_drvdata(plat_dev, drm_dev);
+
+ init_rwsem(&pvr_dev->reset_sem);
+
+ pvr_context_device_init(pvr_dev);
+
+ err = pvr_queue_device_init(pvr_dev);
+ if (err)
+ goto err_context_fini;
+
+ devm_pm_runtime_enable(&plat_dev->dev);
+ pm_runtime_mark_last_busy(&plat_dev->dev);
+
+ pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
+ pm_runtime_use_autosuspend(&plat_dev->dev);
+ pvr_watchdog_init(pvr_dev);
+
+ err = pvr_device_init(pvr_dev);
+ if (err)
+ goto err_watchdog_fini;
+
+ err = drm_dev_register(drm_dev, 0);
+ if (err)
+ goto err_device_fini;
+
+ xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);
+
+ return 0;
+
+err_device_fini:
+ pvr_device_fini(pvr_dev);
+
+err_watchdog_fini:
+ pvr_watchdog_fini(pvr_dev);
+
+ pvr_queue_device_fini(pvr_dev);
+
+err_context_fini:
+ pvr_context_device_fini(pvr_dev);
+
+ return err;
+}
+
+static int
+pvr_remove(struct platform_device *plat_dev)
+{
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+
+ WARN_ON(!xa_empty(&pvr_dev->job_ids));
+ WARN_ON(!xa_empty(&pvr_dev->free_list_ids));
+
+ xa_destroy(&pvr_dev->job_ids);
+ xa_destroy(&pvr_dev->free_list_ids);
+
+ pm_runtime_suspend(drm_dev->dev);
+ pvr_device_fini(pvr_dev);
+ drm_dev_unplug(drm_dev);
+ pvr_watchdog_fini(pvr_dev);
+ pvr_queue_device_fini(pvr_dev);
+ pvr_context_device_fini(pvr_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "img,img-axe", .data = NULL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static const struct dev_pm_ops pvr_pm_ops = {
+ RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)
+};
+
+static struct platform_driver pvr_driver = {
+ .probe = pvr_probe,
+ .remove = pvr_remove,
+ .driver = {
+ .name = PVR_DRIVER_NAME,
+ .pm = &pvr_pm_ops,
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(pvr_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION(PVR_DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h
new file mode 100644
index 000000000000..378fe477b759
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRV_H
+#define PVR_DRV_H
+
+#include <linux/compiler_attributes.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_DRIVER_NAME "powervr"
+#define PVR_DRIVER_DESC "Imagination PowerVR (Series 6 and later) & IMG Graphics"
+#define PVR_DRIVER_DATE "20230904"
+
+/*
+ * Driver interface version:
+ * - 1.0: Initial interface
+ */
+#define PVR_DRIVER_MAJOR 1
+#define PVR_DRIVER_MINOR 0
+#define PVR_DRIVER_PATCHLEVEL 0
+
+int pvr_get_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, void *out);
+int pvr_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, const void *in);
+int pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size,
+ void **out);
+int pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+ const void *in);
+
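+/*
+ * Minimum number of bytes that must be copied for a given UAPI struct: the
+ * offset of its last mandatory field plus the size of that field.
+ */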
+#define PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
+ (offsetof(_typename, _last_mandatory_field) + \
+ sizeof(((_typename *)NULL)->_last_mandatory_field))
+
+/* NOLINTBEGIN(bugprone-macro-parentheses) */
+#define PVR_UOBJ_DECL(_typename, _last_mandatory_field) \
+ , _typename : PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
+/* NOLINTEND(bugprone-macro-parentheses) */
+
+/**
+ * DOC: PVR user objects.
+ *
+ * Macros used to aid copying structured and array data to and from
+ * userspace. Objects can differ in size, provided the minimum size
+ * allowed is specified (using the last mandatory field in the struct).
+ * All types used with PVR_UOBJ_GET/SET macros must be listed here under
+ * PVR_UOBJ_MIN_SIZE, with the last mandatory struct field specified.
+ */
+
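+/*
+ * For example, a device query handler fills a kernel-side struct and copies
+ * it back to userspace with PVR_UOBJ_SET() (sketch based on
+ * pvr_dev_query_gpu_info_get() in pvr_drv.c):
+ *
+ *     struct drm_pvr_dev_query_gpu_info gpu_info = {0};
+ *
+ *     gpu_info.gpu_id = pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
+ *     gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);
+ *     err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
+ *
+ * The user-supplied size may legitimately differ from sizeof(gpu_info);
+ * pvr_set_uobj() truncates or zero-fills the userspace copy as needed.
+ */
+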
+/**
+ * PVR_UOBJ_MIN_SIZE() - Fetch the minimum copy size of a compatible type object.
+ * @_obj_name: The name of the object. Cannot be a typename - this is deduced.
+ *
+ * This cannot fail. Using the macro with an incompatible type will result in a
+ * compiler error.
+ *
+ * To add compatibility for a type, list it within the macro in an orderly
+ * fashion. The second argument is the name of the last mandatory field of the
+ * struct type, which is used to calculate the size. See also PVR_UOBJ_DECL().
+ *
+ * Return: The minimum copy size.
+ */
+#define PVR_UOBJ_MIN_SIZE(_obj_name) _Generic(_obj_name \
+ PVR_UOBJ_DECL(struct drm_pvr_job, hwrt) \
+ PVR_UOBJ_DECL(struct drm_pvr_sync_op, value) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_gpu_info, num_phantoms) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_runtime_info, cdm_max_local_mem_size_regs) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_quirks, _padding_c) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_enhancements, _padding_c) \
+ PVR_UOBJ_DECL(struct drm_pvr_heap, page_size_log2) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_heap_info, heaps) \
+ PVR_UOBJ_DECL(struct drm_pvr_static_data_area, offset) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_static_data_areas, static_data_areas) \
+ )
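+
+/*
+ * To make a new UAPI struct usable with the PVR_UOBJ_GET/SET helpers, append
+ * one PVR_UOBJ_DECL() line to the list above, naming its last mandatory
+ * field. For instance (hypothetical struct, shown only to illustrate the
+ * pattern):
+ *
+ *     PVR_UOBJ_DECL(struct drm_pvr_dev_query_example, flags) \
+ */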
+
+/**
+ * PVR_UOBJ_GET() - Copies from _src_usr_ptr to &_dest_obj.
+ * @_dest_obj: The destination container object in kernel space.
+ * @_usr_size: The size of the source container in user space.
+ * @_src_usr_ptr: __u64 raw pointer to the source container in user space.
+ *
+ * Return: Error code. See pvr_get_uobj().
+ */
+#define PVR_UOBJ_GET(_dest_obj, _usr_size, _src_usr_ptr) \
+ pvr_get_uobj(_src_usr_ptr, _usr_size, \
+ PVR_UOBJ_MIN_SIZE(_dest_obj), \
+ sizeof(_dest_obj), &(_dest_obj))
+
+/**
+ * PVR_UOBJ_SET() - Copies from &_src_obj to _dest_usr_ptr.
+ * @_dest_usr_ptr: __u64 raw pointer to the destination container in user space.
+ * @_usr_size: The size of the destination container in user space.
+ * @_src_obj: The source container object in kernel space.
+ *
+ * Return: Error code. See pvr_set_uobj().
+ */
+#define PVR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
+ pvr_set_uobj(_dest_usr_ptr, _usr_size, \
+ PVR_UOBJ_MIN_SIZE(_src_obj), \
+ sizeof(_src_obj), &(_src_obj))
+
+/**
+ * PVR_UOBJ_GET_ARRAY() - Copies from @_src_drm_pvr_obj_array.array to
+ * alloced memory and returns a pointer in _dest_array.
+ * @_dest_array: The destination C array object in kernel space.
+ * @_src_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw
+ * pointer to the source C array in user space and the size of each array
+ * element in user space (the 'stride').
+ *
+ * Return: Error code. See pvr_get_uobj_array().
+ */
+#define PVR_UOBJ_GET_ARRAY(_dest_array, _src_drm_pvr_obj_array) \
+ pvr_get_uobj_array(_src_drm_pvr_obj_array, \
+ PVR_UOBJ_MIN_SIZE((_dest_array)[0]), \
+ sizeof((_dest_array)[0]), (void **)&(_dest_array))
+
+/**
+ * PVR_UOBJ_SET_ARRAY() - Copies from _src_array to @_dest_drm_pvr_obj_array.array.
+ * @_dest_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw
+ * pointer to the destination C array in user space and the size of each array
+ * element in user space (the 'stride').
+ * @_src_array: The source C array object in kernel space.
+ *
+ * Return: Error code. See pvr_set_uobj_array().
+ */
+#define PVR_UOBJ_SET_ARRAY(_dest_drm_pvr_obj_array, _src_array) \
+ pvr_set_uobj_array(_dest_drm_pvr_obj_array, \
+ PVR_UOBJ_MIN_SIZE((_src_array)[0]), \
+ sizeof((_src_array)[0]), _src_array)
+
+#endif /* PVR_DRV_H */
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
new file mode 100644
index 000000000000..5e51bc980751
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_gem.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define FREE_LIST_ENTRY_SIZE sizeof(u32)
+
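+/*
+ * Alignment mask for free list page counts: the number of entries per PM
+ * base-address alignment boundary, minus one. A page count is suitably
+ * aligned when (count & FREE_LIST_ALIGNMENT) == 0.
+ */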
+#define FREE_LIST_ALIGNMENT \
+ ((ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE / FREE_LIST_ENTRY_SIZE) - 1)
+
+#define FREE_LIST_MIN_PAGES 50
+#define FREE_LIST_MIN_PAGES_BRN66011 40
+#define FREE_LIST_MIN_PAGES_ROGUEXE 25
+
+/**
+ * pvr_get_free_list_min_pages() - Get minimum free list size for this device
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * Minimum free list size, in PM physical pages.
+ */
+u32
+pvr_get_free_list_min_pages(struct pvr_device *pvr_dev)
+{
+ u32 value;
+
+ if (PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+ if (PVR_HAS_QUIRK(pvr_dev, 66011))
+ value = FREE_LIST_MIN_PAGES_BRN66011;
+ else
+ value = FREE_LIST_MIN_PAGES_ROGUEXE;
+ } else {
+ value = FREE_LIST_MIN_PAGES;
+ }
+
+ return value;
+}
+
+static int
+free_list_create_kernel_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args,
+ struct pvr_free_list *free_list)
+{
+ struct pvr_gem_object *free_list_obj;
+ struct pvr_vm_context *vm_ctx;
+ u64 free_list_size;
+ int err;
+
+ if (args->grow_threshold > 100 ||
+ args->initial_num_pages > args->max_num_pages ||
+ args->grow_num_pages > args->max_num_pages ||
+ args->max_num_pages == 0 ||
+ (args->initial_num_pages < args->max_num_pages && !args->grow_num_pages) ||
+ (args->initial_num_pages == args->max_num_pages && args->grow_num_pages))
+ return -EINVAL;
+
+ if ((args->initial_num_pages & FREE_LIST_ALIGNMENT) ||
+ (args->max_num_pages & FREE_LIST_ALIGNMENT) ||
+ (args->grow_num_pages & FREE_LIST_ALIGNMENT))
+ return -EINVAL;
+
+ vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (!vm_ctx)
+ return -EINVAL;
+
+ free_list_obj = pvr_vm_find_gem_object(vm_ctx, args->free_list_gpu_addr,
+ NULL, &free_list_size);
+ if (!free_list_obj) {
+ err = -EINVAL;
+ goto err_put_vm_context;
+ }
+
+ if ((free_list_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS) ||
+ !(free_list_obj->flags & DRM_PVR_BO_PM_FW_PROTECT) ||
+ free_list_size < (args->max_num_pages * FREE_LIST_ENTRY_SIZE)) {
+ err = -EINVAL;
+ goto err_put_free_list_obj;
+ }
+
+ free_list->pvr_dev = pvr_file->pvr_dev;
+ free_list->current_pages = 0;
+ free_list->max_pages = args->max_num_pages;
+ free_list->grow_pages = args->grow_num_pages;
+ free_list->grow_threshold = args->grow_threshold;
+ free_list->obj = free_list_obj;
+ free_list->free_list_gpu_addr = args->free_list_gpu_addr;
+ free_list->initial_num_pages = args->initial_num_pages;
+
+ pvr_vm_context_put(vm_ctx);
+
+ return 0;
+
+err_put_free_list_obj:
+ pvr_gem_object_put(free_list_obj);
+
+err_put_vm_context:
+ pvr_vm_context_put(vm_ctx);
+
+ return err;
+}
+
+static void
+free_list_destroy_kernel_structure(struct pvr_free_list *free_list)
+{
+ WARN_ON(!list_empty(&free_list->hwrt_list));
+
+ pvr_gem_object_put(free_list->obj);
+}
+
+/**
+ * calculate_free_list_ready_pages_locked() - Work out the number of free list
+ * pages to reserve for growing within the FW without having to wait for the
+ * host to progress a grow request
+ * @free_list: Pointer to free list.
+ * @pages: Total pages currently in free list.
+ *
+ * If the threshold or grow size works out to fewer pages than the alignment
+ * size (4 pages on Rogue), then the feature is not used.
+ *
+ * Caller must hold &free_list->lock.
+ *
+ * Return: number of pages to reserve.
+ */
+static u32
+calculate_free_list_ready_pages_locked(struct pvr_free_list *free_list, u32 pages)
+{
+ u32 ready_pages;
+
+ lockdep_assert_held(&free_list->lock);
+
+ ready_pages = ((pages * free_list->grow_threshold) / 100);
+
+ /* The number of ready pages must not exceed the grow size. */
+ ready_pages = min(ready_pages, free_list->grow_pages);
+
+ /*
+ * The number of pages must be a multiple of the free list align size.
+ */
+ ready_pages &= ~FREE_LIST_ALIGNMENT;
+
+ return ready_pages;
+}
+
+static u32
+calculate_free_list_ready_pages(struct pvr_free_list *free_list, u32 pages)
+{
+ u32 ret;
+
+ mutex_lock(&free_list->lock);
+
+ ret = calculate_free_list_ready_pages_locked(free_list, pages);
+
+ mutex_unlock(&free_list->lock);
+
+ return ret;
+}
+
+static void
+free_list_fw_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_freelist *fw_data = cpu_ptr;
+ struct pvr_free_list *free_list = priv;
+ u32 ready_pages;
+
+ /* Fill out FW structure */
+ ready_pages = calculate_free_list_ready_pages(free_list,
+ free_list->initial_num_pages);
+
+ fw_data->max_pages = free_list->max_pages;
+ fw_data->current_pages = free_list->initial_num_pages - ready_pages;
+ fw_data->grow_pages = free_list->grow_pages;
+ fw_data->ready_pages = ready_pages;
+ fw_data->freelist_id = free_list->fw_id;
+ fw_data->grow_pending = false;
+ fw_data->current_stack_top = fw_data->current_pages - 1;
+ fw_data->freelist_dev_addr = free_list->free_list_gpu_addr;
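+ /*
+ * The free list buffer is populated from its end, so the current device
+ * address is the start of the in-use region: skip the
+ * (max_pages - current_pages) entries that are not yet populated and round
+ * down to the PM free list base address alignment.
+ */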
+ fw_data->current_dev_addr = (fw_data->freelist_dev_addr +
+ ((fw_data->max_pages - fw_data->current_pages) *
+ FREE_LIST_ENTRY_SIZE)) &
+ ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
+}
+
+static int
+free_list_create_fw_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args,
+ struct pvr_free_list *free_list)
+{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+
+ /*
+ * Create and map the FW structure so we can initialise it. This is not
+ * accessed on the CPU side post-initialisation so the mapping lifetime
+ * is only for this function.
+ */
+ free_list->fw_data = pvr_fw_object_create_and_map(pvr_dev, sizeof(*free_list->fw_data),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ free_list_fw_init, free_list,
+ &free_list->fw_obj);
+ if (IS_ERR(free_list->fw_data))
+ return PTR_ERR(free_list->fw_data);
+
+ return 0;
+}
+
+static void
+free_list_destroy_fw_structure(struct pvr_free_list *free_list)
+{
+ pvr_fw_object_unmap_and_destroy(free_list->fw_obj);
+}
+
+static int
+pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
+ struct sg_table *sgt, u32 offset, u32 num_pages)
+{
+ struct sg_dma_page_iter dma_iter;
+ u32 *page_list;
+
+ lockdep_assert_held(&free_list->lock);
+
+ page_list = pvr_gem_object_vmap(free_list->obj);
+ if (IS_ERR(page_list))
+ return PTR_ERR(page_list);
+
+ offset /= FREE_LIST_ENTRY_SIZE;
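+ /*
+ * Each DMA-mapped CPU page is carved into ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE
+ * chunks, and the PM page frame number of each chunk is written to the
+ * free list until num_pages entries have been added.
+ */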
+ /* clang-format off */
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
+ u64 dma_pfn = dma_addr >>
+ ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+ u32 dma_addr_offset;
+
+ BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
+
+ for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
+ WARN_ON_ONCE(dma_pfn >> 32);
+
+ page_list[offset++] = (u32)dma_pfn;
+ dma_pfn++;
+
+ num_pages--;
+ if (!num_pages)
+ break;
+ }
+
+ if (!num_pages)
+ break;
+ }
+ /* clang-format on */
+
+ /* Make sure our free_list update is flushed. */
+ wmb();
+
+ pvr_gem_object_vunmap(free_list->obj);
+
+ return 0;
+}
+
+static int
+pvr_free_list_insert_node_locked(struct pvr_free_list_node *free_list_node)
+{
+ struct pvr_free_list *free_list = free_list_node->free_list;
+ struct sg_table *sgt;
+ u32 start_page;
+ u32 offset;
+ int err;
+
+ lockdep_assert_held(&free_list->lock);
+
+ start_page = free_list->max_pages - free_list->current_pages -
+ free_list_node->num_pages;
+ offset = (start_page * FREE_LIST_ENTRY_SIZE) &
+ ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
+
+ sgt = drm_gem_shmem_get_pages_sgt(&free_list_node->mem_obj->base);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ err = pvr_free_list_insert_pages_locked(free_list, sgt,
+ offset, free_list_node->num_pages);
+ if (!err)
+ free_list->current_pages += free_list_node->num_pages;
+
+ return err;
+}
+
+static int
+pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages)
+{
+ struct pvr_device *pvr_dev = free_list->pvr_dev;
+ struct pvr_free_list_node *free_list_node;
+ int err;
+
+ mutex_lock(&free_list->lock);
+
+ if (num_pages & FREE_LIST_ALIGNMENT) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ free_list_node = kzalloc(sizeof(*free_list_node), GFP_KERNEL);
+ if (!free_list_node) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ free_list_node->num_pages = num_pages;
+ free_list_node->free_list = free_list;
+
+ free_list_node->mem_obj = pvr_gem_object_create(pvr_dev,
+ num_pages <<
+ ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ PVR_BO_FW_FLAGS_DEVICE_CACHED);
+ if (IS_ERR(free_list_node->mem_obj)) {
+ err = PTR_ERR(free_list_node->mem_obj);
+ goto err_free;
+ }
+
+ err = pvr_free_list_insert_node_locked(free_list_node);
+ if (err)
+ goto err_destroy_gem_object;
+
+ list_add_tail(&free_list_node->node, &free_list->mem_block_list);
+
+ /*
+ * Reserve a number of ready pages to allow the FW to process OOM quickly
+ * and asynchronously request a grow.
+ */
+ free_list->ready_pages =
+ calculate_free_list_ready_pages_locked(free_list,
+ free_list->current_pages);
+ free_list->current_pages -= free_list->ready_pages;
+
+ mutex_unlock(&free_list->lock);
+
+ return 0;
+
+err_destroy_gem_object:
+ pvr_gem_object_put(free_list_node->mem_obj);
+
+err_free:
+ kfree(free_list_node);
+
+err_unlock:
+ mutex_unlock(&free_list->lock);
+
+ return err;
+}
+
+void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelist_gs_data *req)
+{
+ struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, req->freelist_id);
+ struct rogue_fwif_kccb_cmd resp_cmd = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE,
+ };
+ struct rogue_fwif_freelist_gs_data *resp = &resp_cmd.cmd_data.free_list_gs_data;
+ u32 grow_pages = 0;
+
+ /* If we don't have a freelist registered for this ID, we can't do much. */
+ if (WARN_ON(!free_list))
+ return;
+
+ /* Since the FW made the request, it has already consumed the ready pages;
+ * update the host struct accordingly.
+ */
+ free_list->current_pages += free_list->ready_pages;
+ free_list->ready_pages = 0;
+
+ /* If the grow succeeds, update the grow_pages argument. */
+ if (!pvr_free_list_grow(free_list, free_list->grow_pages))
+ grow_pages = free_list->grow_pages;
+
+ /* Now prepare the response and send it back to the FW. */
+ pvr_fw_object_get_fw_addr(free_list->fw_obj, &resp->freelist_fw_addr);
+ resp->delta_pages = grow_pages;
+ resp->new_pages = free_list->current_pages + free_list->ready_pages;
+ resp->ready_pages = free_list->ready_pages;
+ pvr_free_list_put(free_list);
+
+ WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
+}
+
+static void
+pvr_free_list_free_node(struct pvr_free_list_node *free_list_node)
+{
+ pvr_gem_object_put(free_list_node->mem_obj);
+
+ kfree(free_list_node);
+}
+
+/**
+ * pvr_free_list_create() - Create a new free list and return an object pointer
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ *
+ * Return:
+ * * Pointer to new free_list, or
+ * * ERR_PTR(-%ENOMEM) on out of memory.
+ */
+struct pvr_free_list *
+pvr_free_list_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args)
+{
+ struct pvr_free_list *free_list;
+ int err;
+
+ /* Create and fill out the kernel structure */
+ free_list = kzalloc(sizeof(*free_list), GFP_KERNEL);
+
+ if (!free_list)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&free_list->ref_count);
+ INIT_LIST_HEAD(&free_list->mem_block_list);
+ INIT_LIST_HEAD(&free_list->hwrt_list);
+ mutex_init(&free_list->lock);
+
+ err = free_list_create_kernel_structure(pvr_file, args, free_list);
+ if (err < 0)
+ goto err_free;
+
+ /* Allocate global object ID for firmware. */
+ err = xa_alloc(&pvr_file->pvr_dev->free_list_ids,
+ &free_list->fw_id,
+ free_list,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ goto err_destroy_kernel_structure;
+
+ err = free_list_create_fw_structure(pvr_file, args, free_list);
+ if (err < 0)
+ goto err_free_fw_id;
+
+ err = pvr_free_list_grow(free_list, args->initial_num_pages);
+ if (err < 0)
+ goto err_fw_struct_cleanup;
+
+ return free_list;
+
+err_fw_struct_cleanup:
+ WARN_ON(pvr_fw_structure_cleanup(free_list->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ free_list->fw_obj, 0));
+
+err_free_fw_id:
+ xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);
+
+err_destroy_kernel_structure:
+ free_list_destroy_kernel_structure(free_list);
+
+err_free:
+ mutex_destroy(&free_list->lock);
+ kfree(free_list);
+
+ return ERR_PTR(err);
+}
+
+static void
+pvr_free_list_release(struct kref *ref_count)
+{
+ struct pvr_free_list *free_list =
+ container_of(ref_count, struct pvr_free_list, ref_count);
+ struct list_head *pos, *n;
+ int err;
+
+ xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);
+
+ err = pvr_fw_structure_cleanup(free_list->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ free_list->fw_obj, 0);
+ if (err == -EBUSY) {
+ /* Flush the FWCCB to process any HWR or freelist reconstruction
+ * request that might keep the freelist busy, and try again.
+ */
+ pvr_fwccb_process(free_list->pvr_dev);
+ err = pvr_fw_structure_cleanup(free_list->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ free_list->fw_obj, 0);
+ }
+
+ WARN_ON(err);
+
+ /* clang-format off */
+ list_for_each_safe(pos, n, &free_list->mem_block_list) {
+ struct pvr_free_list_node *free_list_node =
+ container_of(pos, struct pvr_free_list_node, node);
+
+ list_del(pos);
+ pvr_free_list_free_node(free_list_node);
+ }
+ /* clang-format on */
+
+ free_list_destroy_kernel_structure(free_list);
+ free_list_destroy_fw_structure(free_list);
+ mutex_destroy(&free_list->lock);
+ kfree(free_list);
+}
+
+/**
+ * pvr_destroy_free_lists_for_file() - Destroy any free lists associated with the
+ * given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all free lists associated with @pvr_file from the device free_list
+ * list and drops initial references. Free lists will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_free_list *free_list;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->free_list_handles, handle, free_list) {
+ (void)free_list;
+ pvr_free_list_put(xa_erase(&pvr_file->free_list_handles, handle));
+ }
+}
+
+/**
+ * pvr_free_list_put() - Release reference on free list
+ * @free_list: Pointer to list to release reference on
+ */
+void
+pvr_free_list_put(struct pvr_free_list *free_list)
+{
+ if (free_list)
+ kref_put(&free_list->ref_count, pvr_free_list_release);
+}
+
+void pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
+{
+ mutex_lock(&free_list->lock);
+
+ list_add_tail(&hwrt_data->freelist_node, &free_list->hwrt_list);
+
+ mutex_unlock(&free_list->lock);
+}
+
+void pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
+{
+ mutex_lock(&free_list->lock);
+
+ list_del(&hwrt_data->freelist_node);
+
+ mutex_unlock(&free_list->lock);
+}
+
+static void
+pvr_free_list_reconstruct(struct pvr_device *pvr_dev, u32 freelist_id)
+{
+ struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, freelist_id);
+ struct pvr_free_list_node *free_list_node;
+ struct rogue_fwif_freelist *fw_data;
+ struct pvr_hwrt_data *hwrt_data;
+
+ if (!free_list)
+ return;
+
+ mutex_lock(&free_list->lock);
+
+ /* Rebuild the free list based on the memory block list. */
+ free_list->current_pages = 0;
+
+ list_for_each_entry(free_list_node, &free_list->mem_block_list, node)
+ WARN_ON(pvr_free_list_insert_node_locked(free_list_node));
+
+ /*
+ * Remove the ready pages, which are reserved to allow the FW to process OOM quickly and
+ * asynchronously request a grow.
+ */
+ free_list->current_pages -= free_list->ready_pages;
+
+ fw_data = free_list->fw_data;
+ fw_data->current_stack_top = fw_data->current_pages - 1;
+ fw_data->allocated_page_count = 0;
+ fw_data->allocated_mmu_page_count = 0;
+
+ /* Reset the state of any associated HWRTs. */
+ list_for_each_entry(hwrt_data, &free_list->hwrt_list, freelist_node) {
+ struct rogue_fwif_hwrtdata *hwrt_fw_data = pvr_fw_object_vmap(hwrt_data->fw_obj);
+
+ if (!WARN_ON(IS_ERR(hwrt_fw_data))) {
+ hwrt_fw_data->state = ROGUE_FWIF_RTDATA_STATE_HWR;
+ hwrt_fw_data->hwrt_data_flags &= ~HWRTDATA_HAS_LAST_GEOM;
+ }
+
+ pvr_fw_object_vunmap(hwrt_data->fw_obj);
+ }
+
+ mutex_unlock(&free_list->lock);
+
+ pvr_free_list_put(free_list);
+}
+
+void
+pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req)
+{
+ struct rogue_fwif_kccb_cmd resp_cmd = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE,
+ };
+ struct rogue_fwif_freelists_reconstruction_data *resp =
+ &resp_cmd.cmd_data.free_lists_reconstruction_data;
+
+ for (u32 i = 0; i < req->freelist_count; i++)
+ pvr_free_list_reconstruct(pvr_dev, req->freelist_ids[i]);
+
+ resp->freelist_count = req->freelist_count;
+ memcpy(resp->freelist_ids, req->freelist_ids,
+ req->freelist_count * sizeof(resp->freelist_ids[0]));
+
+ WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.h b/drivers/gpu/drm/imagination/pvr_free_list.h
new file mode 100644
index 000000000000..bfb4f5fc622c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FREE_LIST_H
+#define PVR_FREE_LIST_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_device.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+/* Forward declaration from pvr_hwrt.h. */
+struct pvr_hwrt_data;
+
+/**
+ * struct pvr_free_list_node - structure representing an allocation in the free
+ * list
+ */
+struct pvr_free_list_node {
+ /** @node: List node for &pvr_free_list.mem_block_list. */
+ struct list_head node;
+
+ /** @free_list: Pointer to owning free list. */
+ struct pvr_free_list *free_list;
+
+ /** @num_pages: Number of pages in this node. */
+ u32 num_pages;
+
+ /** @mem_obj: GEM object representing the pages in this node. */
+ struct pvr_gem_object *mem_obj;
+};
+
+/**
+ * struct pvr_free_list - structure representing a free list
+ */
+struct pvr_free_list {
+ /** @ref_count: Reference count of object. */
+ struct kref ref_count;
+
+ /** @pvr_dev: Pointer to device that owns this object. */
+ struct pvr_device *pvr_dev;
+
+ /** @obj: GEM object representing the free list. */
+ struct pvr_gem_object *obj;
+
+ /** @fw_obj: FW object representing the FW-side structure. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @fw_data: Pointer to CPU mapping of the FW-side structure. */
+ struct rogue_fwif_freelist *fw_data;
+
+ /**
+ * @lock: Mutex protecting modification of the free list. Must be held when accessing any
+ * of the members below.
+ */
+ struct mutex lock;
+
+ /** @fw_id: Firmware ID for this object. */
+ u32 fw_id;
+
+ /** @current_pages: Current number of pages in free list. */
+ u32 current_pages;
+
+ /** @max_pages: Maximum number of pages in free list. */
+ u32 max_pages;
+
+ /** @grow_pages: Pages to grow free list by per request. */
+ u32 grow_pages;
+
+ /**
+ * @grow_threshold: Percentage of FL memory used that should trigger a
+ * new grow request.
+ */
+ u32 grow_threshold;
+
+ /**
+ * @ready_pages: Number of pages reserved for FW to use while a grow
+ * request is being processed.
+ */
+ u32 ready_pages;
+
+ /** @mem_block_list: List of memory blocks in this free list. */
+ struct list_head mem_block_list;
+
+ /** @hwrt_list: List of HWRTs using this free list. */
+ struct list_head hwrt_list;
+
+ /** @initial_num_pages: Initial number of pages in free list. */
+ u32 initial_num_pages;
+
+ /** @free_list_gpu_addr: Address of free list in GPU address space. */
+ u64 free_list_gpu_addr;
+};
+
+struct pvr_free_list *
+pvr_free_list_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args);
+
+void
+pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file);
+
+u32
+pvr_get_free_list_min_pages(struct pvr_device *pvr_dev);
+
+static __always_inline struct pvr_free_list *
+pvr_free_list_get(struct pvr_free_list *free_list)
+{
+ if (free_list)
+ kref_get(&free_list->ref_count);
+
+ return free_list;
+}
+
+/**
+ * pvr_free_list_lookup() - Lookup free list pointer from handle and file
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on free list object. Call pvr_free_list_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, is not a free list, or
+ * does not belong to @pvr_file)
+ */
+static __always_inline struct pvr_free_list *
+pvr_free_list_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_free_list *free_list;
+
+ xa_lock(&pvr_file->free_list_handles);
+ free_list = pvr_free_list_get(xa_load(&pvr_file->free_list_handles, handle));
+ xa_unlock(&pvr_file->free_list_handles);
+
+ return free_list;
+}
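+
+/*
+ * Illustrative usage sketch: the reference taken by pvr_free_list_lookup()
+ * must be balanced with a call to pvr_free_list_put() once the caller is
+ * done with the object, e.g.:
+ *
+ *   struct pvr_free_list *free_list = pvr_free_list_lookup(pvr_file, handle);
+ *
+ *   if (free_list) {
+ *           ... use free_list ...
+ *           pvr_free_list_put(free_list);
+ *   }
+ */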
+
+/**
+ * pvr_free_list_lookup_id() - Lookup free list pointer from FW ID
+ * @pvr_dev: Device pointer.
+ * @id: FW object ID.
+ *
+ * Takes reference on free list object. Call pvr_free_list_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a free list)
+ */
+static __always_inline struct pvr_free_list *
+pvr_free_list_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+ struct pvr_free_list *free_list;
+
+ xa_lock(&pvr_dev->free_list_ids);
+
+ /* Free lists are removed from the free_list_ids set in the free list
+ * release path, meaning the ref_count reached zero before they get
+ * removed. We need to make sure we're not trying to acquire a free
+ * list that's being destroyed.
+ */
+ free_list = xa_load(&pvr_dev->free_list_ids, id);
+ if (free_list && !kref_get_unless_zero(&free_list->ref_count))
+ free_list = NULL;
+ xa_unlock(&pvr_dev->free_list_ids);
+
+ return free_list;
+}
+
+void
+pvr_free_list_put(struct pvr_free_list *free_list);
+
+void
+pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data);
+void
+pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data);
+
+void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelist_gs_data *req);
+
+void
+pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req);
+
+#endif /* PVR_FREE_LIST_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
new file mode 100644
index 000000000000..3debc9870a82
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -0,0 +1,1489 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_fw_trace.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif_dev_info.h"
+#include "pvr_rogue_heap_config.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_mm.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+
+#define FW_MAX_SUPPORTED_MAJOR_VERSION 1
+
+#define FW_BOOT_TIMEOUT_USEC 5000000
+
+/* Config heap occupies top 192k of the firmware heap. */
+#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
+#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+
+/* Main firmware allocations should come from the remainder of the heap. */
+#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE
+
+/* Offsets from start of configuration area of FW heap. */
+#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0
+#define PVR_ROGUE_FWIF_OSINIT_OFFSET \
+ (PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \
+ (PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+
+#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K
+
+#define PVR_SYNC_OBJ_SIZE sizeof(u32)
+
+const struct pvr_fw_layout_entry *
+pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u32 entry;
+
+ for (entry = 0; entry < num_layout_entries; entry++) {
+ if (layout_entries[entry].id == id)
+ return &layout_entries[entry];
+ }
+
+ return NULL;
+}
+
+static const struct pvr_fw_layout_entry *
+pvr_fw_find_private_data(struct pvr_device *pvr_dev)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u32 entry;
+
+ for (entry = 0; entry < num_layout_entries; entry++) {
+ if (layout_entries[entry].id == META_PRIVATE_DATA ||
+ layout_entries[entry].id == MIPS_PRIVATE_DATA ||
+ layout_entries[entry].id == RISCV_PRIVATE_DATA)
+ return &layout_entries[entry];
+ }
+
+ return NULL;
+}
+
+#define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64)
+
+/**
+ * pvr_fw_validate() - Parse firmware header and check compatibility
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL if firmware is incompatible.
+ */
+static int
+pvr_fw_validate(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ const struct firmware *firmware = pvr_dev->fw_dev.firmware;
+ const struct pvr_fw_layout_entry *layout_entries;
+ const struct pvr_fw_info_header *header;
+ const u8 *fw = firmware->data;
+ u32 fw_offset = firmware->size - SZ_4K;
+ u32 layout_table_size;
+ u32 entry;
+
+ if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
+ return -EINVAL;
+
+ header = (const struct pvr_fw_info_header *)&fw[fw_offset];
+
+ if (header->info_version != PVR_FW_INFO_VERSION) {
+ drm_err(drm_dev, "Unsupported fw info version %u\n",
+ header->info_version);
+ return -EINVAL;
+ }
+
+ if (header->header_len != sizeof(struct pvr_fw_info_header) ||
+ header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) ||
+ header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) {
+ drm_err(drm_dev, "FW info format mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ||
+ header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION ||
+ header->fw_version_major == 0) {
+ drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n",
+ header->fw_version_major, header->fw_version_minor,
+ header->fw_version_build,
+ (header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : "");
+ return -EINVAL;
+ }
+
+ if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) {
+ struct pvr_gpu_id fw_gpu_id;
+
+ packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id);
+ drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n",
+ fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c,
+ pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c);
+ return -EINVAL;
+ }
+
+ fw_offset += header->header_len;
+ layout_table_size =
+ header->layout_entry_size * header->layout_entry_num;
+ if ((fw_offset + layout_table_size) > firmware->size)
+ return -EINVAL;
+
+ layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
+ for (entry = 0; entry < header->layout_entry_num; entry++) {
+ u32 start_addr = layout_entries[entry].base_addr;
+ u32 end_addr = start_addr + layout_entries[entry].alloc_size;
+
+ if (start_addr >= end_addr)
+ return -EINVAL;
+ }
+
+ fw_offset = (firmware->size - SZ_4K) - header->device_info_size;
+
+ drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major,
+ header->fw_version_minor, header->fw_version_build);
+
+ pvr_dev->fw_version.major = header->fw_version_major;
+ pvr_dev->fw_version.minor = header->fw_version_minor;
+
+ pvr_dev->fw_dev.header = header;
+ pvr_dev->fw_dev.layout_entries = layout_entries;
+
+ return 0;
+}
+
+static int
+pvr_fw_get_device_info(struct pvr_device *pvr_dev)
+{
+ const struct firmware *firmware = pvr_dev->fw_dev.firmware;
+ struct pvr_fw_device_info_header *header;
+ const u8 *fw = firmware->data;
+ const u64 *dev_info;
+ u32 fw_offset;
+
+ fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size;
+
+ header = (struct pvr_fw_device_info_header *)&fw[fw_offset];
+ dev_info = (u64 *)(header + 1);
+
+ pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size);
+ dev_info += header->brn_mask_size;
+
+ pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size);
+ dev_info += header->ern_mask_size;
+
+ return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size,
+ header->feature_param_size);
+}
+
+static void
+layout_get_sizes(struct pvr_device *pvr_dev)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+ fw_mem->code_alloc_size = 0;
+ fw_mem->data_alloc_size = 0;
+ fw_mem->core_code_alloc_size = 0;
+ fw_mem->core_data_alloc_size = 0;
+
+ /* Extract section sizes from FW layout table. */
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
+ switch (layout_entries[entry].type) {
+ case FW_CODE:
+ fw_mem->code_alloc_size += layout_entries[entry].alloc_size;
+ break;
+ case FW_DATA:
+ fw_mem->data_alloc_size += layout_entries[entry].alloc_size;
+ break;
+ case FW_COREMEM_CODE:
+ fw_mem->core_code_alloc_size +=
+ layout_entries[entry].alloc_size;
+ break;
+ case FW_COREMEM_DATA:
+ fw_mem->core_data_alloc_size +=
+ layout_entries[entry].alloc_size;
+ break;
+ case NONE:
+ break;
+ }
+ }
+}
+
+int
+pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
+ void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
+ void **host_addr_out)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u32 end_addr = addr + size;
+ int entry = 0;
+
+ /* Ensure requested range is not zero, and size is not causing addr to overflow. */
+ if (end_addr <= addr)
+ return -EINVAL;
+
+ for (entry = 0; entry < num_layout_entries; entry++) {
+ u32 entry_start_addr = layout_entries[entry].base_addr;
+ u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
+
+ if (addr >= entry_start_addr && addr < entry_end_addr &&
+ end_addr > entry_start_addr && end_addr <= entry_end_addr) {
+ switch (layout_entries[entry].type) {
+ case FW_CODE:
+ *host_addr_out = fw_code_ptr;
+ break;
+
+ case FW_DATA:
+ *host_addr_out = fw_data_ptr;
+ break;
+
+ case FW_COREMEM_CODE:
+ *host_addr_out = fw_core_code_ptr;
+ break;
+
+ case FW_COREMEM_DATA:
+ *host_addr_out = fw_core_data_ptr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ /* Direct memory write to mapped memory. */
+ addr -= layout_entries[entry].base_addr;
+ addr += layout_entries[entry].alloc_offset;
+
+ /*
+ * Add offset to pointer to FW allocation only if that
+ * allocation is available
+ */
+ *(u8 **)host_addr_out += addr;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->fwif_connection_ctl =
+ pvr_fw_object_create_and_map_offset(pvr_dev,
+ fw_dev->fw_heap_info.config_offset +
+ PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET,
+ sizeof(*fw_dev->fwif_connection_ctl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL,
+ &fw_dev->mem.fwif_connection_ctl_obj);
+ if (IS_ERR(fw_dev->fwif_connection_ctl)) {
+ drm_err(drm_dev,
+ "Unable to allocate FWIF connection control memory\n");
+ return PTR_ERR(fw_dev->fwif_connection_ctl);
+ }
+
+ return 0;
+}
+
+static void
+pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj);
+}
+
+static void
+fw_osinit_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_osinit *fwif_osinit = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+
+ fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr;
+ fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr;
+ pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj,
+ &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);
+
+ fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
+ fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;
+
+ fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
+ fwif_osinit->work_est_firmware_ccb_fw_addr = 0;
+
+ pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj,
+ &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);
+
+ fwif_osinit->hwr_debug_dump_limit = 0;
+
+ rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc);
+ rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc);
+}
+
+static void
+fw_osdata_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_osdata *fwif_osdata = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+ pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr);
+}
+
+static void
+fw_fault_page_init(void *cpu_ptr, void *priv)
+{
+ u32 *fault_page = cpu_ptr;
+
+ for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
+ fault_page[i] = 0xdeadbee0;
+}
+
+static void
+fw_sysinit_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ dma_addr_t fault_dma_addr = 0;
+ u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);
+
+ WARN_ON(!clock_speed_hz);
+
+ WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr));
+ fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;
+
+ fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
+ fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;
+
+ pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
+ &fwif_sysinit->trace_buf_ctl_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj,
+ &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
+ if (fw_mem->core_data_obj) {
+ pvr_fw_object_get_fw_addr(fw_mem->core_data_obj,
+ &fwif_sysinit->coremem_data_store.fw_addr);
+ }
+
+ /* Currently unsupported. */
+ fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
+ fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;
+
+ /* Skip alignment checks. */
+ fwif_sysinit->align_checks = 0;
+
+ fwif_sysinit->filter_flags = 0;
+ fwif_sysinit->hw_perf_filter = 0;
+ fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
+ fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
+ fwif_sysinit->active_pm_latency_ms = 0;
+ fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
+ fwif_sysinit->firmware_started = false;
+ fwif_sysinit->marker_val = 1;
+
+ memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
+ sizeof(fwif_sysinit->bvnc_km_feature_flags));
+}
+
+#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4
+
+static void
+fw_sysdata_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ u32 slc_size_in_kilobytes = 0;
+ u32 config_flags = 0;
+
+ WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes));
+
+ if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB)
+ config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP;
+
+ fwif_sysdata->config_flags = config_flags;
+}
+
+static void
+fw_runtime_cfg_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);
+
+ WARN_ON(!clock_speed_hz);
+
+ runtime_cfg->core_clock_speed = clock_speed_hz;
+ runtime_cfg->active_pm_latency_ms = 0;
+ runtime_cfg->active_pm_latency_persistant = true;
+ WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
+ &runtime_cfg->default_dusts_num_init) != 0);
+}
+
+static void
+fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr;
+
+ gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
+}
+
+static int
+pvr_fw_create_structures(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ int err;
+
+ fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->power_sync_obj);
+ if (IS_ERR(fw_dev->power_sync)) {
+ drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
+ return PTR_ERR(fw_dev->power_sync);
+ }
+
+ fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->hwrinfobuf_obj);
+ if (IS_ERR(fw_dev->hwrinfobuf)) {
+ drm_err(drm_dev,
+ "Unable to allocate FW hwrinfobuf structure\n");
+ err = PTR_ERR(fw_dev->hwrinfobuf);
+ goto err_release_power_sync;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->mmucache_sync_obj);
+ if (err) {
+ drm_err(drm_dev,
+ "Unable to allocate MMU cache sync object\n");
+ goto err_release_hwrinfobuf;
+ }
+
+ fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev,
+ sizeof(*fw_dev->fwif_sysdata),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_sysdata_init, pvr_dev,
+ &fw_mem->sysdata_obj);
+ if (IS_ERR(fw_dev->fwif_sysdata)) {
+ drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
+ err = PTR_ERR(fw_dev->fwif_sysdata);
+ goto err_release_mmucache_sync_obj;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_fault_page_init, NULL, &fw_mem->fault_page_obj);
+ if (err) {
+ drm_err(drm_dev, "Unable to allocate FW fault page\n");
+ goto err_release_sysdata;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj);
+ if (err) {
+ drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
+ goto err_release_fault_page;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj);
+ if (err) {
+ drm_err(drm_dev, "Unable to allocate FW runtime config\n");
+ goto err_release_gpu_util_fwcb;
+ }
+
+ err = pvr_fw_trace_init(pvr_dev);
+ if (err)
+ goto err_release_runtime_cfg;
+
+ fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev,
+ sizeof(*fw_dev->fwif_osdata),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_osdata_init, pvr_dev,
+ &fw_mem->osdata_obj);
+ if (IS_ERR(fw_dev->fwif_osdata)) {
+ drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
+ err = PTR_ERR(fw_dev->fwif_osdata);
+ goto err_fw_trace_fini;
+ }
+
+ fw_dev->fwif_osinit =
+ pvr_fw_object_create_and_map_offset(pvr_dev,
+ fw_dev->fw_heap_info.config_offset +
+ PVR_ROGUE_FWIF_OSINIT_OFFSET,
+ sizeof(*fw_dev->fwif_osinit),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_osinit_init, pvr_dev, &fw_mem->osinit_obj);
+ if (IS_ERR(fw_dev->fwif_osinit)) {
+ drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
+ err = PTR_ERR(fw_dev->fwif_osinit);
+ goto err_release_osdata;
+ }
+
+ fw_dev->fwif_sysinit =
+ pvr_fw_object_create_and_map_offset(pvr_dev,
+ fw_dev->fw_heap_info.config_offset +
+ PVR_ROGUE_FWIF_SYSINIT_OFFSET,
+ sizeof(*fw_dev->fwif_sysinit),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj);
+ if (IS_ERR(fw_dev->fwif_sysinit)) {
+ drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
+ err = PTR_ERR(fw_dev->fwif_sysinit);
+ goto err_release_osinit;
+ }
+
+ return 0;
+
+err_release_osinit:
+ pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
+
+err_release_osdata:
+ pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
+
+err_fw_trace_fini:
+ pvr_fw_trace_fini(pvr_dev);
+
+err_release_runtime_cfg:
+ pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
+
+err_release_gpu_util_fwcb:
+ pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
+
+err_release_fault_page:
+ pvr_fw_object_destroy(fw_mem->fault_page_obj);
+
+err_release_sysdata:
+ pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
+
+err_release_mmucache_sync_obj:
+ pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
+
+err_release_hwrinfobuf:
+ pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
+
+err_release_power_sync:
+ pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
+
+ return err;
+}
+
+static void
+pvr_fw_destroy_structures(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+
+ pvr_fw_trace_fini(pvr_dev);
+ pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
+ pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
+ pvr_fw_object_destroy(fw_mem->fault_page_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj);
+
+ pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
+}
+
+/**
+ * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
+ * arguments
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_fw_object_create_and_map_offset(), or
+ * * Any error returned by pvr_fw_object_create_and_map().
+ */
+static int
+pvr_fw_process(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+ const u8 *fw = pvr_dev->fw_dev.firmware->data;
+ const struct pvr_fw_layout_entry *private_data;
+ u8 *fw_code_ptr;
+ u8 *fw_data_ptr;
+ u8 *fw_core_code_ptr;
+ u8 *fw_core_data_ptr;
+ int err;
+
+ layout_get_sizes(pvr_dev);
+
+ private_data = pvr_fw_find_private_data(pvr_dev);
+ if (!private_data)
+ return -EINVAL;
+
+ /* Allocate and map memory for firmware sections. */
+
+ /*
+ * Code allocation must be at the start of the firmware heap, otherwise
+ * firmware processor will be unable to boot.
+ *
+ * This has the useful side-effect that for every other object in the
+ * driver, a firmware address of 0 is invalid.
+ */
+ fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->code_obj);
+ if (IS_ERR(fw_code_ptr)) {
+ drm_err(drm_dev, "Unable to allocate FW code memory\n");
+ return PTR_ERR(fw_code_ptr);
+ }
+
+ if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
+ u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
+
+ fw_data_ptr =
+ pvr_fw_object_create_and_map_offset(pvr_dev, base_addr,
+ fw_mem->data_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->data_obj);
+ } else {
+ fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->data_obj);
+ }
+ if (IS_ERR(fw_data_ptr)) {
+ drm_err(drm_dev, "Unable to allocate FW data memory\n");
+ err = PTR_ERR(fw_data_ptr);
+ goto err_free_fw_code_obj;
+ }
+
+ /* Core code and data sections are optional. */
+ if (fw_mem->core_code_alloc_size) {
+ fw_core_code_ptr =
+ pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->core_code_obj);
+ if (IS_ERR(fw_core_code_ptr)) {
+ drm_err(drm_dev,
+ "Unable to allocate FW core code memory\n");
+ err = PTR_ERR(fw_core_code_ptr);
+ goto err_free_fw_data_obj;
+ }
+ } else {
+ fw_core_code_ptr = NULL;
+ }
+
+ if (fw_mem->core_data_alloc_size) {
+ fw_core_data_ptr =
+ pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->core_data_obj);
+ if (IS_ERR(fw_core_data_ptr)) {
+ drm_err(drm_dev,
+ "Unable to allocate FW core data memory\n");
+ err = PTR_ERR(fw_core_data_ptr);
+ goto err_free_fw_core_code_obj;
+ }
+ } else {
+ fw_core_data_ptr = NULL;
+ }
+
+ fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL);
+ fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL);
+ if (fw_mem->core_code_alloc_size)
+ fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL);
+ if (fw_mem->core_data_alloc_size)
+ fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL);
+
+ if (!fw_mem->code || !fw_mem->data ||
+ (!fw_mem->core_code && fw_mem->core_code_alloc_size) ||
+ (!fw_mem->core_data && fw_mem->core_data_alloc_size)) {
+ err = -ENOMEM;
+ goto err_free_kdata;
+ }
+
+ err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw,
+ fw_mem->code, fw_mem->data, fw_mem->core_code,
+ fw_mem->core_data, fw_mem->core_code_alloc_size);
+
+ if (err)
+ goto err_free_fw_core_data_obj;
+
+ memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
+ memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
+ if (fw_mem->core_code)
+ memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size);
+ if (fw_mem->core_data)
+ memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
+
+ /* We're finished with the firmware section memory on the CPU, unmap. */
+ if (fw_core_data_ptr)
+ pvr_fw_object_vunmap(fw_mem->core_data_obj);
+ if (fw_core_code_ptr)
+ pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ pvr_fw_object_vunmap(fw_mem->data_obj);
+ fw_data_ptr = NULL;
+ pvr_fw_object_vunmap(fw_mem->code_obj);
+ fw_code_ptr = NULL;
+
+ err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
+ if (err)
+ goto err_free_fw_core_data_obj;
+
+ return 0;
+
+err_free_kdata:
+ kfree(fw_mem->core_data);
+ kfree(fw_mem->core_code);
+ kfree(fw_mem->data);
+ kfree(fw_mem->code);
+
+err_free_fw_core_data_obj:
+ if (fw_core_data_ptr)
+ pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
+
+err_free_fw_core_code_obj:
+ if (fw_core_code_ptr)
+ pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
+
+err_free_fw_data_obj:
+ if (fw_data_ptr)
+ pvr_fw_object_vunmap(fw_mem->data_obj);
+ pvr_fw_object_destroy(fw_mem->data_obj);
+
+err_free_fw_code_obj:
+ if (fw_code_ptr)
+ pvr_fw_object_vunmap(fw_mem->code_obj);
+ pvr_fw_object_destroy(fw_mem->code_obj);
+
+ return err;
+}
+
+static int
+pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size)
+{
+ u8 *dest_ptr = pvr_fw_object_vmap(dest_obj);
+
+ if (IS_ERR(dest_ptr))
+ return PTR_ERR(dest_ptr);
+
+ memcpy(dest_ptr, src_ptr, size);
+
+ pvr_fw_object_vunmap(dest_obj);
+
+ return 0;
+}
+
+static int
+pvr_fw_reinit_code_data(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+ int err;
+
+ err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size);
+ if (err)
+ return err;
+
+ err = pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size);
+ if (err)
+ return err;
+
+ if (fw_mem->core_code) {
+ err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code,
+ fw_mem->core_code_alloc_size);
+ if (err)
+ return err;
+ }
+
+ if (fw_mem->core_data) {
+ err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data,
+ fw_mem->core_data_alloc_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+pvr_fw_cleanup(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+ pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+ if (fw_mem->core_code_obj)
+ pvr_fw_object_destroy(fw_mem->core_code_obj);
+ if (fw_mem->core_data_obj)
+ pvr_fw_object_destroy(fw_mem->core_data_obj);
+ pvr_fw_object_destroy(fw_mem->code_obj);
+ pvr_fw_object_destroy(fw_mem->data_obj);
+}
+
+/**
+ * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ETIMEDOUT if firmware fails to boot within timeout.
+ */
+int
+pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
+{
+ ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
+ if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
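+
+/*
+ * Note: @firmware_started is initialised to false in fw_sysinit_init() and is
+ * expected to be set by the firmware itself once boot completes; that flag is
+ * what the polling loop above is waiting on.
+ */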
+
+/**
+ * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
+ * @pvr_dev: Target PowerVR device.
+ * @log2_size: Log2 of raw heap size.
+ * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
+ */
+void
+pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
+ fw_dev->fw_heap_info.log2_size = log2_size;
+ fw_dev->fw_heap_info.reserved_size = reserved_size;
+ fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
+ fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
+ fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
+ PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
+ fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
+ (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
+}
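+
+/*
+ * Worked example (illustrative values only): with @log2_size == 25 and
+ * @reserved_size == 0, raw_size is 32 MiB, offset_mask is 0x1ffffff, the
+ * config area starts PVR_ROGUE_FW_CONFIG_HEAP_SIZE (192 KiB) below the top
+ * of the heap, and the usable main heap size is 32 MiB - 192 KiB.
+ */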
+
+/**
+ * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must be called before querying device information.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if firmware validation fails.
+ */
+int
+pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_fw_validate(pvr_dev);
+ if (err)
+ return err;
+
+ return pvr_fw_get_device_info(pvr_dev);
+}
+
+/**
+ * pvr_fw_init() - Initialise and boot firmware
+ * @pvr_dev: Target PowerVR device
+ *
+ * On successful completion of the function the PowerVR device will be
+ * initialised and ready to use.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL on invalid firmware image,
+ * * -%ENOMEM on out of memory, or
+ * * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
+ */
+int
+pvr_fw_init(struct pvr_device *pvr_dev)
+{
+ u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
+ u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ int err;
+
+ if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
+ fw_dev->defs = &pvr_fw_defs_meta;
+ else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
+ fw_dev->defs = &pvr_fw_defs_mips;
+ else
+ return -EINVAL;
+
+ err = fw_dev->defs->init(pvr_dev);
+ if (err)
+ return err;
+
+ drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
+ fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
+ spin_lock_init(&fw_dev->fw_mm_lock);
+
+ INIT_LIST_HEAD(&fw_dev->fw_objs.list);
+ err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock);
+ if (err)
+ goto err_mm_takedown;
+
+ err = pvr_fw_process(pvr_dev);
+ if (err)
+ goto err_mm_takedown;
+
+ /* Initialise KCCB and FWCCB. */
+ err = pvr_kccb_init(pvr_dev);
+ if (err)
+ goto err_fw_cleanup;
+
+ err = pvr_fwccb_init(pvr_dev);
+ if (err)
+ goto err_kccb_fini;
+
+ /* Allocate memory for KCCB return slots. */
+ pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &pvr_dev->kccb.rtn_obj);
+ if (IS_ERR(pvr_dev->kccb.rtn)) {
+ err = PTR_ERR(pvr_dev->kccb.rtn);
+ goto err_fwccb_fini;
+ }
+
+ err = pvr_fw_create_structures(pvr_dev);
+ if (err)
+ goto err_kccb_rtn_release;
+
+ err = pvr_fw_start(pvr_dev);
+ if (err)
+ goto err_destroy_structures;
+
+ err = pvr_wait_for_fw_boot(pvr_dev);
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
+ goto err_fw_stop;
+ }
+
+ fw_dev->booted = true;
+
+ return 0;
+
+err_fw_stop:
+ pvr_fw_stop(pvr_dev);
+
+err_destroy_structures:
+ pvr_fw_destroy_structures(pvr_dev);
+
+err_kccb_rtn_release:
+ pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);
+
+err_fwccb_fini:
+ pvr_ccb_fini(&pvr_dev->fwccb);
+
+err_kccb_fini:
+ pvr_kccb_fini(pvr_dev);
+
+err_fw_cleanup:
+ pvr_fw_cleanup(pvr_dev);
+
+err_mm_takedown:
+ drm_mm_takedown(&fw_dev->fw_mm);
+
+ if (fw_dev->defs->fini)
+ fw_dev->defs->fini(pvr_dev);
+
+ return err;
+}
+
+/**
+ * pvr_fw_fini() - Shutdown firmware processor and free associated memory
+ * @pvr_dev: Target PowerVR device
+ */
+void
+pvr_fw_fini(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->booted = false;
+
+ pvr_fw_destroy_structures(pvr_dev);
+ pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);
+
+ /*
+ * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has
+ * been unregistered at this point, so no new work should be submitted.
+ */
+ pvr_ccb_fini(&pvr_dev->fwccb);
+ pvr_kccb_fini(pvr_dev);
+ pvr_fw_cleanup(pvr_dev);
+
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+ WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list));
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ drm_mm_takedown(&fw_dev->fw_mm);
+
+ if (fw_dev->defs->fini)
+ fw_dev->defs->fini(pvr_dev);
+}
+
+/**
+ * pvr_fw_mts_schedule() - Schedule work via an MTS kick
+ * @pvr_dev: Target PowerVR device
+ * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
+ */
+void
+pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
+{
+ /* Ensure memory is flushed before kicking MTS. */
+ wmb();
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);
+
+ /* Ensure the MTS kick goes through before continuing. */
+ mb();
+}
+
+/**
+ * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
+ * @pvr_dev: Target PowerVR device.
+ * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
+ * @fw_obj: Pointer to FW object containing object to cleanup.
+ * @offset: Offset within FW object of object to cleanup.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -EBUSY if object is busy,
+ * * -ETIMEDOUT on timeout, or
+ * * -EIO if device is lost.
+ */
+int
+pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
+ u32 offset)
+{
+ struct rogue_fwif_kccb_cmd cmd;
+ int slot_nr;
+ int idx;
+ int err;
+ u32 rtn;
+
+ struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;
+
+ down_read(&pvr_dev->reset_sem);
+
+ if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
+ err = -EIO;
+ goto err_up_read;
+ }
+
+ cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
+ cmd.kccb_flags = 0;
+ cleanup_req->cleanup_type = type;
+
+ switch (type) {
+ case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
+ pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
+ &cleanup_req->cleanup_data.context_fw_addr);
+ break;
+ case ROGUE_FWIF_CLEANUP_HWRTDATA:
+ pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
+ &cleanup_req->cleanup_data.hwrt_data_fw_addr);
+ break;
+ case ROGUE_FWIF_CLEANUP_FREELIST:
+ pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
+ &cleanup_req->cleanup_data.freelist_fw_addr);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
+ if (err)
+ goto err_drm_dev_exit;
+
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
+ if (err)
+ goto err_drm_dev_exit;
+
+ if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
+ err = -EBUSY;
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+err_up_read:
+ up_read(&pvr_dev->reset_sem);
+
+ return err;
+}
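+
+/*
+ * Illustrative call (sketch): a free list could, for example, be cleaned up
+ * with
+ *
+ *   err = pvr_fw_structure_cleanup(pvr_dev, ROGUE_FWIF_CLEANUP_FREELIST,
+ *                                  free_list->fw_obj, 0);
+ *
+ * assuming the FW freelist structure sits at offset zero within its FW
+ * object.
+ */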
+
+/**
+ * pvr_fw_object_fw_map() - Map a FW object in firmware address space
+ * @pvr_dev: Device pointer.
+ * @fw_obj: FW object to map.
+ * @dev_addr: Desired address in device space, if a specific address is
+ * required. 0 otherwise.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if @fw_obj is already mapped, or
+ * * Any error returned by DRM.
+ */
+static int
+pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ int err;
+
+ spin_lock(&fw_dev->fw_mm_lock);
+
+ if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (!dev_addr) {
+ /*
+ * Allocate from the main heap only (firmware heap minus
+ * config space).
+ */
+ err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
+ gem_obj->size, 0, 0,
+ fw_dev->fw_heap_info.gpu_addr,
+ fw_dev->fw_heap_info.gpu_addr +
+ fw_dev->fw_heap_info.size, 0);
+ if (err)
+ goto err_unlock;
+ } else {
+ fw_obj->fw_mm_node.start = dev_addr;
+ fw_obj->fw_mm_node.size = gem_obj->size;
+ err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
+ if (err)
+ goto err_unlock;
+ }
+
+ spin_unlock(&fw_dev->fw_mm_lock);
+
+ /* Map object on GPU. */
+ err = fw_dev->defs->vm_map(pvr_dev, fw_obj);
+ if (err)
+ goto err_remove_node;
+
+ fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);
+
+ return 0;
+
+err_remove_node:
+ spin_lock(&fw_dev->fw_mm_lock);
+ drm_mm_remove_node(&fw_obj->fw_mm_node);
+
+err_unlock:
+ spin_unlock(&fw_dev->fw_mm_lock);
+
+ return err;
+}
+
+/**
+ * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object
+ * @fw_obj: FW object to unmap.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if object is not currently mapped.
+ */
+static int
+pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->defs->vm_unmap(pvr_dev, fw_obj);
+
+ spin_lock(&fw_dev->fw_mm_lock);
+
+ if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+ spin_unlock(&fw_dev->fw_mm_lock);
+ return -EINVAL;
+ }
+
+ drm_mm_remove_node(&fw_obj->fw_mm_node);
+
+ spin_unlock(&fw_dev->fw_mm_lock);
+
+ return 0;
+}
+
+static void *
+pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size,
+ u64 flags, u64 dev_addr,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **fw_obj_out)
+{
+ struct pvr_fw_object *fw_obj;
+ void *cpu_ptr;
+ int err;
+
+ /* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects. */
+ flags |= DRM_PVR_BO_PM_FW_PROTECT;
+
+ fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
+ if (!fw_obj)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fw_obj->node);
+ fw_obj->init = init;
+ fw_obj->init_priv = init_priv;
+
+ fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags);
+ if (IS_ERR(fw_obj->gem)) {
+ err = PTR_ERR(fw_obj->gem);
+ fw_obj->gem = NULL;
+ goto err_put_object;
+ }
+
+ err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr);
+ if (err)
+ goto err_put_object;
+
+ cpu_ptr = pvr_fw_object_vmap(fw_obj);
+ if (IS_ERR(cpu_ptr)) {
+ err = PTR_ERR(cpu_ptr);
+ goto err_put_object;
+ }
+
+ *fw_obj_out = fw_obj;
+
+ if (fw_obj->init)
+ fw_obj->init(cpu_ptr, fw_obj->init_priv);
+
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+ list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ return cpu_ptr;
+
+err_put_object:
+ pvr_fw_object_destroy(fw_obj);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_fw_object_create() - Create a FW object and map to firmware
+ * @pvr_dev: PowerVR device pointer.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @init: Initialisation callback.
+ * @init_priv: Private pointer to pass to initialisation callback.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
+ * set.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_fw_object_create_and_map_common().
+ */
+int
+pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv), void *init_priv,
+ struct pvr_fw_object **fw_obj_out)
+{
+ void *cpu_ptr;
+
+ cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
+ fw_obj_out);
+ if (IS_ERR(cpu_ptr))
+ return PTR_ERR(cpu_ptr);
+
+ pvr_fw_object_vunmap(*fw_obj_out);
+
+ return 0;
+}
+
+/**
+ * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU
+ * @pvr_dev: PowerVR device pointer.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @init: Initialisation callback.
+ * @init_priv: Private pointer to pass to initialisation callback.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
+ * set.
+ *
+ * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
+ * mapping.
+ *
+ * Returns:
+ * * Pointer to CPU mapping of newly created object, or
+ * * Any error returned by pvr_fw_object_create(), or
+ * * Any error returned by pvr_fw_object_vmap().
+ */
+void *
+pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **fw_obj_out)
+{
+ return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
+ fw_obj_out);
+}
+
+/**
+ * pvr_fw_object_create_and_map_offset() - Create a FW object and map to
+ * firmware at the provided offset and to the CPU.
+ * @pvr_dev: PowerVR device pointer.
+ * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @init: Initialisation callback.
+ * @init_priv: Private pointer to pass to initialisation callback.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
+ * set.
+ *
+ * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
+ * mapping.
+ *
+ * Returns:
+ * * Pointer to CPU mapping of newly created object, or
+ * * Any error returned by pvr_fw_object_create(), or
+ * * Any error returned by pvr_fw_object_vmap().
+ */
+void *
+pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
+ u32 dev_offset, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **fw_obj_out)
+{
+ u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;
+
+ return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv,
+ fw_obj_out);
+}
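+
+/*
+ * Illustrative usage (see pvr_fw_process() above): the firmware code section
+ * is pinned to the start of the FW heap by passing a zero offset, e.g.
+ *
+ *   fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0,
+ *                                                     fw_mem->code_alloc_size,
+ *                                                     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ *                                                     NULL, NULL, &fw_mem->code_obj);
+ */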
+
+/**
+ * pvr_fw_object_destroy() - Destroy a pvr_fw_object
+ * @fw_obj: Pointer to object to destroy.
+ */
+void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
+
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+ list_del(&fw_obj->node);
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+ /* If we can't unmap, leak the memory. */
+ if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj)))
+ return;
+ }
+
+ if (fw_obj->gem)
+ pvr_gem_object_put(fw_obj->gem);
+
+ kfree(fw_obj);
+}
+
+/**
+ * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with
+ * given offset.
+ * @fw_obj: Pointer to object.
+ * @offset: Desired offset from start of object.
+ * @fw_addr_out: Location to store address to.
+ */
+void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);
+
+ *fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
+}
+
+/**
+ * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
+ * structures
+ * @pvr_dev: Device pointer
+ *
+ * If this function returns an error then the caller must regard the device as lost.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_fw_reinit_code_data().
+ */
+int
+pvr_fw_hard_reset(struct pvr_device *pvr_dev)
+{
+ struct list_head *pos;
+ int err;
+
+ /* Reset all FW objects */
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) {
+ struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
+ void *cpu_ptr = pvr_fw_object_vmap(fw_obj);
+
+ WARN_ON(IS_ERR(cpu_ptr));
+
+ if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) {
+ memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem));
+
+ if (fw_obj->init)
+ fw_obj->init(cpu_ptr, fw_obj->init_priv);
+ }
+
+ pvr_fw_object_vunmap(fw_obj);
+ }
+
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ err = pvr_fw_reinit_code_data(pvr_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
new file mode 100644
index 000000000000..b7966bd574a9
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_H
+#define PVR_FW_H
+
+#include "pvr_fw_info.h"
+#include "pvr_fw_trace.h"
+#include "pvr_gem.h"
+
+#include <drm/drm_mm.h>
+
+#include <linux/types.h>
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_vm.h". */
+struct pvr_vm_context;
+
+#define ROGUE_FWIF_FWCCB_NUMCMDS_LOG2 5
+
+#define ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT 7
+
+/**
+ * struct pvr_fw_object - container for firmware memory allocations
+ */
+struct pvr_fw_object {
+ /** @ref_count: FW object reference counter. */
+ struct kref ref_count;
+
+ /** @gem: GEM object backing the FW object. */
+ struct pvr_gem_object *gem;
+
+ /**
+ * @fw_mm_node: Node representing mapping in FW address space.
+ * &pvr_fw_device->fw_mm_lock must be held when modifying this node.
+ */
+ struct drm_mm_node fw_mm_node;
+
+ /**
+ * @fw_addr_offset: Virtual address offset of firmware mapping. Only
+ * valid once the object has been mapped into the firmware address
+ * space by pvr_fw_object_fw_map().
+ */
+ u32 fw_addr_offset;
+
+ /**
+ * @init: Initialisation callback. Will be called on object creation and FW hard reset.
+ * Object will have been zeroed before this is called.
+ */
+ void (*init)(void *cpu_ptr, void *priv);
+
+ /** @init_priv: Private data for initialisation callback. */
+ void *init_priv;
+
+ /** @node: Node for firmware object list. */
+ struct list_head node;
+};
+
+/**
+ * struct pvr_fw_defs - FW processor function table and static definitions
+ */
+struct pvr_fw_defs {
+ /**
+ * @init:
+ *
+ * FW processor specific initialisation.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must call pvr_fw_heap_calculate() to initialise the firmware heap for this
+ * FW processor.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any appropriate error on failure.
+ */
+ int (*init)(struct pvr_device *pvr_dev);
+
+ /**
+ * @fini:
+ *
+ * FW processor specific finalisation.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is optional.
+ */
+ void (*fini)(struct pvr_device *pvr_dev);
+
+ /**
+ * @fw_process:
+ *
+ * Load and process firmware image.
+ * @pvr_dev: Target PowerVR device.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to firmware code section.
+ * @fw_data_ptr: Pointer to firmware data section.
+ * @fw_core_code_ptr: Pointer to firmware core code section. May be %NULL.
+ * @fw_core_data_ptr: Pointer to firmware core data section. May be %NULL.
+ * @core_code_alloc_size: Total allocation size of core code section.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any appropriate error on failure.
+ */
+ int (*fw_process)(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+ u8 *fw_core_data_ptr, u32 core_code_alloc_size);
+
+ /**
+ * @vm_map:
+ *
+ * Map FW object into FW processor address space.
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any appropriate error on failure.
+ */
+ int (*vm_map)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+ /**
+ * @vm_unmap:
+ *
+ * Unmap FW object from FW processor address space.
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * This function is mandatory.
+ */
+ void (*vm_unmap)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+ /**
+ * @get_fw_addr_with_offset:
+ *
+ * Called to get address of object in firmware address space, with offset.
+ * @fw_obj: Pointer to object.
+ * @offset: Desired offset from start of object.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * Address in firmware address space.
+ */
+ u32 (*get_fw_addr_with_offset)(struct pvr_fw_object *fw_obj, u32 offset);
+
+ /**
+ * @wrapper_init:
+ *
+ * Called to initialise FW wrapper.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success.
+ * * Any appropriate error on failure.
+ */
+ int (*wrapper_init)(struct pvr_device *pvr_dev);
+
+ /**
+ * @has_fixed_data_addr:
+ *
+ * Called to check if firmware fixed data must be loaded at the address given by the
+ * firmware layout table.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * %true if firmware fixed data must be loaded at the address given by the firmware
+ * layout table.
+ * * %false otherwise.
+ */
+ bool (*has_fixed_data_addr)(void);
+
+ /**
+ * @irq: FW Interrupt information.
+ *
+ * These are processor dependent, and should be initialised by the
+ * processor backend in pvr_fw_defs::init().
+ */
+ struct {
+ /** @enable_reg: FW interrupt enable register. */
+ u32 enable_reg;
+
+ /** @status_reg: FW interrupt status register. */
+ u32 status_reg;
+
+ /**
+ * @clear_reg: FW interrupt clear register.
+ *
+ * If @status_reg == @clear_reg, we clear by writing a bit to zero,
+ * otherwise we clear by writing a bit to one.
+ */
+ u32 clear_reg;
+
+ /** @event_mask: Bitmask of events to listen for. */
+ u32 event_mask;
+
+ /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
+ u32 clear_mask;
+ } irq;
+};
+
+/**
+ * struct pvr_fw_mem - FW memory allocations
+ */
+struct pvr_fw_mem {
+ /** @code_obj: Object representing firmware code. */
+ struct pvr_fw_object *code_obj;
+
+ /** @data_obj: Object representing firmware data. */
+ struct pvr_fw_object *data_obj;
+
+ /**
+ * @core_code_obj: Object representing firmware core code. May be
+ * %NULL if firmware does not contain this section.
+ */
+ struct pvr_fw_object *core_code_obj;
+
+ /**
+ * @core_data_obj: Object representing firmware core data. May be
+ * %NULL if firmware does not contain this section.
+ */
+ struct pvr_fw_object *core_data_obj;
+
+ /** @code: Driver-side copy of firmware code. */
+ u8 *code;
+
+ /** @data: Driver-side copy of firmware data. */
+ u8 *data;
+
+ /**
+ * @core_code: Driver-side copy of firmware core code. May be %NULL if firmware does not
+ * contain this section.
+ */
+ u8 *core_code;
+
+ /**
+ * @core_data: Driver-side copy of firmware core data. May be %NULL if firmware does not
+ * contain this section.
+ */
+ u8 *core_data;
+
+ /** @code_alloc_size: Allocation size of firmware code section. */
+ u32 code_alloc_size;
+
+ /** @data_alloc_size: Allocation size of firmware data section. */
+ u32 data_alloc_size;
+
+ /** @core_code_alloc_size: Allocation size of firmware core code section. */
+ u32 core_code_alloc_size;
+
+ /** @core_data_alloc_size: Allocation size of firmware core data section. */
+ u32 core_data_alloc_size;
+
+ /**
+ * @fwif_connection_ctl_obj: Object representing FWIF connection control
+ * structure.
+ */
+ struct pvr_fw_object *fwif_connection_ctl_obj;
+
+ /** @osinit_obj: Object representing FW OSINIT structure. */
+ struct pvr_fw_object *osinit_obj;
+
+ /** @sysinit_obj: Object representing FW SYSINIT structure. */
+ struct pvr_fw_object *sysinit_obj;
+
+ /** @osdata_obj: Object representing FW OSDATA structure. */
+ struct pvr_fw_object *osdata_obj;
+
+ /** @hwrinfobuf_obj: Object representing FW hwrinfobuf structure. */
+ struct pvr_fw_object *hwrinfobuf_obj;
+
+ /** @sysdata_obj: Object representing FW SYSDATA structure. */
+ struct pvr_fw_object *sysdata_obj;
+
+ /** @power_sync_obj: Object representing power sync state. */
+ struct pvr_fw_object *power_sync_obj;
+
+ /** @fault_page_obj: Object representing FW fault page. */
+ struct pvr_fw_object *fault_page_obj;
+
+ /** @gpu_util_fwcb_obj: Object representing FW GPU utilisation control structure. */
+ struct pvr_fw_object *gpu_util_fwcb_obj;
+
+ /** @runtime_cfg_obj: Object representing FW runtime config structure. */
+ struct pvr_fw_object *runtime_cfg_obj;
+
+ /** @mmucache_sync_obj: Object used as the sync parameter in an MMU cache operation. */
+ struct pvr_fw_object *mmucache_sync_obj;
+};
+
+struct pvr_fw_device {
+ /** @firmware: Handle to the firmware loaded into the device. */
+ const struct firmware *firmware;
+
+ /** @header: Pointer to firmware header. */
+ const struct pvr_fw_info_header *header;
+
+ /** @layout_entries: Pointer to firmware layout. */
+ const struct pvr_fw_layout_entry *layout_entries;
+
+ /** @mem: Structure containing objects representing firmware memory allocations. */
+ struct pvr_fw_mem mem;
+
+ /** @booted: %true if the firmware has been booted, %false otherwise. */
+ bool booted;
+
+ /**
+ * @processor_type: FW processor type for this device. Must be one of
+ * %PVR_FW_PROCESSOR_TYPE_*.
+ */
+ u16 processor_type;
+
+ /** @defs: Function table for the FW processor used by this device. */
+ const struct pvr_fw_defs *defs;
+
+ /** @processor_data: Pointer to data specific to FW processor. */
+ union {
+ /** @mips_data: Pointer to MIPS-specific data. */
+ struct pvr_fw_mips_data *mips_data;
+ } processor_data;
+
+ /** @fw_heap_info: Firmware heap information. */
+ struct {
+ /** @gpu_addr: Base address of firmware heap in GPU address space. */
+ u64 gpu_addr;
+
+ /** @size: Size of main area of heap. */
+ u32 size;
+
+ /** @offset_mask: Mask for offsets within FW heap. */
+ u32 offset_mask;
+
+ /** @raw_size: Raw size of heap, including reserved areas. */
+ u32 raw_size;
+
+ /** @log2_size: Log2 of raw size of heap. */
+ u32 log2_size;
+
+ /** @config_offset: Offset of config area within heap. */
+ u32 config_offset;
+
+ /** @reserved_size: Size of reserved area in heap. */
+ u32 reserved_size;
+ } fw_heap_info;
+
+ /** @fw_mm: Firmware address space allocator. */
+ struct drm_mm fw_mm;
+
+ /** @fw_mm_lock: Lock protecting access to &fw_mm. */
+ spinlock_t fw_mm_lock;
+
+ /** @fw_mm_base: Base address of address space managed by @fw_mm. */
+ u64 fw_mm_base;
+
+ /**
+ * @fwif_connection_ctl: Pointer to CPU mapping of FWIF connection
+ * control structure.
+ */
+ struct rogue_fwif_connection_ctl *fwif_connection_ctl;
+
+ /** @fwif_sysinit: Pointer to CPU mapping of FW SYSINIT structure. */
+ struct rogue_fwif_sysinit *fwif_sysinit;
+
+ /** @fwif_sysdata: Pointer to CPU mapping of FW SYSDATA structure. */
+ struct rogue_fwif_sysdata *fwif_sysdata;
+
+ /** @fwif_osinit: Pointer to CPU mapping of FW OSINIT structure. */
+ struct rogue_fwif_osinit *fwif_osinit;
+
+ /** @fwif_osdata: Pointer to CPU mapping of FW OSDATA structure. */
+ struct rogue_fwif_osdata *fwif_osdata;
+
+ /** @power_sync: Pointer to CPU mapping of power sync state. */
+ u32 *power_sync;
+
+ /** @hwrinfobuf: Pointer to CPU mapping of FW HWR info buffer. */
+ struct rogue_fwif_hwrinfobuf *hwrinfobuf;
+
+ /** @fw_trace: Device firmware trace buffer state. */
+ struct pvr_fw_trace fw_trace;
+
+ /** @fw_objs: Structure tracking FW objects. */
+ struct {
+ /** @list: Head of FW object list. */
+ struct list_head list;
+
+ /** @lock: Lock protecting access to FW object list. */
+ struct mutex lock;
+ } fw_objs;
+};
+
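+/*
+ * Helpers for accessing the FW processor interrupt registers. The register
+ * offsets and masks are provided by the per-processor &struct pvr_fw_defs.
+ */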
+#define pvr_fw_irq_read_reg(pvr_dev, name) \
+ pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
+
+#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
+ pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
+
+#define pvr_fw_irq_pending(pvr_dev) \
+ (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
+
+#define pvr_fw_irq_clear(pvr_dev) \
+ pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
+
+#define pvr_fw_irq_enable(pvr_dev) \
+ pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
+
+#define pvr_fw_irq_disable(pvr_dev) \
+ pvr_fw_irq_write_reg(pvr_dev, enable, 0)
+
+extern const struct pvr_fw_defs pvr_fw_defs_meta;
+extern const struct pvr_fw_defs pvr_fw_defs_mips;
+
+int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
+int pvr_fw_init(struct pvr_device *pvr_dev);
+void pvr_fw_fini(struct pvr_device *pvr_dev);
+
+int pvr_wait_for_fw_boot(struct pvr_device *pvr_dev);
+
+int
+pvr_fw_hard_reset(struct pvr_device *pvr_dev);
+
+void pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val);
+
+void
+pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size);
+
+const struct pvr_fw_layout_entry *
+pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id);
+int
+pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
+ void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
+ void **host_addr_out);
+
+int
+pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
+ u32 offset);
+
+int pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv), void *init_priv,
+ struct pvr_fw_object **pvr_obj_out);
+
+void *pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **pvr_obj_out);
+
+void *
+pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev, u32 dev_offset, size_t size,
+ u64 flags, void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **pvr_obj_out);
+
+static __always_inline void *
+pvr_fw_object_vmap(struct pvr_fw_object *fw_obj)
+{
+ return pvr_gem_object_vmap(fw_obj->gem);
+}
+
+static __always_inline void
+pvr_fw_object_vunmap(struct pvr_fw_object *fw_obj)
+{
+ pvr_gem_object_vunmap(fw_obj->gem);
+}
+
+void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj);
+
+static __always_inline void
+pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj)
+{
+ pvr_fw_object_vunmap(fw_obj);
+ pvr_fw_object_destroy(fw_obj);
+}
+
+/**
+ * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware
+ * object.
+ * @fw_obj: Pointer to object to lookup address in.
+ * @offset: Offset within object to lookup address at.
+ * @dma_addr_out: Pointer to location to store DMA address.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if object is not currently backed, or if @offset is out of valid
+ * range for this object.
+ */
+static __always_inline int
+pvr_fw_object_get_dma_addr(struct pvr_fw_object *fw_obj, u32 offset, dma_addr_t *dma_addr_out)
+{
+ return pvr_gem_get_dma_addr(fw_obj->gem, offset, dma_addr_out);
+}
+
+void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out);
+
+static __always_inline void
+pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
+{
+ pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
+}
+
+#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h
new file mode 100644
index 000000000000..c3639440610e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_info.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_INFO_H
+#define PVR_FW_INFO_H
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned to this size.
+ */
+#define FW_BLOCK_SIZE SZ_4K
+
+/* Maximum number of entries in firmware layout table. */
+#define PVR_FW_INFO_MAX_NUM_ENTRIES 8
+
+enum pvr_fw_section_id {
+ META_CODE = 0,
+ META_PRIVATE_DATA,
+ META_COREMEM_CODE,
+ META_COREMEM_DATA,
+ MIPS_CODE,
+ MIPS_EXCEPTIONS_CODE,
+ MIPS_BOOT_CODE,
+ MIPS_PRIVATE_DATA,
+ MIPS_BOOT_DATA,
+ MIPS_STACK,
+ RISCV_UNCACHED_CODE,
+ RISCV_CACHED_CODE,
+ RISCV_PRIVATE_DATA,
+ RISCV_COREMEM_CODE,
+ RISCV_COREMEM_DATA,
+};
+
+enum pvr_fw_section_type {
+ NONE = 0,
+ FW_CODE,
+ FW_DATA,
+ FW_COREMEM_CODE,
+ FW_COREMEM_DATA,
+};
+
+/*
+ * FW binary format with FW info attached:
+ *
+ * Contents Offset
+ * +-----------------+
+ * | | 0
+ * | |
+ * | Original binary |
+ * | file |
+ * | (.ldr/.elf) |
+ * | |
+ * | |
+ * +-----------------+
+ * | Device info | FILE_SIZE - 4K - device_info_size
+ * +-----------------+
+ * | FW info header | FILE_SIZE - 4K
+ * +-----------------+
+ * | |
+ * | FW layout table |
+ * | |
+ * +-----------------+
+ * FILE_SIZE
+ */
+
+#define PVR_FW_INFO_VERSION 3
+
+#define PVR_FW_FLAGS_OPEN_SOURCE BIT(0)
+
+/**
+ * struct pvr_fw_info_header - Firmware header
+ */
+struct pvr_fw_info_header {
+ /** @info_version: FW info header version. */
+ u32 info_version;
+ /** @header_len: Header length. */
+ u32 header_len;
+ /** @layout_entry_num: Number of entries in the layout table. */
+ u32 layout_entry_num;
+ /** @layout_entry_size: Size of an entry in the layout table. */
+ u32 layout_entry_size;
+ /** @bvnc: GPU ID supported by firmware. */
+ aligned_u64 bvnc;
+ /** @fw_page_size: Page size of processor on which firmware executes. */
+ u32 fw_page_size;
+ /** @flags: Compatibility flags. */
+ u32 flags;
+ /** @fw_version_major: Firmware major version number. */
+ u16 fw_version_major;
+ /** @fw_version_minor: Firmware minor version number. */
+ u16 fw_version_minor;
+ /** @fw_version_build: Firmware build number. */
+ u32 fw_version_build;
+ /** @device_info_size: Size of device info structure. */
+ u32 device_info_size;
+ /** @padding: Padding. */
+ u32 padding;
+};
+
+/**
+ * struct pvr_fw_layout_entry - Entry in firmware layout table, describing a
+ * section of the firmware image
+ */
+struct pvr_fw_layout_entry {
+ /** @id: Section ID. */
+ enum pvr_fw_section_id id;
+ /** @type: Section type. */
+ enum pvr_fw_section_type type;
+ /** @base_addr: Base address of section in FW address space. */
+ u32 base_addr;
+ /** @max_size: Maximum size of section, in bytes. */
+ u32 max_size;
+ /** @alloc_size: Allocation size of section, in bytes. */
+ u32 alloc_size;
+ /** @alloc_offset: Allocation offset of section. */
+ u32 alloc_offset;
+};
+
+/**
+ * struct pvr_fw_device_info_header - Device information header.
+ */
+struct pvr_fw_device_info_header {
+ /** @brn_mask_size: BRN mask size (in u64s). */
+ u64 brn_mask_size;
+ /** @ern_mask_size: ERN mask size (in u64s). */
+ u64 ern_mask_size;
+ /** @feature_mask_size: Feature mask size (in u64s). */
+ u64 feature_mask_size;
+ /** @feature_param_size: Feature parameter size (in u64s). */
+ u64 feature_param_size;
+};
+
+#endif /* PVR_FW_INFO_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
new file mode 100644
index 000000000000..c39beb70c317
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_meta.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */
+
+#define POLL_TIMEOUT_USEC 1000000
+
+/**
+ * pvr_meta_cr_read32() - Read a META register via the Slave Port
+ * @pvr_dev: Device pointer.
+ * @reg_addr: Address of register to read.
+ * @reg_value_out: Pointer to location to store register value.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out)
+{
+ int err;
+
+ /* Wait for Slave Port to be Ready. */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /* Issue a Read. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0,
+ reg_addr | ROGUE_CR_META_SP_MSLVCTRL0_RD_EN);
+ (void)pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0); /* Fence write. */
+
+ /* Wait for Slave Port to be Ready. */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ *reg_value_out = pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVDATAX);
+
+ return 0;
+}
+
+static int
+pvr_meta_wrapper_init(struct pvr_device *pvr_dev)
+{
+ u64 garten_config;
+
+ /* Configure META to Master boot. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_META_BOOT, ROGUE_CR_META_BOOT_MODE_EN);
+
+ /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address. */
+
+ /* Garten IDLE bit controlled by META. */
+ garten_config = ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+ /* The fence addr is set during the fw init sequence. */
+
+ /* Set PC = 0 for fences. */
+ garten_config &=
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+ garten_config |=
+ (u64)MMU_CONTEXT_MAPPING_FWPRIV
+ << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+ /* Set SLC DM=META. */
+ garten_config |= ((u64)ROGUE_FW_SEGMMU_META_BIFDM_ID)
+ << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, garten_config);
+
+ return 0;
+}
+
+static __always_inline void
+add_boot_arg(u32 **boot_conf, u32 param, u32 data)
+{
+ *(*boot_conf)++ = param;
+ *(*boot_conf)++ = data;
+}
+
+static int
+meta_ldr_cmd_loadmem(struct drm_device *drm_dev, const u8 *fw,
+ struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, const u32 fw_size)
+{
+ struct rogue_meta_ldr_l2_data_blk *l2_block =
+ (struct rogue_meta_ldr_l2_data_blk *)(fw +
+ l1_data->cmd_data[1]);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ u32 offset = l1_data->cmd_data[0];
+ u32 data_size;
+ void *write_addr;
+ int err;
+
+ /* Verify header is within bounds. */
+ if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
+ return -EINVAL;
+
+ data_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+
+ /* Verify data is within bounds. */
+ if (((u8 *)l2_block->block_data - fw) >= fw_size ||
+ ((((u8 *)l2_block->block_data) + data_size) - fw) >= fw_size)
+ return -EINVAL;
+
+ if (!ROGUE_META_IS_COREMEM_CODE(offset, coremem_size) &&
+ !ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
+ /* Global range is aliased to local range */
+ offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+ }
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, offset, data_size, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ offset, data_size);
+ return err;
+ }
+
+ memcpy(write_addr, l2_block->block_data, data_size);
+
+ return 0;
+}
+
+static int
+meta_ldr_cmd_zeromem(struct drm_device *drm_dev,
+ struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ u32 offset = l1_data->cmd_data[0];
+ u32 byte_count = l1_data->cmd_data[1];
+ void *write_addr;
+ int err;
+
+ if (ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
+ /* cannot zero coremem directly */
+ return 0;
+ }
+
+ /* Global range is aliased to local range */
+ offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, offset, byte_count, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ offset, byte_count);
+ return err;
+ }
+
+ memset(write_addr, 0, byte_count);
+
+ return 0;
+}
+
+static int
+meta_ldr_cmd_config(struct drm_device *drm_dev, const u8 *fw,
+ struct rogue_meta_ldr_l1_data_blk *l1_data,
+ const u32 fw_size, u32 **boot_conf_ptr)
+{
+ struct rogue_meta_ldr_l2_data_blk *l2_block =
+ (struct rogue_meta_ldr_l2_data_blk *)(fw +
+ l1_data->cmd_data[0]);
+ struct rogue_meta_ldr_cfg_blk *config_command;
+ u32 l2_block_size;
+ u32 curr_block_size = 0;
+ u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+
+ /* Verify block header is within bounds. */
+ if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
+ return -EINVAL;
+
+ l2_block_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+ config_command = (struct rogue_meta_ldr_cfg_blk *)l2_block->block_data;
+
+ if (((u8 *)config_command - fw) >= fw_size ||
+ ((((u8 *)config_command) + l2_block_size) - fw) >= fw_size)
+ return -EINVAL;
+
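+ /* Each config write command is three u32s: type, register offset and value (12 bytes). */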
+ while (l2_block_size >= 12) {
+ if (config_command->type != ROGUE_META_LDR_CFG_WRITE)
+ return -EINVAL;
+
+ /*
+ * Only write to bootloader if we got a valid pointer to the FW
+ * code allocation.
+ */
+ if (boot_conf) {
+ u32 register_offset = config_command->block_data[0];
+ u32 register_value = config_command->block_data[1];
+
+ /* Do register write */
+ add_boot_arg(&boot_conf, register_offset,
+ register_value);
+ }
+
+ curr_block_size = 12;
+ l2_block_size -= curr_block_size;
+ config_command = (struct rogue_meta_ldr_cfg_blk
+ *)((uintptr_t)config_command +
+ curr_block_size);
+ }
+
+ if (boot_conf_ptr)
+ *boot_conf_ptr = boot_conf;
+
+ return 0;
+}
+
+/**
+ * process_ldr_command_stream() - Process LDR firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ * @boot_conf_ptr: Pointer to boot config argument pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in LDR command stream.
+ */
+static int
+process_ldr_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+ u8 *fw_core_data_ptr, u32 **boot_conf_ptr)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct rogue_meta_ldr_block_hdr *ldr_header =
+ (struct rogue_meta_ldr_block_hdr *)fw;
+ struct rogue_meta_ldr_l1_data_blk *l1_data =
+ (struct rogue_meta_ldr_l1_data_blk *)(fw + ldr_header->sl_data);
+ const u32 fw_size = pvr_dev->fw_dev.firmware->size;
+ int err;
+
+ u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+ u32 coremem_size;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, meta_coremem_size, &coremem_size);
+ if (err)
+ return err;
+
+ coremem_size *= SZ_1K;
+
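+ /* Walk the linked list of L1 data blocks; a next offset of 0xFFFFFFFF terminates the list. */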
+ while (l1_data) {
+ /* Verify block header is within bounds. */
+ if (((u8 *)l1_data - fw) >= fw_size || ((u8 *)(l1_data + 1) - fw) >= fw_size)
+ return -EINVAL;
+
+ if (ROGUE_META_LDR_BLK_IS_COMMENT(l1_data->cmd)) {
+ /* Don't process comment blocks */
+ goto next_block;
+ }
+
+ switch (l1_data->cmd & ROGUE_META_LDR_CMD_MASK) {
+ case ROGUE_META_LDR_CMD_LOADMEM:
+ err = meta_ldr_cmd_loadmem(drm_dev, fw, l1_data,
+ coremem_size,
+ fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr,
+ fw_core_data_ptr, fw_size);
+ if (err)
+ return err;
+ break;
+
+ case ROGUE_META_LDR_CMD_START_THREADS:
+ /* Don't process this block */
+ break;
+
+ case ROGUE_META_LDR_CMD_ZEROMEM:
+ err = meta_ldr_cmd_zeromem(drm_dev, l1_data,
+ coremem_size,
+ fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr,
+ fw_core_data_ptr);
+ if (err)
+ return err;
+ break;
+
+ case ROGUE_META_LDR_CMD_CONFIG:
+ err = meta_ldr_cmd_config(drm_dev, fw, l1_data, fw_size,
+ &boot_conf);
+ if (err)
+ return err;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+next_block:
+ if (l1_data->next == 0xFFFFFFFF)
+ break;
+
+ l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw +
+ l1_data->next);
+ }
+
+ if (boot_conf_ptr)
+ *boot_conf_ptr = boot_conf;
+
+ return 0;
+}
+
+static void
+configure_seg_id(u64 seg_out_addr, u32 seg_base, u32 seg_limit, u32 seg_id,
+ u32 **boot_conf_ptr)
+{
+ u32 seg_out_addr0 = seg_out_addr & 0x00000000FFFFFFFFUL;
+ u32 seg_out_addr1 = (seg_out_addr >> 32) & 0x00000000FFFFFFFFUL;
+ u32 *boot_conf = *boot_conf_ptr;
+
+ /* META segments have a minimum size. */
+ u32 limit_off = max(seg_limit, ROGUE_FW_SEGMMU_ALIGN);
+
+ /* The limit is an offset, therefore off = size - 1. */
+ limit_off -= 1;
+
+ seg_base |= ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE;
+
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_BASE(seg_id), seg_base);
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_LIMIT(seg_id), limit_off);
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA0(seg_id), seg_out_addr0);
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA1(seg_id), seg_out_addr1);
+
+ *boot_conf_ptr = boot_conf;
+}
+
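+/* GPU address of a FW object: its offset within the FW heap plus the heap base address. */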
+static u64 get_fw_obj_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ return fw_obj->fw_addr_offset + fw_dev->fw_heap_info.gpu_addr;
+}
+
+static void
+configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u64 seg_out_addr_top;
+ u32 i;
+
+ seg_out_addr_top =
+ ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
+ ROGUE_FW_SEGMMU_META_BIFDM_ID);
+
+ for (i = 0; i < num_layout_entries; i++) {
+ /*
+ * FW code is using the bootloader segment which is already
+ * configured on boot. FW coremem code and data don't use the
+ * segment MMU. Only the FW data segment needs to be configured.
+ */
+ if (layout_entries[i].type == FW_DATA) {
+ u32 seg_id = ROGUE_FW_SEGMMU_DATA_ID;
+ u64 seg_out_addr = get_fw_obj_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+ seg_out_addr += layout_entries[i].alloc_offset;
+ seg_out_addr |= seg_out_addr_top;
+
+ /* Write the sequence to the bootldr. */
+ configure_seg_id(seg_out_addr,
+ layout_entries[i].base_addr,
+ layout_entries[i].alloc_size, seg_id,
+ boot_conf_ptr);
+
+ break;
+ }
+ }
+}
+
+static void
+configure_meta_caches(u32 **boot_conf_ptr)
+{
+ u32 *boot_conf = *boot_conf_ptr;
+ u32 d_cache_t0, i_cache_t0;
+ u32 d_cache_t1, i_cache_t1;
+ u32 d_cache_t2, i_cache_t2;
+ u32 d_cache_t3, i_cache_t3;
+
+ /* Initialise I/Dcache settings */
+ d_cache_t0 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ d_cache_t1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ d_cache_t2 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ d_cache_t3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ i_cache_t0 = 0;
+ i_cache_t1 = 0;
+ i_cache_t2 = 0;
+ i_cache_t3 = 0;
+
+ d_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ i_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+
+ /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+ add_boot_arg(&boot_conf, META_CR_MMCU_LOCAL_EBCTRL,
+ META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+ META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+ /* Data cache partitioning thread 0 to 3 */
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(0), d_cache_t0);
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(1), d_cache_t1);
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(2), d_cache_t2);
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(3), d_cache_t3);
+
+ /* Enable data cache hits */
+ add_boot_arg(&boot_conf, META_CR_MMCU_DCACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ /* Instruction cache partitioning thread 0 to 3 */
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(0), i_cache_t0);
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(1), i_cache_t1);
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(2), i_cache_t2);
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(3), i_cache_t3);
+
+ /* Enable instruction cache hits */
+ add_boot_arg(&boot_conf, META_CR_MMCU_ICACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ add_boot_arg(&boot_conf, 0x040000C0, 0);
+
+ *boot_conf_ptr = boot_conf;
+}
+
+static int
+pvr_meta_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ u32 *boot_conf;
+ int err;
+
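+ /* Boot arguments are written to the bootloader config area inside the FW code allocation. */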
+ boot_conf = ((u32 *)fw_code_ptr) + ROGUE_FW_BOOTLDR_CONF_OFFSET;
+
+ /* Slave port and JTAG accesses are privileged. */
+ add_boot_arg(&boot_conf, META_CR_SYSC_JTAG_THREAD,
+ META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+ configure_seg_mmu(pvr_dev, &boot_conf);
+
+ /* Populate FW sections from LDR image. */
+ err = process_ldr_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+ fw_core_data_ptr, &boot_conf);
+ if (err)
+ return err;
+
+ configure_meta_caches(&boot_conf);
+
+ /* End argument list. */
+ add_boot_arg(&boot_conf, 0, 0);
+
+ if (fw_dev->mem.core_code_obj) {
+ u32 core_code_fw_addr;
+
+ pvr_fw_object_get_fw_addr(fw_dev->mem.core_code_obj, &core_code_fw_addr);
+ add_boot_arg(&boot_conf, core_code_fw_addr, core_code_alloc_size);
+ } else {
+ add_boot_arg(&boot_conf, 0, 0);
+ }
+ /* None of the cores supported by this driver have META DMA. */
+ add_boot_arg(&boot_conf, 0, 0);
+
+ return 0;
+}
+
+static int
+pvr_meta_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_META_SHIFT, 0);
+
+ return 0;
+}
+
+static u32
+pvr_meta_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ u32 fw_addr = fw_obj->fw_addr_offset + offset + ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS;
+
+ /* META cacheability is determined by address. */
+ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ fw_addr |= ROGUE_FW_SEGMMU_DATA_META_UNCACHED |
+ ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+
+ return fw_addr;
+}
+
+static int
+pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+ pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
+ fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_meta_has_fixed_data_addr(void)
+{
+ return false;
+}
+
+const struct pvr_fw_defs pvr_fw_defs_meta = {
+ .init = pvr_meta_init,
+ .fw_process = pvr_meta_fw_process,
+ .vm_map = pvr_meta_vm_map,
+ .vm_unmap = pvr_meta_vm_unmap,
+ .get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
+ .wrapper_init = pvr_meta_wrapper_init,
+ .has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
+ .irq = {
+ .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
+ .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
+ .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
+ },
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.h b/drivers/gpu/drm/imagination/pvr_fw_meta.h
new file mode 100644
index 000000000000..911ad700cba6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_META_H
+#define PVR_FW_META_H
+
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h */
+struct pvr_device;
+
+int pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out);
+
+#endif /* PVR_FW_META_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
new file mode 100644
index 000000000000..0bed0257e2ab
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm_mips.h"
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_MIPS_BASE 0xC0000000
+#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
+#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
+
+/**
+ * process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in ELF command stream.
+ */
+static int
+process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct elf32_hdr *header = (struct elf32_hdr *)fw;
+ struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 entry;
+ int err;
+
+ for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ void *write_addr;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (program_header->p_type != PT_LOAD)
+ continue;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
+ program_header->p_memsz, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ program_header->p_vaddr, program_header->p_memsz);
+ return err;
+ }
+
+ /* Write to FW allocation only if available */
+ if (write_addr) {
+ memcpy(write_addr, fw + program_header->p_offset,
+ program_header->p_filesz);
+
+ memset((u8 *)write_addr + program_header->p_filesz, 0,
+ program_header->p_memsz - program_header->p_filesz);
+ }
+ }
+
+ return 0;
+}
+
+static int
+pvr_mips_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_MIPS_SHIFT, ROGUE_FW_HEAP_MIPS_RESERVED_SIZE);
+
+ return pvr_vm_mips_init(pvr_dev);
+}
+
+static void
+pvr_mips_fini(struct pvr_device *pvr_dev)
+{
+ pvr_vm_mips_fini(pvr_dev);
+}
+
+static int
+pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ const struct pvr_fw_layout_entry *boot_code_entry;
+ const struct pvr_fw_layout_entry *boot_data_entry;
+ const struct pvr_fw_layout_entry *exception_code_entry;
+ const struct pvr_fw_layout_entry *stack_entry;
+ struct rogue_mipsfw_boot_data *boot_data;
+ dma_addr_t dma_addr;
+ u32 page_nr;
+ int err;
+
+ err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+ fw_core_data_ptr);
+ if (err)
+ return err;
+
+ boot_code_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_BOOT_CODE);
+ boot_data_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_BOOT_DATA);
+ exception_code_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_EXCEPTIONS_CODE);
+ if (!boot_code_entry || !boot_data_entry || !exception_code_entry)
+ return -EINVAL;
+
+ WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.code_obj->gem, boot_code_entry->alloc_offset,
+ &mips_data->boot_code_dma_addr));
+ WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.data_obj->gem, boot_data_entry->alloc_offset,
+ &mips_data->boot_data_dma_addr));
+ WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.code_obj->gem,
+ exception_code_entry->alloc_offset,
+ &mips_data->exception_code_dma_addr));
+
+ stack_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_STACK);
+ if (!stack_entry)
+ return -EINVAL;
+
+ boot_data = (struct rogue_mipsfw_boot_data *)(fw_data_ptr + boot_data_entry->alloc_offset +
+ ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET);
+
+ WARN_ON(pvr_fw_object_get_dma_addr(fw_dev->mem.data_obj, stack_entry->alloc_offset,
+ &dma_addr));
+ boot_data->stack_phys_addr = dma_addr;
+
+ boot_data->reg_base = pvr_dev->regs_resource->start;
+
+ for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ /* Firmware expects 4k pages, but host page size might be different. */
+ u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
+ u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
+
+ boot_data->pt_phys_addr[page_nr] = mips_data->pt_dma_addr[src_page_nr] +
+ page_offset;
+ }
+
+ boot_data->pt_log2_page_size = ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+ boot_data->pt_num_pages = ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES;
+ boot_data->reserved1 = 0;
+ boot_data->reserved2 = 0;
+
+ return 0;
+}
+
+static int
+pvr_mips_wrapper_init(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_mips_data *mips_data = pvr_dev->fw_dev.processor_data.mips_data;
+ const u64 remap_settings = ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE;
+ u32 phys_bus_width;
+
+ int err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+
+ if (WARN_ON(err))
+ return err;
+
+ /* Currently MIPS FW only supported with physical bus width > 32 bits. */
+ if (WARN_ON(phys_bus_width <= 32))
+ return -EINVAL;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_CONFIG,
+ (ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE >>
+ ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+ ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+
+ /* Configure remap for boot code, boot data and exceptions code areas. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1,
+ ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN |
+ ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2,
+ (mips_data->boot_code_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+ if (PVR_HAS_QUIRK(pvr_dev, 63553)) {
+ /*
+ * WA always required on 36 bit cores, to avoid continuous unmapped memory accesses
+ * to address 0x0.
+ */
+ WARN_ON(phys_bus_width != 36);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1,
+ ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2,
+ (mips_data->boot_code_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK) |
+ remap_settings);
+ }
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1,
+ ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN |
+ ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2,
+ (mips_data->boot_data_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1,
+ ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN |
+ ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2,
+ (mips_data->exception_code_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+ /* Garten IDLE bit controlled by MIPS. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ /* Turn on the EJTAG probe. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_DEBUG_CONFIG, 0);
+
+ return 0;
+}
+
+static u32
+pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+
+ /* MIPS cacheability is determined by page table. */
+ return ((fw_obj->fw_addr_offset + offset) & pvr_dev->fw_dev.fw_heap_info.offset_mask) |
+ ROGUE_FW_HEAP_MIPS_BASE;
+}
+
+static bool
+pvr_mips_has_fixed_data_addr(void)
+{
+ return true;
+}
+
+const struct pvr_fw_defs pvr_fw_defs_mips = {
+ .init = pvr_mips_init,
+ .fini = pvr_mips_fini,
+ .fw_process = pvr_mips_fw_process,
+ .vm_map = pvr_vm_mips_map,
+ .vm_unmap = pvr_vm_mips_unmap,
+ .get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
+ .wrapper_init = pvr_mips_wrapper_init,
+ .has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
+ .irq = {
+ .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
+ .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
+ .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
+ .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
+ },
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h
new file mode 100644
index 000000000000..408dbe63a90c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_MIPS_H
+#define PVR_FW_MIPS_H
+
+#include "pvr_rogue_mips.h"
+
+#include <asm/page.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
+ >> PAGE_SHIFT)
+
+/**
+ * struct pvr_fw_mips_data - MIPS-specific data
+ */
+struct pvr_fw_mips_data {
+ /**
+ * @pt_pages: Pages containing MIPS pagetable.
+ */
+ struct page *pt_pages[PVR_MIPS_PT_PAGE_COUNT];
+
+ /** @pt: Pointer to CPU mapping of MIPS pagetable. */
+ u32 *pt;
+
+ /** @pt_dma_addr: DMA mappings of MIPS pagetable. */
+ dma_addr_t pt_dma_addr[PVR_MIPS_PT_PAGE_COUNT];
+
+ /** @boot_code_dma_addr: DMA address of MIPS boot code. */
+ dma_addr_t boot_code_dma_addr;
+
+ /** @boot_data_dma_addr: DMA address of MIPS boot data. */
+ dma_addr_t boot_data_dma_addr;
+
+ /** @exception_code_dma_addr: DMA address of MIPS exception code. */
+ dma_addr_t exception_code_dma_addr;
+
+ /** @cache_policy: Cache policy for this processor. */
+ u32 cache_policy;
+
+ /** @pfn_mask: PFN mask for MIPS pagetable. */
+ u32 pfn_mask;
+};
+
+#endif /* PVR_FW_MIPS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
new file mode 100644
index 000000000000..36cec227cfe3
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_meta.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define POLL_TIMEOUT_USEC 1000000
+
+static void
+rogue_axi_ace_list_init(struct pvr_device *pvr_dev)
+{
+ /* Setup AXI-ACE config. Set everything to outer cache. */
+ u64 reg_val =
+ (3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+ (3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_AXI_ACE_LITE_CONFIGURATION, reg_val);
+}
+
+static void
+rogue_bif_init(struct pvr_device *pvr_dev)
+{
+ dma_addr_t pc_dma_addr;
+ u64 pc_addr;
+
+ /* Acquire the address of the Kernel Page Catalogue. */
+ pc_dma_addr = pvr_vm_get_page_table_root_addr(pvr_dev->kernel_vm_ctx);
+
+ /* Write the kernel catalogue base. */
+ pc_addr = ((((u64)pc_dma_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT) &
+ ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+
+ pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
+ pc_addr);
+}
+
+static int
+rogue_slc_init(struct pvr_device *pvr_dev)
+{
+ u16 slc_cache_line_size_bits;
+ u32 reg_val;
+ int err;
+
+ /*
+ * SLC Misc control.
+ *
+ * Note: This is a 64bit register and we set only the lower 32bits
+ * leaving the top 32bits (ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS)
+ * unchanged from the HW default.
+ */
+ reg_val = (pvr_cr_read32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC) &
+ ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+ ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits, &slc_cache_line_size_bits);
+ if (err)
+ return err;
+
+ /* Bypass burst combiner if SLC line size is smaller than 1024 bits. */
+ if (slc_cache_line_size_bits < 1024)
+ reg_val |= ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+
+ if (PVR_HAS_QUIRK(pvr_dev, 71242) && !PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support))
+ reg_val |= ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC, reg_val);
+
+ return 0;
+}
+
+/**
+ * pvr_fw_start() - Start FW processor and boot firmware
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by rogue_slc_init().
+ */
+int
+pvr_fw_start(struct pvr_device *pvr_dev)
+{
+ bool has_reset2 = PVR_HAS_FEATURE(pvr_dev, xe_tpu2);
+ u64 soft_reset_mask;
+ int err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
+ soft_reset_mask = ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+ else
+ soft_reset_mask = ROGUE_CR_SOFT_RESET_MASKFULL;
+
+ if (PVR_HAS_FEATURE(pvr_dev, sys_bus_secure_reset)) {
+ /*
+ * Disable the default sys_bus_secure protection to perform
+ * minimal setup.
+ */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE, 0);
+ (void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
+ }
+
+ /* Set Rogue in soft-reset. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
+ if (has_reset2)
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, ROGUE_CR_SOFT_RESET2_MASKFULL);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline. */
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+ if (has_reset2)
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);
+
+ /* Take Rascal and Dust out of reset. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
+ soft_reset_mask ^ ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN);
+ if (has_reset2)
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0);
+
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+ if (has_reset2)
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);
+
+ /* Take everything out of reset but the FW processor. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_GARTEN_EN);
+ if (has_reset2)
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0);
+
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+ if (has_reset2)
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);
+
+ err = rogue_slc_init(pvr_dev);
+ if (err)
+ goto err_reset;
+
+ /* Initialise Firmware wrapper. */
+ pvr_dev->fw_dev.defs->wrapper_init(pvr_dev);
+
+ /* We must init the AXI-ACE interface before first BIF transaction. */
+ rogue_axi_ace_list_init(pvr_dev);
+
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ /* Initialise BIF. */
+ rogue_bif_init(pvr_dev);
+ }
+
+ /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */
+ udelay(3);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, 0x0);
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+
+ /* ... and afterwards. */
+ udelay(3);
+
+ return 0;
+
+err_reset:
+ /* Put everything back into soft-reset. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
+
+ return err;
+}
+
+/**
+ * pvr_fw_stop() - Stop FW processor
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_fw_stop(struct pvr_device *pvr_dev)
+{
+ const u32 sidekick_idle_mask = ROGUE_CR_SIDEKICK_IDLE_MASKFULL &
+ ~(ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN |
+ ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN |
+ ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN);
+ bool skip_garten_idle = false;
+ u32 reg_value;
+ int err;
+
+ /*
+ * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+ * For cores with the LAYOUT_MARS feature, SIDEKICK would have been
+ * powered down by the FW.
+ */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+ sidekick_idle_mask, POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /* Unset MTS DM association with threads. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+ ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+ ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+ ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+ ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
+
+ /* Extra Idle checks. */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_STATUS_MMU, 0,
+ ROGUE_CR_BIF_STATUS_MMU_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_STATUS_MMU, 0,
+ ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ if (!PVR_HAS_FEATURE(pvr_dev, xt_top_infrastructure)) {
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_READS_EXT_STATUS, 0,
+ ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+ }
+
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_READS_EXT_STATUS, 0,
+ ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ err = pvr_cr_poll_reg64(pvr_dev, ROGUE_CR_SLC_STATUS1, 0,
+ ROGUE_CR_SLC_STATUS1_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /*
+ * Wait for SLC to signal IDLE.
+ * For cores with the LAYOUT_MARS feature, SLC would have been powered
+ * down by the FW.
+ */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SLC_IDLE,
+ ROGUE_CR_SLC_IDLE_MASKFULL,
+ ROGUE_CR_SLC_IDLE_MASKFULL, POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /*
+ * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+ * For cores with the LAYOUT_MARS feature, SIDEKICK would have been powered
+ * down by the FW.
+ */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+ sidekick_idle_mask, POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_META) {
+ err = pvr_meta_cr_read32(pvr_dev, META_CR_TxVECINT_BHALT, &reg_value);
+ if (err)
+ return err;
+
+ /*
+ * Wait for Sidekick/Jones to signal IDLE including the Garten
+ * Wrapper if there is no debugger attached (TxVECINT_BHALT =
+ * 0x0).
+ */
+ if (reg_value)
+ skip_garten_idle = true;
+ }
+
+ if (!skip_garten_idle) {
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE,
+ ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+ ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+ }
+
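+ /* Finally, put the whole GPU back into soft-reset. */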
+ if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
+ ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL);
+ else
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_MASKFULL);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.h b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
new file mode 100644
index 000000000000..a3cef061bd60
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_STARTSTOP_H
+#define PVR_FW_STARTSTOP_H
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+int pvr_fw_start(struct pvr_device *pvr_dev);
+int pvr_fw_stop(struct pvr_device *pvr_dev);
+
+#endif /* PVR_FW_STARTSTOP_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
new file mode 100644
index 000000000000..31199e45b72e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_sf.h"
+#include "pvr_fw_trace.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include <linux/build_bug.h>
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+static void
+tracebuf_ctrl_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
+ struct pvr_fw_trace *fw_trace = priv;
+ u32 thread_nr;
+
+ tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+ tracebuf_ctrl->tracebuf_flags = 0;
+
+ if (fw_trace->group_mask)
+ tracebuf_ctrl->log_type = fw_trace->group_mask | ROGUE_FWIF_LOG_TYPE_TRACE;
+ else
+ tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct rogue_fwif_tracebuf_space *tracebuf_space =
+ &tracebuf_ctrl->tracebuf[thread_nr];
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ pvr_fw_object_get_fw_addr(trace_buffer->buf_obj,
+ &tracebuf_space->trace_buffer_fw_addr);
+
+ tracebuf_space->trace_buffer = trace_buffer->buf;
+ tracebuf_space->trace_pointer = 0;
+ }
+}
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 thread_nr;
+ int err;
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ trace_buffer->buf =
+ pvr_fw_object_create_and_map(pvr_dev,
+ ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
+ sizeof(*trace_buffer->buf),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+ PVR_BO_FW_NO_CLEAR_ON_RESET,
+ NULL, NULL, &trace_buffer->buf_obj);
+ if (IS_ERR(trace_buffer->buf)) {
+ drm_err(drm_dev, "Unable to allocate trace buffer\n");
+ err = PTR_ERR(trace_buffer->buf);
+ trace_buffer->buf = NULL;
+ goto err_free_buf;
+ }
+ }
+
+ /* TODO: Provide control of group mask. */
+ fw_trace->group_mask = 0;
+
+ fw_trace->tracebuf_ctrl =
+ pvr_fw_object_create_and_map(pvr_dev,
+ sizeof(*fw_trace->tracebuf_ctrl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+ PVR_BO_FW_NO_CLEAR_ON_RESET,
+ tracebuf_ctrl_init, fw_trace,
+ &fw_trace->tracebuf_ctrl_obj);
+ if (IS_ERR(fw_trace->tracebuf_ctrl)) {
+ drm_err(drm_dev, "Unable to allocate trace buffer control structure\n");
+ err = PTR_ERR(fw_trace->tracebuf_ctrl);
+ goto err_free_buf;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
+ ARRAY_SIZE(fw_trace->buffers));
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct rogue_fwif_tracebuf_space *tracebuf_space =
+ &fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ trace_buffer->tracebuf_space = tracebuf_space;
+ }
+
+ return 0;
+
+err_free_buf:
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ if (trace_buffer->buf)
+ pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
+ }
+
+ return err;
+}
+
+void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ u32 thread_nr;
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
+ }
+ pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * update_logtype() - Send KCCB command to trigger FW to update logtype
+ * @pvr_dev: Target PowerVR device
+ * @group_mask: New log group mask.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error returned by pvr_kccb_send_cmd(), or
+ * * -%EIO if the device is lost.
+ */
+static int
+update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ struct rogue_fwif_kccb_cmd cmd;
+ int idx;
+ int err;
+
+ if (group_mask)
+ fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
+ else
+ fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+ fw_trace->group_mask = group_mask;
+
+ down_read(&pvr_dev->reset_sem);
+ if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
+ err = -EIO;
+ goto err_up_read;
+ }
+
+ cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
+ cmd.kccb_flags = 0;
+
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);
+
+ drm_dev_exit(idx);
+
+err_up_read:
+ up_read(&pvr_dev->reset_sem);
+
+ return err;
+}
+
+struct pvr_fw_trace_seq_data {
+ /** @buffer: Pointer to copy of trace data. */
+ u32 *buffer;
+
+ /** @start_offset: Starting offset in trace data, as reported by FW. */
+ u32 start_offset;
+
+ /** @idx: Current index into trace data. */
+ u32 idx;
+
+ /** @assert_buf: Trace assert buffer, as reported by FW. */
+ struct rogue_fwif_file_info_buf assert_buf;
+};
+
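+/* Map a FW trace string-format ID to its index in stid_fmts, or ROGUE_FW_SF_LAST if unknown. */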
+static u32 find_sfid(u32 id)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ if (stid_fmts[i].id == id)
+ return i;
+ }
+
+ return ROGUE_FW_SF_LAST;
+}
+
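+/*
+ * Read the dword at @offset past the current trace index. Indices are taken
+ * relative to the FW write pointer and wrap around the circular buffer;
+ * reads beyond the buffer size return 0.
+ */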
+static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset)
+{
+ u32 idx;
+
+ idx = trace_seq_data->idx + offset;
+ if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
+ return 0;
+
+ idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+ return trace_seq_data->buffer[idx];
+}
+
+/**
+ * fw_trace_get_next() - Advance trace index to next entry
+ * @trace_seq_data: Trace sequence data.
+ *
+ * Returns:
+ * * %true if trace index is now pointing to a valid entry, or
+ * * %false if trace index is pointing to an invalid entry, or has hit the end
+ * of the trace.
+ */
+static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
+{
+ u32 id, sf_id;
+
+ while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+ id = read_fw_trace(trace_seq_data, 0);
+ trace_seq_data->idx++;
+ if (!ROGUE_FW_LOG_VALIDID(id))
+ continue;
+ if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
+ /* Assertion failure marks the end of the trace. */
+ return false;
+ }
+
+ sf_id = find_sfid(id);
+ if (sf_id == ROGUE_FW_SF_FIRST)
+ continue;
+ if (sf_id == ROGUE_FW_SF_LAST) {
+ /*
+ * Could not match with an ID in the SF table, trace is
+ * most likely corrupt from this point.
+ */
+ return false;
+ }
+
+ /* Skip over the timestamp, and any parameters. */
+ trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id);
+
+ /* Ensure index is now pointing to a valid trace entry. */
+ id = read_fw_trace(trace_seq_data, 0);
+ if (!ROGUE_FW_LOG_VALIDID(id))
+ continue;
+
+ return true;
+ }
+
+ /* Hit end of trace data. */
+ return false;
+}
+
+/**
+ * fw_trace_get_first() - Find first valid entry in trace
+ * @trace_seq_data: Trace sequence data.
+ *
+ * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries.
+ *
+ * If the trace has no valid entries, this function will exit with the trace
+ * index pointing to the end of the trace. trace_seq_show() will return an error
+ * in this state.
+ */
+static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
+{
+ trace_seq_data->idx = 0;
+
+ while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+ u32 id = read_fw_trace(trace_seq_data, 0);
+
+ if (ROGUE_FW_LOG_VALIDID(id)) {
+ u32 sf_id = find_sfid(id);
+
+ if (sf_id != ROGUE_FW_SF_FIRST)
+ break;
+ }
+ trace_seq_data->idx++;
+ }
+}
+
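+/* seq_file iterator over the local snapshot of the FW trace buffer. */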
+static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+ u32 i;
+
+ /* Reset trace index, then advance to *pos. */
+ fw_trace_get_first(trace_seq_data);
+
+ for (i = 0; i < *pos; i++) {
+ if (!fw_trace_get_next(trace_seq_data))
+ return NULL;
+ }
+
+ return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
+}
+
+static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+
+ (*pos)++;
+ if (!fw_trace_get_next(trace_seq_data))
+ return NULL;
+
+ return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
+}
+
+static void fw_trace_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int fw_trace_seq_show(struct seq_file *s, void *v)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+ u64 timestamp;
+ u32 id;
+ u32 sf_id;
+
+ if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
+ return -EINVAL;
+
+ id = read_fw_trace(trace_seq_data, 0);
+ /* Index is not pointing at a valid entry. */
+ if (!ROGUE_FW_LOG_VALIDID(id))
+ return -EINVAL;
+
+ sf_id = find_sfid(id);
+ /* Index is not pointing at a valid entry. */
+ if (sf_id == ROGUE_FW_SF_LAST)
+ return -EINVAL;
+
+ timestamp = read_fw_trace(trace_seq_data, 1) |
+ ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
+ ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
+
+ seq_printf(s, "[%llu] : ", timestamp);
+ if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
+ seq_printf(s, "ASSERTION %s failed at %s:%u",
+ trace_seq_data->assert_buf.info,
+ trace_seq_data->assert_buf.path,
+ trace_seq_data->assert_buf.line_num);
+ } else {
+ seq_printf(s, stid_fmts[sf_id].name,
+ read_fw_trace(trace_seq_data, 3),
+ read_fw_trace(trace_seq_data, 4),
+ read_fw_trace(trace_seq_data, 5),
+ read_fw_trace(trace_seq_data, 6),
+ read_fw_trace(trace_seq_data, 7),
+ read_fw_trace(trace_seq_data, 8),
+ read_fw_trace(trace_seq_data, 9),
+ read_fw_trace(trace_seq_data, 10),
+ read_fw_trace(trace_seq_data, 11),
+ read_fw_trace(trace_seq_data, 12),
+ read_fw_trace(trace_seq_data, 13),
+ read_fw_trace(trace_seq_data, 14),
+ read_fw_trace(trace_seq_data, 15),
+ read_fw_trace(trace_seq_data, 16),
+ read_fw_trace(trace_seq_data, 17),
+ read_fw_trace(trace_seq_data, 18),
+ read_fw_trace(trace_seq_data, 19),
+ read_fw_trace(trace_seq_data, 20),
+ read_fw_trace(trace_seq_data, 21),
+ read_fw_trace(trace_seq_data, 22));
+ }
+ seq_puts(s, "\n");
+ return 0;
+}
+
+static const struct seq_operations pvr_fw_trace_seq_ops = {
+ .start = fw_trace_seq_start,
+ .next = fw_trace_seq_next,
+ .stop = fw_trace_seq_stop,
+ .show = fw_trace_seq_show
+};
+
+static int fw_trace_open(struct inode *inode, struct file *file)
+{
+ struct pvr_fw_trace_buffer *trace_buffer = inode->i_private;
+ struct rogue_fwif_tracebuf_space *tracebuf_space =
+ trace_buffer->tracebuf_space;
+ struct pvr_fw_trace_seq_data *trace_seq_data;
+ int err;
+
+ trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL);
+ if (!trace_seq_data)
+ return -ENOMEM;
+
+ trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,
+ sizeof(*trace_seq_data->buffer), GFP_KERNEL);
+ if (!trace_seq_data->buffer) {
+ err = -ENOMEM;
+ goto err_free_data;
+ }
+
+ /*
+ * Take a local copy of the trace buffer, as firmware may still be
+ * writing to it. This will exist as long as this file is open.
+ */
+ memcpy(trace_seq_data->buffer, trace_buffer->buf,
+ ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32));
+ trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer);
+ trace_seq_data->assert_buf = tracebuf_space->assert_buf;
+ fw_trace_get_first(trace_seq_data);
+
+ err = seq_open(file, &pvr_fw_trace_seq_ops);
+ if (err)
+ goto err_free_buffer;
+
+ ((struct seq_file *)file->private_data)->private = trace_seq_data;
+
+ return 0;
+
+err_free_buffer:
+ kfree(trace_seq_data->buffer);
+
+err_free_data:
+ kfree(trace_seq_data);
+
+ return err;
+}
+
+static int fw_trace_release(struct inode *inode, struct file *file)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data =
+ ((struct seq_file *)file->private_data)->private;
+
+ seq_release(inode, file);
+ kfree(trace_seq_data->buffer);
+ kfree(trace_seq_data);
+
+ return 0;
+}
+
+static const struct file_operations pvr_fw_trace_fops = {
+ .owner = THIS_MODULE,
+ .open = fw_trace_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = fw_trace_release,
+};
+
+void
+pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
+{
+ if (old_mask != new_mask)
+ update_logtype(pvr_dev, new_mask);
+}
+
+void
+pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ u32 thread_nr;
+
+ static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
+ "The filename buffer is only large enough for a single-digit thread count");
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ char filename[8];
+
+ snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
+ debugfs_create_file(filename, 0400, dir,
+ &fw_trace->buffers[thread_nr],
+ &pvr_fw_trace_fops);
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
new file mode 100644
index 000000000000..0074d2b18da0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_TRACE_H
+#define PVR_FW_TRACE_H
+
+#include <drm/drm_file.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif.h"
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declarations from pvr_rogue_fwif.h */
+struct rogue_fwif_tracebuf;
+struct rogue_fwif_tracebuf_space;
+
+/**
+ * struct pvr_fw_trace_buffer - Structure representing a trace buffer
+ */
+struct pvr_fw_trace_buffer {
+ /** @buf_obj: FW buffer object representing trace buffer. */
+ struct pvr_fw_object *buf_obj;
+
+ /** @buf: Pointer to CPU mapping of trace buffer. */
+ u32 *buf;
+
+ /**
+ * @tracebuf_space: Pointer to FW tracebuf_space structure for this
+ * trace buffer.
+ */
+ struct rogue_fwif_tracebuf_space *tracebuf_space;
+};
+
+/**
+ * struct pvr_fw_trace - Device firmware trace data
+ */
+struct pvr_fw_trace {
+ /**
+ * @tracebuf_ctrl_obj: Object representing FW trace buffer control
+ * structure.
+ */
+ struct pvr_fw_object *tracebuf_ctrl_obj;
+
+ /**
+ * @tracebuf_ctrl: Pointer to CPU mapping of FW trace buffer control
+ * structure.
+ */
+ struct rogue_fwif_tracebuf *tracebuf_ctrl;
+
+ /**
+ * @buffers: Array representing the actual trace buffers owned by this
+ * device.
+ */
+ struct pvr_fw_trace_buffer buffers[ROGUE_FW_THREAD_MAX];
+
+ /** @group_mask: Mask of enabled trace groups. */
+ u32 group_mask;
+};
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev);
+void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
+ u32 new_mask);
+
+void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_FW_TRACE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
new file mode 100644
index 000000000000..6a8c81fe8c1e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
+#include <linux/compiler.h>
+#include <linux/compiler_attributes.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iosys-map.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+
+static void pvr_gem_object_free(struct drm_gem_object *obj)
+{
+ drm_gem_shmem_object_free(obj);
+}
+
+static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
+{
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+
+ if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS))
+ return -EINVAL;
+
+ return drm_gem_shmem_mmap(shmem_obj, vma);
+}
+
+static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
+ .free = pvr_gem_object_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = pvr_gem_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * pvr_gem_object_flags_validate() - Verify that a collection of PowerVR GEM
+ * mapping and/or creation flags form a valid combination.
+ * @flags: PowerVR GEM mapping/creation flags to validate.
+ *
+ * This function explicitly allows kernel-only flags. All ioctl entrypoints
+ * should do their own validation in addition to relying on this function.
+ *
+ * Return:
+ * * %true if @flags contains valid mapping and/or creation flags, or
+ * * %false otherwise.
+ */
+static bool
+pvr_gem_object_flags_validate(u64 flags)
+{
+ static const u64 invalid_combinations[] = {
+ /*
+ * Memory flagged as PM/FW-protected cannot be mapped to
+ * userspace. To make this explicit, we require that the two
+ * flags allowing each of these respective features are never
+ * specified together.
+ */
+ (DRM_PVR_BO_PM_FW_PROTECT |
+ DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
+ };
+
+ int i;
+
+ /*
+ * Check for bits set in undefined regions. Reserved regions refer to
+ * options that can only be set by the kernel. These are explicitly
+ * allowed in most cases, and must be checked specifically in IOCTL
+ * callback code.
+ */
+ if ((flags & PVR_BO_UNDEFINED_MASK) != 0)
+ return false;
+
+ /*
+ * Check for all combinations of flags marked as invalid in the array
+ * above.
+ */
+ for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ u64 combo = invalid_combinations[i];
+
+ if ((flags & combo) == combo)
+ return false;
+ }
+
+ return true;
+}
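+
+/*
+ * Illustrative example (not part of the driver): given the table above,
+ * DRM_PVR_BO_PM_FW_PROTECT on its own passes validation, while
+ * DRM_PVR_BO_PM_FW_PROTECT | DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS is
+ * rejected because the two flags form an invalid combination.
+ */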
+
+/**
+ * pvr_gem_object_into_handle() - Convert a reference to an object into a
+ * userspace-accessible handle.
+ * @pvr_obj: [IN] Target PowerVR-specific object.
+ * @pvr_file: [IN] File to associate the handle with.
+ * @handle: [OUT] Pointer to store the created handle in. Remains unmodified if
+ * an error is encountered.
+ *
+ * If an error is encountered, ownership of @pvr_obj will not have been
+ * transferred. If this function succeeds, however, further use of @pvr_obj is
+ * considered undefined behaviour unless another reference to it is explicitly
+ * held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while attempting to allocate a handle on @pvr_file.
+ */
+int
+pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+ struct pvr_file *pvr_file, u32 *handle)
+{
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct drm_file *file = from_pvr_file(pvr_file);
+
+ u32 new_handle;
+ int err;
+
+ err = drm_gem_handle_create(file, gem_obj, &new_handle);
+ if (err)
+ return err;
+
+ /*
+ * Release our reference to @pvr_obj, effectively transferring
+ * ownership to the handle.
+ */
+ pvr_gem_object_put(pvr_obj);
+
+ /*
+ * Do not store the new handle in @handle until no more errors can
+ * occur.
+ */
+ *handle = new_handle;
+
+ return 0;
+}
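+
+/*
+ * Usage sketch (illustrative only, not taken from this patch): a typical
+ * caller hands its creation reference over to the handle and must keep and
+ * release its own reference on failure. "args" is a hypothetical ioctl
+ * argument struct.
+ *
+ *	pvr_obj = pvr_gem_object_create(pvr_dev, args->size, args->flags);
+ *	if (IS_ERR(pvr_obj))
+ *		return PTR_ERR(pvr_obj);
+ *
+ *	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
+ *	if (err) {
+ *		pvr_gem_object_put(pvr_obj);
+ *		return err;
+ *	}
+ */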
+
+/**
+ * pvr_gem_object_from_handle() - Obtain a reference to an object from a
+ * userspace handle.
+ * @pvr_file: PowerVR-specific file to which @handle is associated.
+ * @handle: Userspace handle referencing the target object.
+ *
+ * On return, @handle always maintains its reference to the requested object
+ * (if it had one in the first place). If this function succeeds, the returned
+ * object will hold an additional reference. When the caller is finished with
+ * the returned object, they should call pvr_gem_object_put() on it to release
+ * this reference.
+ *
+ * Return:
+ * * A pointer to the requested PowerVR-specific object on success, or
+ * * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
+{
+ struct drm_file *file = from_pvr_file(pvr_file);
+ struct drm_gem_object *gem_obj;
+
+ gem_obj = drm_gem_object_lookup(file, handle);
+ if (!gem_obj)
+ return NULL;
+
+ return gem_to_pvr_gem(gem_obj);
+}
+
+/**
+ * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address
+ * space.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * Once the caller is finished with the CPU mapping, they must call
+ * pvr_gem_object_vunmap() on @pvr_obj.
+ *
+ * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_cpu() is called to make
+ * sure the CPU mapping is consistent.
+ *
+ * Return:
+ * * A pointer to the CPU mapping on success,
+ * * -%ENOMEM if the mapping fails, or
+ * * Any error encountered while attempting to acquire a reference to the
+ * backing pages for @pvr_obj.
+ */
+void *
+pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
+{
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+ struct iosys_map map;
+ int err;
+
+ dma_resv_lock(obj->resv, NULL);
+
+ err = drm_gem_shmem_vmap(shmem_obj, &map);
+ if (err)
+ goto err_unlock;
+
+ if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
+ struct device *dev = shmem_obj->base.dev->dev;
+
+ /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+ * in GPU space yet.
+ */
+ if (shmem_obj->sgt)
+ dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+ }
+
+ dma_resv_unlock(obj->resv);
+
+ return map.vaddr;
+
+err_unlock:
+ dma_resv_unlock(obj->resv);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_gem_object_vunmap() - Unmap a PowerVR memory object from CPU virtual
+ * address space.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_device() is called to make
+ * sure the GPU mapping is consistent.
+ */
+void
+pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
+{
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+
+ if (WARN_ON(!map.vaddr))
+ return;
+
+ dma_resv_lock(obj->resv, NULL);
+
+ if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
+ struct device *dev = shmem_obj->base.dev->dev;
+
+ /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+ * in GPU space yet.
+ */
+ if (shmem_obj->sgt)
+ dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+ }
+
+ drm_gem_shmem_vunmap(shmem_obj, &map);
+
+ dma_resv_unlock(obj->resv);
+}
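+
+/*
+ * Usage sketch (illustrative only): CPU access through these helpers is
+ * always paired, mirroring what pvr_gem_object_zero() below does. "data"
+ * and "size" are placeholders for whatever the caller wants to write.
+ *
+ *	void *ptr = pvr_gem_object_vmap(pvr_obj);
+ *
+ *	if (IS_ERR(ptr))
+ *		return PTR_ERR(ptr);
+ *	memcpy(ptr, data, size);
+ *	pvr_gem_object_vunmap(pvr_obj);
+ */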
+
+/**
+ * pvr_gem_object_zero() - Zeroes the physical memory behind an object.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while attempting to map @pvr_obj to the CPU (see
+ * pvr_gem_object_vmap()).
+ */
+static int
+pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
+{
+ void *cpu_ptr;
+
+ cpu_ptr = pvr_gem_object_vmap(pvr_obj);
+ if (IS_ERR(cpu_ptr))
+ return PTR_ERR(cpu_ptr);
+
+ memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));
+
+	/* Make sure the zeroing is done before vunmapping the object. */
+ wmb();
+
+ pvr_gem_object_vunmap(pvr_obj);
+
+ return 0;
+}
+
+/**
+ * pvr_gem_create_object() - Allocate and pre-initialize a pvr_gem_object
+ * @drm_dev: DRM device creating this object.
+ * @size: Size of the object to allocate in bytes.
+ *
+ * Return:
+ * * The new pre-initialized GEM object on success,
+ * * -%ENOMEM if the allocation failed.
+ */
+struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
+{
+ struct drm_gem_object *gem_obj;
+ struct pvr_gem_object *pvr_obj;
+
+ pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
+ if (!pvr_obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_obj = gem_from_pvr_gem(pvr_obj);
+ gem_obj->funcs = &pvr_gem_object_funcs;
+
+ return gem_obj;
+}
+
+/**
+ * pvr_gem_object_create() - Creates a PowerVR-specific buffer object.
+ * @pvr_dev: Target PowerVR device.
+ * @size: Size of the object to allocate in bytes. Must be greater than zero.
+ * Any value which is not an exact multiple of the system page size will be
+ * rounded up to satisfy this condition.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ *
+ * The created object may be larger than @size, but can never be smaller. To
+ * get the exact size, call pvr_gem_object_size() on the returned pointer.
+ *
+ * Return:
+ * * The newly-minted PowerVR-specific buffer object on success,
+ * * -%EINVAL if @size is zero or @flags is not valid,
+ * * -%ENOMEM if sufficient physical memory cannot be allocated, or
+ * * Any other error returned by drm_gem_create_mmap_offset().
+ */
+struct pvr_gem_object *
+pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
+{
+ struct drm_gem_shmem_object *shmem_obj;
+ struct pvr_gem_object *pvr_obj;
+ struct sg_table *sgt;
+ int err;
+
+ /* Verify @size and @flags before continuing. */
+ if (size == 0 || !pvr_gem_object_flags_validate(flags))
+ return ERR_PTR(-EINVAL);
+
+ shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
+ if (IS_ERR(shmem_obj))
+ return ERR_CAST(shmem_obj);
+
+ shmem_obj->pages_mark_dirty_on_put = true;
+ shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
+ pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
+ pvr_obj->flags = flags;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto err_shmem_object_free;
+ }
+
+ dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
+ DMA_BIDIRECTIONAL);
+
+ /*
+ * Do this last because pvr_gem_object_zero() requires a fully
+ * configured instance of struct pvr_gem_object.
+ */
+ pvr_gem_object_zero(pvr_obj);
+
+ return pvr_obj;
+
+err_shmem_object_free:
+ drm_gem_shmem_free(shmem_obj);
+
+ return ERR_PTR(err);
+}
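+
+/*
+ * Usage sketch (illustrative only): creating a buffer that userspace may
+ * mmap and that is CPU-cached. The 64 KiB size is a placeholder for
+ * whatever the caller actually needs.
+ *
+ *	struct pvr_gem_object *pvr_obj;
+ *
+ *	pvr_obj = pvr_gem_object_create(pvr_dev, SZ_64K,
+ *					DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS |
+ *					PVR_BO_CPU_CACHED);
+ *	if (IS_ERR(pvr_obj))
+ *		return PTR_ERR(pvr_obj);
+ */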
+
+/**
+ * pvr_gem_get_dma_addr() - Get DMA address for given offset in object
+ * @pvr_obj: Pointer to object to lookup address in.
+ * @offset: Offset within object to lookup address at.
+ * @dma_addr_out: Pointer to location to store DMA address.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if object is not currently backed, or if @offset is out of valid
+ * range for this object.
+ */
+int
+pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+ dma_addr_t *dma_addr_out)
+{
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+ u32 accumulated_offset = 0;
+ struct scatterlist *sgl;
+ unsigned int sgt_idx;
+
+ WARN_ON(!shmem_obj->sgt);
+ for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, sgt_idx) {
+ u32 new_offset = accumulated_offset + sg_dma_len(sgl);
+
+ if (offset >= accumulated_offset && offset < new_offset) {
+ *dma_addr_out = sg_dma_address(sgl) +
+ (offset - accumulated_offset);
+ return 0;
+ }
+
+ accumulated_offset = new_offset;
+ }
+
+ return -EINVAL;
+}
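+
+/*
+ * Worked example (illustrative only): for an object whose DMA-mapped
+ * scatterlist consists of two 4 KiB segments, an @offset of 0x1800 skips
+ * the first segment (accumulated_offset becomes 0x1000) and resolves to
+ * the second segment's DMA address plus 0x800. An @offset at or beyond the
+ * mapped size falls out of the loop and returns -EINVAL.
+ */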
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
new file mode 100644
index 000000000000..e0e5ea509a2e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_GEM_H
+#define PVR_GEM_H
+
+#include "pvr_rogue_heap_config.h"
+#include "pvr_rogue_meta.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/compiler_attributes.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/**
+ * DOC: Flags for DRM_IOCTL_PVR_CREATE_BO (kernel-only)
+ *
+ * Kernel-only values allowed in &pvr_gem_object->flags. The majority of options
+ * for this field are specified in the UAPI header "pvr_drm.h" with a
+ * DRM_PVR_BO_ prefix. To distinguish these internal options (which must exist
+ * in ranges marked as "reserved" in the UAPI header), we drop the DRM prefix.
+ * The public options should be used directly, DRM prefix and all.
+ *
+ * To avoid potentially confusing gaps in the UAPI options, these kernel-only
+ * options are specified "in reverse", starting at bit 63.
+ *
+ * We use "reserved" to refer to bits defined here and not exposed in the UAPI.
+ * Bits not defined anywhere are "undefined".
+ *
+ * CPU mapping options
+ * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
+ * flag to override this behaviour and map the object cached.
+ *
+ * Firmware options
+ * :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
+ * reset. Set this flag to override this behaviour and preserve buffer contents on reset.
+ */
+#define PVR_BO_CPU_CACHED BIT_ULL(63)
+
+#define PVR_BO_FW_NO_CLEAR_ON_RESET BIT_ULL(62)
+
+#define PVR_BO_KERNEL_FLAGS_MASK (PVR_BO_CPU_CACHED | PVR_BO_FW_NO_CLEAR_ON_RESET)
+
+/* Bits 61..3 are undefined. */
+/* Bits 2..0 are defined in the UAPI. */
+
+/* Other utilities. */
+#define PVR_BO_UNDEFINED_MASK ~(PVR_BO_KERNEL_FLAGS_MASK | DRM_PVR_BO_FLAGS_MASK)
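+
+/*
+ * For example (illustrative only): a flags value with a bit set in the
+ * undefined range, such as BIT_ULL(10), intersects PVR_BO_UNDEFINED_MASK
+ * and is therefore rejected by pvr_gem_object_flags_validate() in
+ * pvr_gem.c.
+ */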
+
+/*
+ * All firmware-mapped memory uses (mostly) the same flags. Specifically,
+ * firmware-mapped memory should be:
+ * * Read/write on the device,
+ * * Read/write on the CPU, and
+ * * Write-combined on the CPU.
+ *
+ * The only variation is in caching on the device.
+ */
+#define PVR_BO_FW_FLAGS_DEVICE_CACHED (ULL(0))
+#define PVR_BO_FW_FLAGS_DEVICE_UNCACHED DRM_PVR_BO_BYPASS_DEVICE_CACHE
+
+/**
+ * struct pvr_gem_object - powervr-specific wrapper for &struct drm_gem_object
+ */
+struct pvr_gem_object {
+ /**
+ * @base: The underlying &struct drm_gem_shmem_object.
+ *
+	 * Do not access this member directly; instead, call
+	 * shmem_gem_from_pvr_gem().
+ */
+ struct drm_gem_shmem_object base;
+
+	/**
+	 * @flags: Options set at creation-time. Some of these options apply to
+	 * the creation operation itself (which are stored here for reference)
+	 * with the remainder used for mapping options to both the device and
+	 * CPU. These are used every time this object is mapped and must not be
+	 * changed after creation.
+	 *
+	 * Must be a combination of DRM_PVR_BO_* and/or PVR_BO_* flags.
+	 */
+	u64 flags;
+};
+
+static_assert(offsetof(struct pvr_gem_object, base) == 0,
+ "offsetof(struct pvr_gem_object, base) not zero");
+
+#define shmem_gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base)
+
+#define shmem_gem_to_pvr_gem(shmem_obj) container_of_const(shmem_obj, struct pvr_gem_object, base)
+
+#define gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base.base)
+
+#define gem_to_pvr_gem(gem_obj) container_of_const(gem_obj, struct pvr_gem_object, base.base)
+
+/* Functions defined in pvr_gem.c */
+
+struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size);
+
+struct pvr_gem_object *pvr_gem_object_create(struct pvr_device *pvr_dev,
+ size_t size, u64 flags);
+
+int pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+ struct pvr_file *pvr_file, u32 *handle);
+struct pvr_gem_object *pvr_gem_object_from_handle(struct pvr_file *pvr_file,
+ u32 handle);
+
+static __always_inline struct sg_table *
+pvr_gem_object_get_pages_sgt(struct pvr_gem_object *pvr_obj)
+{
+ return drm_gem_shmem_get_pages_sgt(shmem_gem_from_pvr_gem(pvr_obj));
+}
+
+void *pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj);
+void pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj);
+
+int pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+ dma_addr_t *dma_addr_out);
+
+/**
+ * pvr_gem_object_get() - Acquire reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to acquire reference on.
+ */
+static __always_inline void
+pvr_gem_object_get(struct pvr_gem_object *pvr_obj)
+{
+ drm_gem_object_get(gem_from_pvr_gem(pvr_obj));
+}
+
+/**
+ * pvr_gem_object_put() - Release reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to release reference on.
+ */
+static __always_inline void
+pvr_gem_object_put(struct pvr_gem_object *pvr_obj)
+{
+ drm_gem_object_put(gem_from_pvr_gem(pvr_obj));
+}
+
+static __always_inline size_t
+pvr_gem_object_size(struct pvr_gem_object *pvr_obj)
+{
+ return gem_from_pvr_gem(pvr_obj)->size;
+}
+
+#endif /* PVR_GEM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
new file mode 100644
index 000000000000..c4213c18489e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_hwrt.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs_client.h"
+#include "pvr_rogue_fwif.h"
+
+#include <drm/drm_gem.h>
+#include <linux/bitops.h>
+#include <linux/math.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+static_assert(ROGUE_FWIF_NUM_RTDATAS == 2);
+static_assert(ROGUE_FWIF_NUM_GEOMDATAS == 1);
+static_assert(ROGUE_FWIF_NUM_RTDATA_FREELISTS == 2);
+
+/*
+ * struct pvr_rt_mtile_info - Render target macrotile information
+ */
+struct pvr_rt_mtile_info {
+ u32 mtile_x[3];
+ u32 mtile_y[3];
+ u32 tile_max_x;
+ u32 tile_max_y;
+ u32 tile_size_x;
+ u32 tile_size_y;
+ u32 num_tiles_x;
+ u32 num_tiles_y;
+};
+
+/* Size of Shadow Render Target Cache entry */
+#define SRTC_ENTRY_SIZE sizeof(u32)
+/* Size of Renders Accumulation Array entry */
+#define RAA_ENTRY_SIZE sizeof(u32)
+
+static int
+hwrt_init_kernel_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+ struct pvr_hwrt_dataset *hwrt)
+{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ int err;
+ int i;
+
+ hwrt->pvr_dev = pvr_dev;
+ hwrt->max_rts = args->layers;
+
+ /* Get pointers to the free lists */
+ for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
+ if (!hwrt->free_lists[i]) {
+ err = -EINVAL;
+ goto err_put_free_lists;
+ }
+ }
+
+ if (hwrt->free_lists[ROGUE_FW_LOCAL_FREELIST]->current_pages <
+ pvr_get_free_list_min_pages(pvr_dev)) {
+ err = -EINVAL;
+ goto err_put_free_lists;
+ }
+
+ return 0;
+
+err_put_free_lists:
+ for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ pvr_free_list_put(hwrt->free_lists[i]);
+ hwrt->free_lists[i] = NULL;
+ }
+
+ return err;
+}
+
+static void
+hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ pvr_free_list_put(hwrt->free_lists[i]);
+ hwrt->free_lists[i] = NULL;
+ }
+}
+
+static void
+hwrt_fini_common_fw_structure(struct pvr_hwrt_dataset *hwrt)
+{
+ pvr_fw_object_destroy(hwrt->common_fw_obj);
+}
+
+static int
+get_cr_isp_mtile_size_val(struct pvr_device *pvr_dev, u32 samples,
+ struct pvr_rt_mtile_info *info, u32 *value_out)
+{
+ u32 x = info->mtile_x[0];
+ u32 y = info->mtile_y[0];
+ u32 samples_per_pixel;
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel);
+ if (err)
+ return err;
+
+ if (samples_per_pixel == 1) {
+ if (samples >= 4)
+ x <<= 1;
+ if (samples >= 2)
+ y <<= 1;
+ } else if (samples_per_pixel == 2) {
+ if (samples >= 8)
+ x <<= 1;
+ if (samples >= 4)
+ y <<= 1;
+ } else if (samples_per_pixel == 4) {
+ if (samples >= 8)
+ y <<= 1;
+ } else {
+ WARN(true, "Unsupported ISP samples per pixel value");
+ return -EINVAL;
+ }
+
+ *value_out = ((x << ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK) |
+ ((y << ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK);
+
+ return 0;
+}
+
+static int
+get_cr_multisamplectl_val(u32 samples, bool y_flip, u64 *value_out)
+{
+ static const struct {
+ u8 x[8];
+ u8 y[8];
+ } sample_positions[4] = {
+ /* 1 sample */
+ {
+ .x = { 8 },
+ .y = { 8 },
+ },
+ /* 2 samples */
+ {
+ .x = { 12, 4 },
+ .y = { 12, 4 },
+ },
+ /* 4 samples */
+ {
+ .x = { 6, 14, 2, 10 },
+ .y = { 2, 6, 10, 14 },
+ },
+ /* 8 samples */
+ {
+ .x = { 9, 7, 13, 5, 3, 1, 11, 15 },
+ .y = { 5, 11, 9, 3, 13, 7, 15, 1 },
+ },
+ };
+ const int idx = fls(samples) - 1;
+ u64 value = 0;
+
+ if (idx < 0 || idx > 3)
+ return -EINVAL;
+
+ for (u32 i = 0; i < 8; i++) {
+ value |= ((u64)sample_positions[idx].x[i]) << (i * 8);
+ if (y_flip)
+ value |= (((u64)(16 - sample_positions[idx].y[i]) & 0xf)) << (i * 8 + 4);
+ else
+ value |= ((u64)sample_positions[idx].y[i]) << (i * 8 + 4);
+ }
+
+ *value_out = value;
+
+ return 0;
+}
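+
+/*
+ * Worked example (illustrative only): each sample occupies one byte of the
+ * packed value, X in the low nibble and Y in the high nibble. For
+ * samples == 1 the result is 0x88 (X = Y = 8); for samples == 2 without
+ * Y-flip it is 0x44cc (sample 0 at (12, 12), sample 1 at (4, 4)).
+ */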
+
+static int
+get_cr_te_aa_val(struct pvr_device *pvr_dev, u32 samples, u32 *value_out)
+{
+ u32 samples_per_pixel;
+ u32 value = 0;
+ int err = 0;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel);
+ if (err)
+ return err;
+
+ switch (samples_per_pixel) {
+ case 1:
+ if (samples >= 2)
+ value |= ROGUE_CR_TE_AA_Y_EN;
+ if (samples >= 4)
+ value |= ROGUE_CR_TE_AA_X_EN;
+ break;
+ case 2:
+ if (samples >= 2)
+ value |= ROGUE_CR_TE_AA_X2_EN;
+ if (samples >= 4)
+ value |= ROGUE_CR_TE_AA_Y_EN;
+ if (samples >= 8)
+ value |= ROGUE_CR_TE_AA_X_EN;
+ break;
+ case 4:
+ if (samples >= 2)
+ value |= ROGUE_CR_TE_AA_X2_EN;
+ if (samples >= 4)
+ value |= ROGUE_CR_TE_AA_Y2_EN;
+ if (samples >= 8)
+ value |= ROGUE_CR_TE_AA_Y_EN;
+ break;
+ default:
+ WARN(true, "Unsupported ISP samples per pixel value");
+ return -EINVAL;
+ }
+
+ *value_out = value;
+
+ return 0;
+}
+
+static void
+hwrtdata_common_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_hwrt_dataset *hwrt = priv;
+
+ memcpy(cpu_ptr, &hwrt->common, sizeof(hwrt->common));
+}
+
+static int
+hwrt_init_common_fw_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+ struct pvr_hwrt_dataset *hwrt)
+{
+ struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ struct pvr_rt_mtile_info info;
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &info.tile_size_x);
+ if (WARN_ON(err))
+ return err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &info.tile_size_y);
+ if (WARN_ON(err))
+ return err;
+
+ info.num_tiles_x = DIV_ROUND_UP(args->width, info.tile_size_x);
+ info.num_tiles_y = DIV_ROUND_UP(args->height, info.tile_size_y);
+
+ if (PVR_HAS_FEATURE(pvr_dev, simple_parameter_format_version)) {
+ u32 parameter_format;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, simple_parameter_format_version,
+ &parameter_format);
+ if (WARN_ON(err))
+ return err;
+
+ WARN_ON(parameter_format != 2);
+
+ /*
+ * Set up 16 macrotiles with a multiple of 2x2 tiles per macrotile, which is
+ * aligned to a tile group.
+ */
+ info.mtile_x[0] = DIV_ROUND_UP(info.num_tiles_x, 8) * 2;
+ info.mtile_y[0] = DIV_ROUND_UP(info.num_tiles_y, 8) * 2;
+ info.mtile_x[1] = 0;
+ info.mtile_y[1] = 0;
+ info.mtile_x[2] = 0;
+ info.mtile_y[2] = 0;
+ info.tile_max_x = round_up(info.num_tiles_x, 2) - 1;
+ info.tile_max_y = round_up(info.num_tiles_y, 2) - 1;
+ } else {
+ /* Set up 16 macrotiles with a multiple of 4x4 tiles per macrotile. */
+ info.mtile_x[0] = round_up(DIV_ROUND_UP(info.num_tiles_x, 4), 4);
+ info.mtile_y[0] = round_up(DIV_ROUND_UP(info.num_tiles_y, 4), 4);
+ info.mtile_x[1] = info.mtile_x[0] * 2;
+ info.mtile_y[1] = info.mtile_y[0] * 2;
+ info.mtile_x[2] = info.mtile_x[0] * 3;
+ info.mtile_y[2] = info.mtile_y[0] * 3;
+ info.tile_max_x = info.num_tiles_x - 1;
+ info.tile_max_y = info.num_tiles_y - 1;
+ }
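+
+	/*
+	 * Worked example (illustrative only, assuming 32x32 tiles and a
+	 * 1920x1080 render target): num_tiles_x/y = 60/34. The branch above
+	 * for simple parameter format version 2 gives mtile_x[0]/mtile_y[0] =
+	 * 16/10 and tile_max_x/y = 59/33; the other branch gives
+	 * mtile_x[0]/mtile_y[0] = 16/12 and tile_max_x/y = 59/33.
+	 */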
+
+ hwrt->common.geom_caches_need_zeroing = false;
+
+ hwrt->common.isp_merge_lower_x = args->isp_merge_lower_x;
+ hwrt->common.isp_merge_lower_y = args->isp_merge_lower_y;
+ hwrt->common.isp_merge_upper_x = args->isp_merge_upper_x;
+ hwrt->common.isp_merge_upper_y = args->isp_merge_upper_y;
+ hwrt->common.isp_merge_scale_x = args->isp_merge_scale_x;
+ hwrt->common.isp_merge_scale_y = args->isp_merge_scale_y;
+
+ err = get_cr_multisamplectl_val(args->samples, false,
+ &hwrt->common.multi_sample_ctl);
+ if (err)
+ return err;
+
+ err = get_cr_multisamplectl_val(args->samples, true,
+ &hwrt->common.flipped_multi_sample_ctl);
+ if (err)
+ return err;
+
+ hwrt->common.mtile_stride = info.mtile_x[0] * info.mtile_y[0];
+
+ err = get_cr_te_aa_val(pvr_dev, args->samples, &hwrt->common.teaa);
+ if (err)
+ return err;
+
+ hwrt->common.screen_pixel_max =
+ (((args->width - 1) << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT) &
+ ~ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK) |
+ (((args->height - 1) << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT) &
+ ~ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK);
+
+ hwrt->common.te_screen =
+ ((info.tile_max_x << ROGUE_CR_TE_SCREEN_XMAX_SHIFT) &
+ ~ROGUE_CR_TE_SCREEN_XMAX_CLRMSK) |
+ ((info.tile_max_y << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) &
+ ~ROGUE_CR_TE_SCREEN_YMAX_CLRMSK);
+ hwrt->common.te_mtile1 =
+ ((info.mtile_x[0] << ROGUE_CR_TE_MTILE1_X1_SHIFT) & ~ROGUE_CR_TE_MTILE1_X1_CLRMSK) |
+ ((info.mtile_x[1] << ROGUE_CR_TE_MTILE1_X2_SHIFT) & ~ROGUE_CR_TE_MTILE1_X2_CLRMSK) |
+ ((info.mtile_x[2] << ROGUE_CR_TE_MTILE1_X3_SHIFT) & ~ROGUE_CR_TE_MTILE1_X3_CLRMSK);
+ hwrt->common.te_mtile2 =
+ ((info.mtile_y[0] << ROGUE_CR_TE_MTILE2_Y1_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y1_CLRMSK) |
+ ((info.mtile_y[1] << ROGUE_CR_TE_MTILE2_Y2_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y2_CLRMSK) |
+ ((info.mtile_y[2] << ROGUE_CR_TE_MTILE2_Y3_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y3_CLRMSK);
+
+ err = get_cr_isp_mtile_size_val(pvr_dev, args->samples, &info,
+ &hwrt->common.isp_mtile_size);
+ if (err)
+ return err;
+
+ hwrt->common.tpc_stride = geom_data_args->tpc_stride;
+ hwrt->common.tpc_size = geom_data_args->tpc_size;
+
+ hwrt->common.rgn_header_size = args->region_header_size;
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_hwrtdata_common),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED, hwrtdata_common_init, hwrt,
+ &hwrt->common_fw_obj);
+
+ return err;
+}
+
+static void
+hwrt_fw_data_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_hwrt_data *hwrt_data = priv;
+
+ memcpy(cpu_ptr, &hwrt_data->data, sizeof(hwrt_data->data));
+}
+
+static int
+hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
+ struct pvr_hwrt_dataset *hwrt,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+ struct drm_pvr_create_hwrt_rt_data_args *rt_data_args,
+ struct pvr_hwrt_data *hwrt_data)
+{
+ struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ struct rogue_fwif_rta_ctl *rta_ctl;
+ int free_list_i;
+ int err;
+
+ pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
+ &hwrt_data->data.hwrt_data_common_fw_addr);
+
+ for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
+ &hwrt_data->data.freelists_fw_addr[free_list_i]);
+ }
+
+ hwrt_data->data.tail_ptrs_dev_addr = geom_data_args->tpc_dev_addr;
+ hwrt_data->data.vheap_table_dev_addr = geom_data_args->vheap_table_dev_addr;
+ hwrt_data->data.rtc_dev_addr = geom_data_args->rtc_dev_addr;
+
+ hwrt_data->data.pm_mlist_dev_addr = rt_data_args->pm_mlist_dev_addr;
+ hwrt_data->data.macrotile_array_dev_addr = rt_data_args->macrotile_array_dev_addr;
+ hwrt_data->data.rgn_header_dev_addr = rt_data_args->region_header_dev_addr;
+
+ rta_ctl = &hwrt_data->data.rta_ctl;
+
+ rta_ctl->render_target_index = 0;
+ rta_ctl->active_render_targets = 0;
+ rta_ctl->valid_render_targets_fw_addr = 0;
+ rta_ctl->rta_num_partial_renders_fw_addr = 0;
+ rta_ctl->max_rts = args->layers;
+
+ if (args->layers > 1) {
+ err = pvr_fw_object_create(pvr_dev, args->layers * SRTC_ENTRY_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &hwrt_data->srtc_obj);
+ if (err)
+ return err;
+ pvr_fw_object_get_fw_addr(hwrt_data->srtc_obj,
+ &rta_ctl->valid_render_targets_fw_addr);
+
+ err = pvr_fw_object_create(pvr_dev, args->layers * RAA_ENTRY_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &hwrt_data->raa_obj);
+ if (err)
+ goto err_put_shadow_rt_cache;
+ pvr_fw_object_get_fw_addr(hwrt_data->raa_obj,
+ &rta_ctl->rta_num_partial_renders_fw_addr);
+ }
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_hwrtdata),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ hwrt_fw_data_init, hwrt_data, &hwrt_data->fw_obj);
+ if (err)
+ goto err_put_raa_obj;
+
+ pvr_free_list_add_hwrt(hwrt->free_lists[0], hwrt_data);
+
+ return 0;
+
+err_put_raa_obj:
+ if (args->layers > 1)
+ pvr_fw_object_destroy(hwrt_data->raa_obj);
+
+err_put_shadow_rt_cache:
+ if (args->layers > 1)
+ pvr_fw_object_destroy(hwrt_data->srtc_obj);
+
+ return err;
+}
+
+static void
+hwrt_data_fini_fw_structure(struct pvr_hwrt_dataset *hwrt, int hwrt_nr)
+{
+ struct pvr_hwrt_data *hwrt_data = &hwrt->data[hwrt_nr];
+
+ pvr_free_list_remove_hwrt(hwrt->free_lists[0], hwrt_data);
+
+ if (hwrt->max_rts > 1) {
+ pvr_fw_object_destroy(hwrt_data->raa_obj);
+ pvr_fw_object_destroy(hwrt_data->srtc_obj);
+ }
+
+ pvr_fw_object_destroy(hwrt_data->fw_obj);
+}
+
+/**
+ * pvr_hwrt_dataset_create() - Create a new HWRT dataset
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ *
+ * Return:
+ * * Pointer to new HWRT, or
+ * * ERR_PTR(-%ENOMEM) on out of memory, or
+ * * An error pointer on any other failure to initialise the dataset.
+ */
+struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args)
+{
+ struct pvr_hwrt_dataset *hwrt;
+ int err;
+
+ /* Create and fill out the kernel structure */
+ hwrt = kzalloc(sizeof(*hwrt), GFP_KERNEL);
+
+ if (!hwrt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&hwrt->ref_count);
+
+ err = hwrt_init_kernel_structure(pvr_file, args, hwrt);
+ if (err < 0)
+ goto err_free;
+
+ err = hwrt_init_common_fw_structure(pvr_file, args, hwrt);
+ if (err < 0)
+ goto err_free;
+
+ for (int i = 0; i < ARRAY_SIZE(hwrt->data); i++) {
+ err = hwrt_data_init_fw_structure(pvr_file, hwrt, args,
+ &args->rt_data_args[i],
+ &hwrt->data[i]);
+ if (err < 0) {
+ i--;
+ /* Destroy already created structures. */
+ for (; i >= 0; i--)
+ hwrt_data_fini_fw_structure(hwrt, i);
+ goto err_free;
+ }
+
+ hwrt->data[i].hwrt_dataset = hwrt;
+ }
+
+ return hwrt;
+
+err_free:
+ pvr_hwrt_dataset_put(hwrt);
+
+ return ERR_PTR(err);
+}
+
+static void
+pvr_hwrt_dataset_release(struct kref *ref_count)
+{
+ struct pvr_hwrt_dataset *hwrt =
+ container_of(ref_count, struct pvr_hwrt_dataset, ref_count);
+
+ for (int i = ARRAY_SIZE(hwrt->data) - 1; i >= 0; i--) {
+ WARN_ON(pvr_fw_structure_cleanup(hwrt->pvr_dev, ROGUE_FWIF_CLEANUP_HWRTDATA,
+ hwrt->data[i].fw_obj, 0));
+ hwrt_data_fini_fw_structure(hwrt, i);
+ }
+
+ hwrt_fini_common_fw_structure(hwrt);
+ hwrt_fini_kernel_structure(hwrt);
+
+ kfree(hwrt);
+}
+
+/**
+ * pvr_destroy_hwrt_datasets_for_file() - Destroy any HWRT datasets associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all HWRT datasets associated with @pvr_file from the device
+ * hwrt_dataset list and drops initial references. HWRT datasets will then be
+ * destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_hwrt_dataset *hwrt;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->hwrt_handles, handle, hwrt) {
+ (void)hwrt;
+ pvr_hwrt_dataset_put(xa_erase(&pvr_file->hwrt_handles, handle));
+ }
+}
+
+/**
+ * pvr_hwrt_dataset_put() - Release reference on HWRT dataset
+ * @hwrt: Pointer to HWRT dataset to release reference on
+ */
+void
+pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt)
+{
+ if (hwrt)
+ kref_put(&hwrt->ref_count, pvr_hwrt_dataset_release);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h
new file mode 100644
index 000000000000..676070b20c3b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_HWRT_H
+#define PVR_HWRT_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_device.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/* Forward declaration from pvr_free_list.h. */
+struct pvr_free_list;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/**
+ * struct pvr_hwrt_data - structure representing HWRT data
+ */
+struct pvr_hwrt_data {
+ /** @fw_obj: FW object representing the FW-side structure. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @data: Local copy of FW-side structure. */
+ struct rogue_fwif_hwrtdata data;
+
+ /** @freelist_node: List node connecting this HWRT to the local freelist. */
+ struct list_head freelist_node;
+
+ /**
+ * @srtc_obj: FW object representing shadow render target cache.
+ *
+ * Only valid if @max_rts > 1.
+ */
+ struct pvr_fw_object *srtc_obj;
+
+ /**
+ * @raa_obj: FW object representing renders accumulation array.
+ *
+ * Only valid if @max_rts > 1.
+ */
+ struct pvr_fw_object *raa_obj;
+
+ /** @hwrt_dataset: Back pointer to owning HWRT dataset. */
+ struct pvr_hwrt_dataset *hwrt_dataset;
+};
+
+/**
+ * struct pvr_hwrt_dataset - structure representing a HWRT data set.
+ */
+struct pvr_hwrt_dataset {
+ /** @ref_count: Reference count of object. */
+ struct kref ref_count;
+
+ /** @pvr_dev: Pointer to device that owns this object. */
+ struct pvr_device *pvr_dev;
+
+ /** @common_fw_obj: FW object representing common FW-side structure. */
+ struct pvr_fw_object *common_fw_obj;
+
+ /** @common: Common HWRT data. */
+ struct rogue_fwif_hwrtdata_common common;
+
+ /** @data: HWRT data structures belonging to this set. */
+ struct pvr_hwrt_data data[ROGUE_FWIF_NUM_RTDATAS];
+
+ /** @free_lists: Free lists used by HWRT data set. */
+ struct pvr_free_list *free_lists[ROGUE_FWIF_NUM_RTDATA_FREELISTS];
+
+ /** @max_rts: Maximum render targets for this HWRT data set. */
+ u16 max_rts;
+};
+
+struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args);
+
+void
+pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file);
+
+/**
+ * pvr_hwrt_dataset_lookup() - Lookup HWRT dataset pointer from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on dataset object. Call pvr_hwrt_dataset_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a HWRT
+ * dataset)
+ */
+static __always_inline struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_hwrt_dataset *hwrt;
+
+ xa_lock(&pvr_file->hwrt_handles);
+ hwrt = xa_load(&pvr_file->hwrt_handles, handle);
+
+ if (hwrt)
+ kref_get(&hwrt->ref_count);
+
+ xa_unlock(&pvr_file->hwrt_handles);
+
+ return hwrt;
+}
+
+void
+pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt);
+
+/**
+ * pvr_hwrt_data_lookup() - Lookup HWRT data pointer from handle and index
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ * @index: Index of RT data within dataset.
+ *
+ * Takes reference on dataset object. Call pvr_hwrt_data_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a HWRT
+ * dataset, or index is out of range)
+ */
+static __always_inline struct pvr_hwrt_data *
+pvr_hwrt_data_lookup(struct pvr_file *pvr_file, u32 handle, u32 index)
+{
+ struct pvr_hwrt_dataset *hwrt_dataset = pvr_hwrt_dataset_lookup(pvr_file, handle);
+
+ if (hwrt_dataset) {
+ if (index < ARRAY_SIZE(hwrt_dataset->data))
+ return &hwrt_dataset->data[index];
+
+ pvr_hwrt_dataset_put(hwrt_dataset);
+ }
+
+ return NULL;
+}
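+
+/*
+ * Usage sketch (illustrative only): callers pair the lookup with a put once
+ * they are done with the RT data, as pvr_job.c does when a job is released.
+ *
+ *	struct pvr_hwrt_data *hwrt;
+ *
+ *	hwrt = pvr_hwrt_data_lookup(pvr_file, handle, index);
+ *	if (!hwrt)
+ *		return -EINVAL;
+ *	...
+ *	pvr_hwrt_data_put(hwrt);
+ */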
+
+/**
+ * pvr_hwrt_data_put() - Release reference on HWRT data
+ * @hwrt: Pointer to HWRT data to release reference on
+ */
+static __always_inline void
+pvr_hwrt_data_put(struct pvr_hwrt_data *hwrt)
+{
+ if (hwrt)
+ pvr_hwrt_dataset_put(hwrt->hwrt_dataset);
+}
+
+static __always_inline struct pvr_hwrt_data *
+pvr_hwrt_data_get(struct pvr_hwrt_data *hwrt)
+{
+ if (hwrt)
+ kref_get(&hwrt->hwrt_dataset->ref_count);
+
+ return hwrt;
+}
+
+#endif /* PVR_HWRT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
new file mode 100644
index 000000000000..04139da6c04d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_mmu.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+#include "pvr_sync.h"
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <linux/types.h>
+#include <uapi/drm/pvr_drm.h>
+
+static void pvr_job_release(struct kref *kref)
+{
+ struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);
+
+ xa_erase(&job->pvr_dev->job_ids, job->id);
+
+ pvr_hwrt_data_put(job->hwrt);
+ pvr_context_put(job->ctx);
+
+ WARN_ON(job->paired_job);
+
+ pvr_queue_job_cleanup(job);
+ pvr_job_release_pm_ref(job);
+
+ kfree(job->cmd);
+ kfree(job);
+}
+
+/**
+ * pvr_job_put() - Release reference on job
+ * @job: Target job.
+ */
+void
+pvr_job_put(struct pvr_job *job)
+{
+ if (job)
+ kref_put(&job->ref_count, pvr_job_release);
+}
+
+/**
+ * pvr_job_process_stream() - Build job FW structure from stream
+ * @pvr_dev: Device pointer.
+ * @cmd_defs: Stream definition.
+ * @stream: Pointer to command stream.
+ * @stream_size: Size of command stream, in bytes.
+ * @job: Pointer to job.
+ *
+ * Caller is responsible for freeing the output structure.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%ENOMEM on out of memory, or
+ * * -%EINVAL on malformed stream.
+ */
+static int
+pvr_job_process_stream(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ void *stream, u32 stream_size, struct pvr_job *job)
+{
+ int err;
+
+ job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL);
+ if (!job->cmd)
+ return -ENOMEM;
+
+ job->cmd_len = cmd_defs->dest_size;
+
+ err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd);
+ if (err)
+ kfree(job->cmd);
+
+ return err;
+}
+
+static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job,
+ const struct pvr_stream_cmd_defs *stream_def,
+ u64 stream_userptr, u32 stream_len)
+{
+ void *stream;
+ int err;
+
+ stream = kzalloc(stream_len, GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) {
+ err = -EFAULT;
+ goto err_free_stream;
+ }
+
+ err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job);
+
+err_free_stream:
+ kfree(stream);
+
+ return err;
+}
+
+static u32
+convert_geom_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST)
+ out_flags |= ROGUE_GEOM_FLAGS_FIRSTKICK;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST)
+ out_flags |= ROGUE_GEOM_FLAGS_LASTKICK;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE;
+
+ return out_flags;
+}
+
+static u32
+convert_frag_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_FRAG_FLAGS_SINGLE_CORE;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER)
+ out_flags |= ROGUE_FRAG_FLAGS_DEPTHBUFFER;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER)
+ out_flags |= ROGUE_FRAG_FLAGS_STENCILBUFFER;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP)
+ out_flags |= ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER)
+ out_flags |= ROGUE_FRAG_FLAGS_SCRATCHBUFFER;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS)
+ out_flags |= ROGUE_FRAG_FLAGS_GET_VIS_RESULTS;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+ out_flags |= ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE;
+
+ return out_flags;
+}
+
+static int
+pvr_geom_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_geom *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
+ return -EINVAL;
+
+ if (!job->hwrt)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->cmd_shared.cmn.frame_num = 0;
+ cmd->flags = convert_geom_flags(args->flags);
+ pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
+ return 0;
+}
+
+static int
+pvr_frag_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_frag *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
+ return -EINVAL;
+
+ if (!job->hwrt)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = (args->flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER) ?
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR :
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->cmd_shared.cmn.frame_num = 0;
+ cmd->flags = convert_frag_flags(args->flags);
+ pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
+ return 0;
+}
+
+static u32
+convert_compute_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP)
+ out_flags |= ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_COMPUTE_FLAG_SINGLE_CORE;
+
+ return out_flags;
+}
+
+static int
+pvr_compute_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_compute *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_COMPUTE)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->common.frame_num = 0;
+ cmd->flags = convert_compute_flags(args->flags);
+ return 0;
+}
+
+static u32
+convert_transfer_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_TRANSFER_FLAGS_SINGLE_CORE;
+
+ return out_flags;
+}
+
+static int
+pvr_transfer_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_transfer *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->common.frame_num = 0;
+ cmd->flags = convert_transfer_flags(args->flags);
+ return 0;
+}
+
+static int
+pvr_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ switch (args->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return pvr_geom_job_fw_cmd_init(job, args);
+
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return pvr_frag_job_fw_cmd_init(job, args);
+
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return pvr_compute_job_fw_cmd_init(job, args);
+
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return pvr_transfer_job_fw_cmd_init(job, args);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * struct pvr_job_data - Helper container for pairing jobs with the
+ * sync_ops supplied for them by the user.
+ */
+struct pvr_job_data {
+ /** @job: Pointer to the job. */
+ struct pvr_job *job;
+
+ /** @sync_ops: Pointer to the sync_ops associated with @job. */
+ struct drm_pvr_sync_op *sync_ops;
+
+ /** @sync_op_count: Number of members of @sync_ops. */
+ u32 sync_op_count;
+};
+
+/**
+ * prepare_job_syncs() - Prepare all sync objects for a single job.
+ * @pvr_file: PowerVR file.
+ * @job_data: Precreated job and sync_ops array.
+ * @signal_array: xarray to receive signal sync objects.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_sync_signal_array_collect_ops(),
+ * pvr_sync_add_deps_to_job(), drm_sched_job_add_resv_dependencies() or
+ * pvr_sync_signal_array_update_fences().
+ */
+static int
+prepare_job_syncs(struct pvr_file *pvr_file,
+ struct pvr_job_data *job_data,
+ struct xarray *signal_array)
+{
+ struct dma_fence *done_fence;
+ int err = pvr_sync_signal_array_collect_ops(signal_array,
+ from_pvr_file(pvr_file),
+ job_data->sync_op_count,
+ job_data->sync_ops);
+
+ if (err)
+ return err;
+
+ err = pvr_sync_add_deps_to_job(pvr_file, &job_data->job->base,
+ job_data->sync_op_count,
+ job_data->sync_ops, signal_array);
+ if (err)
+ return err;
+
+ if (job_data->job->hwrt) {
+ /* The geometry job writes the HWRT region headers, which are
+ * then read by the fragment job.
+ */
+ struct drm_gem_object *obj =
+ gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem);
+ enum dma_resv_usage usage =
+ dma_resv_usage_rw(job_data->job->type ==
+ DRM_PVR_JOB_TYPE_GEOMETRY);
+
+ dma_resv_lock(obj->resv, NULL);
+ err = drm_sched_job_add_resv_dependencies(&job_data->job->base,
+ obj->resv, usage);
+ dma_resv_unlock(obj->resv);
+ if (err)
+ return err;
+ }
+
+ /* We need to arm the job to get the job done fence. */
+ done_fence = pvr_queue_job_arm(job_data->job);
+
+ err = pvr_sync_signal_array_update_fences(signal_array,
+ job_data->sync_op_count,
+ job_data->sync_ops,
+ done_fence);
+ return err;
+}
+
+/**
+ * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs.
+ * @pvr_file: PowerVR file.
+ * @job_data: Array of precreated jobs and their sync_ops.
+ * @job_count: Number of jobs.
+ * @signal_array: xarray to receive signal sync objects.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_vm_bind_job_prepare_syncs().
+ */
+static int
+prepare_job_syncs_for_each(struct pvr_file *pvr_file,
+ struct pvr_job_data *job_data,
+ u32 *job_count,
+ struct xarray *signal_array)
+{
+ for (u32 i = 0; i < *job_count; i++) {
+ int err = prepare_job_syncs(pvr_file, &job_data[i],
+ signal_array);
+
+ if (err) {
+ *job_count = i;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct pvr_job *
+create_job(struct pvr_device *pvr_dev,
+ struct pvr_file *pvr_file,
+ struct drm_pvr_job *args)
+{
+ struct pvr_job *job = NULL;
+ int err;
+
+ if (!args->cmd_stream || !args->cmd_stream_len)
+ return ERR_PTR(-EINVAL);
+
+ if (args->type != DRM_PVR_JOB_TYPE_GEOMETRY &&
+ args->type != DRM_PVR_JOB_TYPE_FRAGMENT &&
+ (args->hwrt.set_handle || args->hwrt.data_index))
+ return ERR_PTR(-EINVAL);
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&job->ref_count);
+ job->type = args->type;
+ job->pvr_dev = pvr_dev;
+
+ err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_put_job;
+
+ job->ctx = pvr_context_lookup(pvr_file, args->context_handle);
+ if (!job->ctx) {
+ err = -EINVAL;
+ goto err_put_job;
+ }
+
+ if (args->hwrt.set_handle) {
+ job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle,
+ args->hwrt.data_index);
+ if (!job->hwrt) {
+ err = -EINVAL;
+ goto err_put_job;
+ }
+ }
+
+ err = pvr_job_fw_cmd_init(job, args);
+ if (err)
+ goto err_put_job;
+
+ err = pvr_queue_job_init(job);
+ if (err)
+ goto err_put_job;
+
+ return job;
+
+err_put_job:
+ pvr_job_put(job);
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_job_data_fini() - Clean up all allocations used to set up job submission.
+ * @job_data: Job data array.
+ * @job_count: Number of members of @job_data.
+ */
+static void
+pvr_job_data_fini(struct pvr_job_data *job_data, u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++) {
+ pvr_job_put(job_data[i].job);
+ kvfree(job_data[i].sync_ops);
+ }
+}
+
+/**
+ * pvr_job_data_init() - Init an array of created jobs, associating them with
+ * the appropriate sync_ops args, which will be copied in.
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_file: Pointer to PowerVR file structure.
+ * @job_args: Job args array copied from user.
+ * @job_count: Number of members of @job_args.
+ * @job_data_out: Job data array.
+ */
+static int pvr_job_data_init(struct pvr_device *pvr_dev,
+ struct pvr_file *pvr_file,
+ struct drm_pvr_job *job_args,
+ u32 *job_count,
+ struct pvr_job_data *job_data_out)
+{
+ int err = 0, i = 0;
+
+ for (; i < *job_count; i++) {
+ job_data_out[i].job =
+ create_job(pvr_dev, pvr_file, &job_args[i]);
+ err = PTR_ERR_OR_ZERO(job_data_out[i].job);
+
+ if (err) {
+ *job_count = i;
+ job_data_out[i].job = NULL;
+ goto err_cleanup;
+ }
+
+ err = PVR_UOBJ_GET_ARRAY(job_data_out[i].sync_ops,
+ &job_args[i].sync_ops);
+ if (err) {
+ *job_count = i;
+
+ /* Ensure the job created above is also cleaned up. */
+ i++;
+ goto err_cleanup;
+ }
+
+ job_data_out[i].sync_op_count = job_args[i].sync_ops.count;
+ }
+
+ return 0;
+
+err_cleanup:
+ pvr_job_data_fini(job_data_out, i);
+
+ return err;
+}
+
+static void
+push_jobs(struct pvr_job_data *job_data, u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++)
+ pvr_queue_job_push(job_data[i].job);
+}
+
+static int
+prepare_fw_obj_resv(struct drm_exec *exec, struct pvr_fw_object *fw_obj)
+{
+ return drm_exec_prepare_obj(exec, gem_from_pvr_gem(fw_obj->gem), 1);
+}
+
+static int
+jobs_lock_all_objs(struct drm_exec *exec, struct pvr_job_data *job_data,
+ u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++) {
+ struct pvr_job *job = job_data[i].job;
+
+ /* Grab a lock on the context to guard against
+ * concurrent submission to the same queue.
+ */
+ int err = drm_exec_lock_obj(exec,
+ gem_from_pvr_gem(job->ctx->fw_obj->gem));
+
+ if (err)
+ return err;
+
+ if (job->hwrt) {
+ err = prepare_fw_obj_resv(exec,
+ job->hwrt->fw_obj);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+prepare_job_resvs_for_each(struct drm_exec *exec, struct pvr_job_data *job_data,
+ u32 job_count)
+{
+ drm_exec_until_all_locked(exec) {
+ int err = jobs_lock_all_objs(exec, job_data, job_count);
+
+ drm_exec_retry_on_contention(exec);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+update_job_resvs(struct pvr_job *job)
+{
+ if (job->hwrt) {
+ enum dma_resv_usage usage = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
+ struct drm_gem_object *obj = gem_from_pvr_gem(job->hwrt->fw_obj->gem);
+
+ dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage);
+ }
+}
+
+static void
+update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++)
+ update_job_resvs(job_data[i].job);
+}
+
+static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
+{
+ struct pvr_job *geom_job = a, *frag_job = b;
+ struct dma_fence *fence;
+ unsigned long index;
+
+ /* Geometry and fragment jobs can be combined if they are queued to the
+ * same context and target the same HWRT.
+ */
+ if (a->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
+ b->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
+ a->ctx != b->ctx ||
+ a->hwrt != b->hwrt)
+ return false;
+
+ xa_for_each(&frag_job->base.dependencies, index, fence) {
+ /* We combine when we see an explicit geom -> frag dep. */
+ if (&geom_job->base.s_fence->scheduled == fence)
+ return true;
+ }
+
+ return false;
+}
+
+static struct dma_fence *
+get_last_queued_job_scheduled_fence(struct pvr_queue *queue,
+ struct pvr_job_data *job_data,
+ u32 cur_job_pos)
+{
+ /* We iterate over the current job array in reverse order to grab the
+ * last to-be-queued job targeting the same queue.
+ */
+ for (u32 i = cur_job_pos; i > 0; i--) {
+ struct pvr_job *job = job_data[i - 1].job;
+
+ if (job->ctx == queue->ctx && job->type == queue->type)
+ return dma_fence_get(&job->base.s_fence->scheduled);
+ }
+
+ /* If we didn't find any, we just return the last queued job scheduled
+ * fence attached to the queue.
+ */
+ return dma_fence_get(queue->last_queued_job_scheduled_fence);
+}
+
+static int
+pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
+{
+ for (u32 i = 0; i < *job_count - 1; i++) {
+ struct pvr_job *geom_job = job_data[i].job;
+ struct pvr_job *frag_job = job_data[i + 1].job;
+ struct pvr_queue *frag_queue;
+ struct dma_fence *f;
+
+ if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job))
+ continue;
+
+ /* The fragment job will be submitted by the geometry queue. We
+ * need to make sure it comes after all the other fragment jobs
+ * queued before it.
+ */
+ frag_queue = pvr_context_get_queue_for_job(frag_job->ctx,
+ frag_job->type);
+ f = get_last_queued_job_scheduled_fence(frag_queue, job_data,
+ i);
+ if (f) {
+ int err = drm_sched_job_add_dependency(&geom_job->base,
+ f);
+ if (err) {
+ *job_count = i;
+ return err;
+ }
+ }
+
+ /* The KCCB slot will be reserved by the geometry job, so we can
+ * drop the KCCB fence on the fragment job.
+ */
+ pvr_kccb_fence_put(frag_job->kccb_fence);
+ frag_job->kccb_fence = NULL;
+
+ geom_job->paired_job = frag_job;
+ frag_job->paired_job = geom_job;
+
+ /* Skip the fragment job we just paired to the geometry job. */
+ i++;
+ }
+
+ return 0;
+}
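Editorial aside: a concrete instance of the pairing logic above, assuming a four-job submission on a single context and HWRT where each fragment job carries an explicit dependency on the preceding geometry job's scheduled fence (job names are illustrative only):

/*
 * Submission array: { GEOM_A, FRAG_B, GEOM_C, FRAG_D }
 *
 * - can_combine_jobs() pairs GEOM_A with FRAG_B and GEOM_C with FRAG_D.
 * - get_last_queued_job_scheduled_fence() makes GEOM_C depend on FRAG_B's
 *   scheduled fence, so FRAG_D cannot overtake FRAG_B on the fragment queue.
 * - FRAG_B and FRAG_D drop their KCCB fences; the paired geometry jobs
 *   reserve the KCCB slots instead.
 */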
+
+/**
+ * pvr_submit_jobs() - Submit jobs to the GPU
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_file: Pointer to PowerVR file structure.
+ * @args: Ioctl args.
+ *
+ * This initial implementation is entirely synchronous; on return the GPU will
+ * be idle. This will not be the case for future implementations.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EFAULT if arguments cannot be copied from user space,
+ * * -%EINVAL on invalid arguments, or
+ * * Any other error.
+ */
+int
+pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_submit_jobs_args *args)
+{
+ struct pvr_job_data *job_data = NULL;
+ struct drm_pvr_job *job_args;
+ struct xarray signal_array;
+ u32 jobs_alloced = 0;
+ struct drm_exec exec;
+ int err;
+
+ if (!args->jobs.count)
+ return -EINVAL;
+
+ err = PVR_UOBJ_GET_ARRAY(job_args, &args->jobs);
+ if (err)
+ return err;
+
+ job_data = kvmalloc_array(args->jobs.count, sizeof(*job_data),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job_data) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = pvr_job_data_init(pvr_dev, pvr_file, job_args, &args->jobs.count,
+ job_data);
+ if (err)
+ goto out_free;
+
+ jobs_alloced = args->jobs.count;
+
+ /*
+ * Flush MMU if needed - this has been deferred until now to avoid
+ * overuse of this expensive operation.
+ */
+ err = pvr_mmu_flush_exec(pvr_dev, false);
+ if (err)
+ goto out_job_data_cleanup;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
+
+ xa_init_flags(&signal_array, XA_FLAGS_ALLOC);
+
+ err = prepare_job_syncs_for_each(pvr_file, job_data, &args->jobs.count,
+ &signal_array);
+ if (err)
+ goto out_exec_fini;
+
+ err = prepare_job_resvs_for_each(&exec, job_data, args->jobs.count);
+ if (err)
+ goto out_exec_fini;
+
+ err = pvr_jobs_link_geom_frag(job_data, &args->jobs.count);
+ if (err)
+ goto out_exec_fini;
+
+ /* Anything after this point must succeed because we start exposing job
+ * finished fences to the outside world.
+ */
+ update_job_resvs_for_each(job_data, args->jobs.count);
+ push_jobs(job_data, args->jobs.count);
+ pvr_sync_signal_array_push_fences(&signal_array);
+ err = 0;
+
+out_exec_fini:
+ drm_exec_fini(&exec);
+ pvr_sync_signal_array_cleanup(&signal_array);
+
+out_job_data_cleanup:
+ pvr_job_data_fini(job_data, jobs_alloced);
+
+out_free:
+ kvfree(job_data);
+ kvfree(job_args);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_job.h b/drivers/gpu/drm/imagination/pvr_job.h
new file mode 100644
index 000000000000..0ca003c5c475
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_job.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_JOB_H
+#define PVR_JOB_H
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/kref.h>
+#include <linux/types.h>
+
+#include <drm/drm_gem.h>
+#include <drm/gpu_scheduler.h>
+
+#include "pvr_power.h"
+
+/* Forward declaration from "pvr_context.h". */
+struct pvr_context;
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declarations from "pvr_hwrt.h". */
+struct pvr_hwrt_data;
+
+/* Forward declaration from "pvr_queue.h". */
+struct pvr_queue;
+
+struct pvr_job {
+ /** @base: drm_sched_job object. */
+ struct drm_sched_job base;
+
+ /** @ref_count: Refcount for job. */
+ struct kref ref_count;
+
+ /** @type: Type of job. */
+ enum drm_pvr_job_type type;
+
+ /** @id: Job ID number. */
+ u32 id;
+
+ /**
+ * @paired_job: Job paired to this job.
+ *
+ * This field is only meaningful for geometry and fragment jobs.
+ *
+ * Paired jobs are executed on the same context, and need to be submitted
+ * atomically to the FW, to make sure the partial render logic has a
+ * fragment job to execute when the Parameter Manager runs out of memory.
+ *
+ * The geometry job should point to the fragment job it's paired with,
+ * and the fragment job should point to the geometry job it's paired with.
+ */
+ struct pvr_job *paired_job;
+
+ /** @cccb_fence: Fence used to wait for CCCB space. */
+ struct dma_fence *cccb_fence;
+
+ /** @kccb_fence: Fence used to wait for KCCB space. */
+ struct dma_fence *kccb_fence;
+
+ /** @done_fence: Fence to signal when the job is done. */
+ struct dma_fence *done_fence;
+
+ /** @pvr_dev: Device pointer. */
+ struct pvr_device *pvr_dev;
+
+ /** @ctx: Pointer to owning context. */
+ struct pvr_context *ctx;
+
+ /** @cmd: Command data. Format depends on @type. */
+ void *cmd;
+
+ /** @cmd_len: Length of command data, in bytes. */
+ u32 cmd_len;
+
+ /**
+ * @fw_ccb_cmd_type: Firmware CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
+ */
+ u32 fw_ccb_cmd_type;
+
+ /** @hwrt: HWRT object. Will be NULL for compute and transfer jobs. */
+ struct pvr_hwrt_data *hwrt;
+
+ /**
+ * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until
+ * the job is done.
+ */
+ bool has_pm_ref;
+};
+
+/**
+ * pvr_job_get() - Take additional reference on job.
+ * @job: Job pointer.
+ *
+ * Call pvr_job_put() to release.
+ *
+ * Returns:
+ * * The requested job on success, or
+ * * %NULL if no job pointer passed.
+ */
+static __always_inline struct pvr_job *
+pvr_job_get(struct pvr_job *job)
+{
+ if (job)
+ kref_get(&job->ref_count);
+
+ return job;
+}
+
+void pvr_job_put(struct pvr_job *job);
+
+/**
+ * pvr_job_release_pm_ref() - Release the PM ref if the job acquired it.
+ * @job: The job to release the PM ref on.
+ */
+static __always_inline void
+pvr_job_release_pm_ref(struct pvr_job *job)
+{
+ if (job->has_pm_ref) {
+ pvr_power_put(job->pvr_dev);
+ job->has_pm_ref = false;
+ }
+}
+
+/**
+ * pvr_job_get_pm_ref() - Get a PM ref and attach it to the job.
+ * @job: The job to attach the PM ref to.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_power_get() otherwise.
+ */
+static __always_inline int
+pvr_job_get_pm_ref(struct pvr_job *job)
+{
+ int err;
+
+ if (job->has_pm_ref)
+ return 0;
+
+ err = pvr_power_get(job->pvr_dev);
+ if (!err)
+ job->has_pm_ref = true;
+
+ return err;
+}
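Editorial aside: a minimal sketch of how the inline helpers above are meant to pair up. The work in the middle is a placeholder assumption, not part of the driver.

static inline int example_with_pm_ref(struct pvr_job *job)
{
	int err;

	pvr_job_get(job);               /* hold a reference while we work */

	err = pvr_job_get_pm_ref(job);  /* force the GPU to stay powered */
	if (!err) {
		/* ... placeholder work that needs the GPU powered on ... */
		pvr_job_release_pm_ref(job);
	}

	pvr_job_put(job);               /* drop our reference */
	return err;
}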
+
+int pvr_job_wait_first_non_signaled_native_dep(struct pvr_job *job);
+
+bool pvr_job_non_native_deps_done(struct pvr_job *job);
+
+int pvr_job_fits_in_cccb(struct pvr_job *job, unsigned long native_dep_count);
+
+void pvr_job_submit(struct pvr_job *job);
+
+int pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_submit_jobs_args *args);
+
+#endif /* PVR_JOB_H */
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
new file mode 100644
index 000000000000..4fe70610ed94
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -0,0 +1,2640 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_mmu.h"
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_mmu_defs.h"
+
+#include <drm/drm_drv.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/kmemleak.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+
+#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
+#define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
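Editorial aside: a worked example of the two helpers above; the numbers follow directly from SZ_4K == 0x1000 and involve no driver-specific assumptions.

/*
 * PVR_SHIFT_FROM_SIZE(SZ_4K) == __builtin_ctzll(0x1000) == 12
 * PVR_MASK_FROM_SIZE(SZ_4K)  == ~(0x1000 - 1) == ~0xfffULL
 *
 * i.e. the shift recovers the page-offset width and the mask strips the
 * page offset from a 4KiB-aligned address.
 */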
+
+/*
+ * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently
+ * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both
+ * ensures that the selected host page size corresponds to a valid device page
+ * size and sets up values needed by the MMU code below.
+ */
+#if (PVR_DEVICE_PAGE_SIZE == SZ_4K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_4KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_16K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_16KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_64K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_64KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_256K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_256KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_1M)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_1MB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_2M)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_2MB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK
+#else
+# error Unsupported device page size PVR_DEVICE_PAGE_SIZE
+#endif
+
+#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X \
+ (ROGUE_MMUCTRL_ENTRIES_PT_VALUE >> \
+ (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
+
+enum pvr_mmu_sync_level {
+ PVR_MMU_SYNC_LEVEL_NONE = -1,
+ PVR_MMU_SYNC_LEVEL_0 = 0,
+ PVR_MMU_SYNC_LEVEL_1 = 1,
+ PVR_MMU_SYNC_LEVEL_2 = 2,
+};
+
+#define PVR_MMU_SYNC_LEVEL_0_FLAGS (ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT | \
+ ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT | \
+ ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB)
+#define PVR_MMU_SYNC_LEVEL_1_FLAGS (PVR_MMU_SYNC_LEVEL_0_FLAGS | ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD)
+#define PVR_MMU_SYNC_LEVEL_2_FLAGS (PVR_MMU_SYNC_LEVEL_1_FLAGS | ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC)
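Editorial aside: the three flag sets above are cumulative, so syncing at a given level also covers every level below it. A hypothetical mapping helper (example_sync_level_to_flags() is not part of the patch) would look like this:

static inline u32 example_sync_level_to_flags(enum pvr_mmu_sync_level level)
{
	switch (level) {
	case PVR_MMU_SYNC_LEVEL_2:
		return PVR_MMU_SYNC_LEVEL_2_FLAGS;
	case PVR_MMU_SYNC_LEVEL_1:
		return PVR_MMU_SYNC_LEVEL_1_FLAGS;
	case PVR_MMU_SYNC_LEVEL_0:
		return PVR_MMU_SYNC_LEVEL_0_FLAGS;
	case PVR_MMU_SYNC_LEVEL_NONE:
	default:
		return 0;
	}
}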
+
+/**
+ * pvr_mmu_set_flush_flags() - Set MMU cache flush flags for next call to
+ * pvr_mmu_flush_exec().
+ * @pvr_dev: Target PowerVR device.
+ * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
+ *
+ * This function must be called following any possible change to the MMU page
+ * tables.
+ */
+static void pvr_mmu_set_flush_flags(struct pvr_device *pvr_dev, u32 flags)
+{
+ atomic_fetch_or(flags, &pvr_dev->mmu_flush_cache_flags);
+}
+
+/**
+ * pvr_mmu_flush_request_all() - Request flush of all MMU caches when
+ * subsequently calling pvr_mmu_flush_exec().
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must be called following any possible change to the MMU page
+ * tables.
+ */
+void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev)
+{
+ pvr_mmu_set_flush_flags(pvr_dev, PVR_MMU_SYNC_LEVEL_2_FLAGS);
+}
+
+/**
+ * pvr_mmu_flush_exec() - Execute a flush of all MMU caches previously
+ * requested.
+ * @pvr_dev: Target PowerVR device.
+ * @wait: Do not return until the flush is completed.
+ *
+ * This function must be called prior to submitting any new GPU job. The flush
+ * will complete before the jobs are scheduled, so this can be called once after
+ * a series of maps. However, a single unmap should always be immediately
+ * followed by a flush that is explicitly waited for by setting @wait.
+ *
+ * As a failure to flush the MMU caches could risk memory corruption, if the
+ * flush fails (implying the firmware is not responding) then the GPU device is
+ * marked as lost.
+ *
+ * Returns:
+ * * 0 on success when @wait is true, or
+ * * -%EIO if the device is unavailable, or
+ * * Any error encountered while submitting the flush command via the KCCB.
+ */
+int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait)
+{
+ struct rogue_fwif_kccb_cmd cmd_mmu_cache = {};
+ struct rogue_fwif_mmucachedata *cmd_mmu_cache_data =
+ &cmd_mmu_cache.cmd_data.mmu_cache_data;
+ int err = 0;
+ u32 slot;
+ int idx;
+
+ if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx))
+ return -EIO;
+
+ /* Can't flush MMU if the firmware hasn't booted yet. */
+ if (!pvr_dev->fw_dev.booted)
+ goto err_drm_dev_exit;
+
+ cmd_mmu_cache_data->cache_flags =
+ atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0);
+
+ if (!cmd_mmu_cache_data->cache_flags)
+ goto err_drm_dev_exit;
+
+ cmd_mmu_cache.cmd_type = ROGUE_FWIF_KCCB_CMD_MMUCACHE;
+
+ pvr_fw_object_get_fw_addr(pvr_dev->fw_dev.mem.mmucache_sync_obj,
+ &cmd_mmu_cache_data->mmu_cache_sync_fw_addr);
+ cmd_mmu_cache_data->mmu_cache_sync_update_value = 0;
+
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot);
+ if (err)
+ goto err_reset_and_retry;
+
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+ if (err)
+ goto err_reset_and_retry;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_reset_and_retry:
+ /*
+ * Flush command failure is most likely the result of a firmware lockup. Hard
+ * reset the GPU and retry.
+ */
+ err = pvr_power_reset(pvr_dev, true);
+ if (err)
+ goto err_drm_dev_exit; /* Device is lost. */
+
+ /* Retry sending flush request. */
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot);
+ if (err) {
+ pvr_device_lost(pvr_dev);
+ goto err_drm_dev_exit;
+ }
+
+ if (wait) {
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+ if (err)
+ pvr_device_lost(pvr_dev);
+ }
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
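Editorial aside: the call pattern the kernel-doc above describes, in sketch form. pvr_vm_map() and pvr_vm_unmap() are assumed names for whichever operations edit the page tables and request a flush via pvr_mmu_set_flush_flags().

/*
 *   pvr_vm_map(...); pvr_vm_map(...);     // flush flags accumulate, no flush yet
 *   pvr_mmu_flush_exec(pvr_dev, false);   // one flush before submitting jobs
 *
 *   pvr_vm_unmap(...);                    // a single unmap, by contrast...
 *   pvr_mmu_flush_exec(pvr_dev, true);    // ...is flushed and waited on at once
 */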
+
+/**
+ * DOC: PowerVR Virtual Memory Handling
+ */
+/**
+ * DOC: PowerVR Virtual Memory Handling (constants)
+ *
+ * .. c:macro:: PVR_IDX_INVALID
+ *
+ * Default value for a u16-based index.
+ *
+ * This value cannot be zero, since zero is a valid index value.
+ */
+#define PVR_IDX_INVALID ((u16)(-1))
+
+/**
+ * DOC: MMU backing pages
+ */
+/**
+ * DOC: MMU backing pages (constants)
+ *
+ * .. c:macro:: PVR_MMU_BACKING_PAGE_SIZE
+ *
+ * Page size of a PowerVR device's integrated MMU. The CPU page size must be
+ * at least as large as this value for the current implementation; this is
+ * checked at compile-time.
+ */
+#define PVR_MMU_BACKING_PAGE_SIZE SZ_4K
+static_assert(PAGE_SIZE >= PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_mmu_backing_page - Represents a single page used to back a page
+ * table of any level.
+ * @dma_addr: DMA address of this page.
+ * @host_ptr: CPU address of this page.
+ * @pvr_dev: The PowerVR device to which this page is associated. **For
+ * internal use only.**
+ */
+struct pvr_mmu_backing_page {
+ dma_addr_t dma_addr;
+ void *host_ptr;
+/* private: internal use only */
+ struct page *raw_page;
+ struct pvr_device *pvr_dev;
+};
+
+/**
+ * pvr_mmu_backing_page_init() - Initialize a MMU backing page.
+ * @page: Target backing page.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function performs three distinct operations:
+ *
+ * 1. Allocate a single page,
+ * 2. Map the page to the CPU, and
+ * 3. Map the page to DMA-space.
+ *
+ * It is expected that @page be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%ENOMEM if allocation of the backing page or mapping of the backing
+ * page to DMA fails.
+ */
+static int
+pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
+ struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ struct page *raw_page;
+ int err;
+
+ dma_addr_t dma_addr;
+ void *host_ptr;
+
+ raw_page = alloc_page(__GFP_ZERO | GFP_KERNEL);
+ if (!raw_page)
+ return -ENOMEM;
+
+ host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!host_ptr) {
+ err = -ENOMEM;
+ goto err_free_page;
+ }
+
+ dma_addr = dma_map_page(dev, raw_page, 0, PVR_MMU_BACKING_PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ err = -ENOMEM;
+ goto err_unmap_page;
+ }
+
+ page->dma_addr = dma_addr;
+ page->host_ptr = host_ptr;
+ page->pvr_dev = pvr_dev;
+ page->raw_page = raw_page;
+ kmemleak_alloc(page->host_ptr, PAGE_SIZE, 1, GFP_KERNEL);
+
+ return 0;
+
+err_unmap_page:
+ vunmap(host_ptr);
+
+err_free_page:
+ __free_page(raw_page);
+
+ return err;
+}
+
+/**
+ * pvr_mmu_backing_page_fini() - Teardown a MMU backing page.
+ * @page: Target backing page.
+ *
+ * This function performs the mirror operations to pvr_mmu_backing_page_init(),
+ * in reverse order:
+ *
+ * 1. Unmap the page from DMA-space,
+ * 2. Unmap the page from the CPU, and
+ * 3. Free the page.
+ *
+ * It also zeros @page.
+ *
+ * It is a no-op to call this function a second (or further) time on any @page.
+ */
+static void
+pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page)
+{
+ struct device *dev;
+
+ /* Do nothing if no allocation is present. */
+ if (!page->pvr_dev)
+ return;
+
+ dev = from_pvr_device(page->pvr_dev)->dev;
+
+ dma_unmap_page(dev, page->dma_addr, PVR_MMU_BACKING_PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ kmemleak_free(page->host_ptr);
+ vunmap(page->host_ptr);
+
+ __free_page(page->raw_page);
+
+ memset(page, 0, sizeof(*page));
+}
+
+/**
+ * pvr_mmu_backing_page_sync() - Flush a MMU backing page from the CPU to the
+ * device.
+ * @page: Target backing page.
+ * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
+ *
+ * .. caution::
+ *
+ * **This is potentially an expensive function call.** Only call
+ * pvr_mmu_backing_page_sync() once you're sure you have no more changes to
+ * make to the backing page in the immediate future.
+ */
+static void
+pvr_mmu_backing_page_sync(struct pvr_mmu_backing_page *page, u32 flags)
+{
+ struct pvr_device *pvr_dev = page->pvr_dev;
+ struct device *dev;
+
+ /*
+ * Do nothing if no allocation is present. This may be the case if
+ * we are unmapping pages.
+ */
+ if (!pvr_dev)
+ return;
+
+ dev = from_pvr_device(pvr_dev)->dev;
+
+ dma_sync_single_for_device(dev, page->dma_addr,
+ PVR_MMU_BACKING_PAGE_SIZE, DMA_TO_DEVICE);
+
+ pvr_mmu_set_flush_flags(pvr_dev, flags);
+}
+
+/**
+ * DOC: Raw page tables
+ */
+
+#define PVR_PAGE_TABLE_TYPEOF_ENTRY(level_) \
+ typeof_member(struct pvr_page_table_l##level_##_entry_raw, val)
+
+#define PVR_PAGE_TABLE_FIELD_GET(level_, name_, field_, entry_) \
+ (((entry_).val & \
+ ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK) >> \
+ ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT)
+
+#define PVR_PAGE_TABLE_FIELD_PREP(level_, name_, field_, val_) \
+ ((((PVR_PAGE_TABLE_TYPEOF_ENTRY(level_))(val_)) \
+ << ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT) & \
+ ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK)
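Editorial aside: a worked expansion of the accessor macros above, for a level 2 (page catalogue) entry.

/*
 * PVR_PAGE_TABLE_FIELD_GET(2, PC, VALID, entry) expands to
 *
 *   ((entry.val & ~ROGUE_MMUCTRL_PC_DATA_VALID_CLRMSK) >>
 *     ROGUE_MMUCTRL_PC_DATA_VALID_SHIFT)
 *
 * so the register-style CLRMSK/SHIFT constant pairs isolate a single field
 * of the raw entry value; PVR_PAGE_TABLE_FIELD_PREP() is the inverse.
 */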
+
+/**
+ * struct pvr_page_table_l2_entry_raw - A single entry in a level 2 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ * :widths: 1 5
+ * :stub-columns: 1
+ *
+ * * - 31..4
+ * - **Level 1 Page Table Base Address:** Bits 39..12 of the L1
+ * page table base address, which is 4KiB aligned.
+ *
+ * * - 3..2
+ * - *(reserved)*
+ *
+ * * - 1
+ * - **Pending:** When valid bit is not set, indicates that a valid
+ * entry is pending and the MMU should wait for the driver to map
+ * the entry. This is used to support page demand mapping of
+ * memory.
+ *
+ * * - 0
+ * - **Valid:** Indicates that the entry contains a valid L1 page
+ * table. If the valid bit is not set, then an attempted use of
+ * the page would result in a page fault.
+ */
+struct pvr_page_table_l2_entry_raw {
+ u32 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l2_entry_raw) * 8 ==
+ ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE);
+
+static bool
+pvr_page_table_l2_entry_raw_is_valid(struct pvr_page_table_l2_entry_raw entry)
+{
+ return PVR_PAGE_TABLE_FIELD_GET(2, PC, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l2_entry_raw_set() - Write a valid entry into a raw level 2
+ * page table.
+ * @entry: Target raw level 2 page table entry.
+ * @child_table_dma_addr: DMA address of the level 1 page table to be
+ * associated with @entry.
+ *
+ * When calling this function, @child_table_dma_addr must be a valid DMA
+ * address and a multiple of %ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE.
+ */
+static void
+pvr_page_table_l2_entry_raw_set(struct pvr_page_table_l2_entry_raw *entry,
+ dma_addr_t child_table_dma_addr)
+{
+ child_table_dma_addr >>= ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+
+ WRITE_ONCE(entry->val,
+ PVR_PAGE_TABLE_FIELD_PREP(2, PC, VALID, true) |
+ PVR_PAGE_TABLE_FIELD_PREP(2, PC, ENTRY_PENDING, false) |
+ PVR_PAGE_TABLE_FIELD_PREP(2, PC, PD_BASE, child_table_dma_addr));
+}
+
+static void
+pvr_page_table_l2_entry_raw_clear(struct pvr_page_table_l2_entry_raw *entry)
+{
+ WRITE_ONCE(entry->val, 0);
+}
+
+/**
+ * struct pvr_page_table_l1_entry_raw - A single entry in a level 1 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ * :widths: 1 5
+ * :stub-columns: 1
+ *
+ * * - 63..41
+ * - *(reserved)*
+ *
+ * * - 40
+ * - **Pending:** When valid bit is not set, indicates that a valid entry
+ * is pending and the MMU should wait for the driver to map the entry.
+ * This is used to support page demand mapping of memory.
+ *
+ * * - 39..5
+ * - **Level 0 Page Table Base Address:** The way this value is
+ * interpreted depends on the page size. Bits not specified in the
+ * table below (e.g. bits 11..5 for page size 4KiB) should be
+ * considered reserved.
+ *
+ * This table shows the bits used in an L1 page table entry to
+ * represent the Physical Table Base Address for a given Page Size.
+ * Since each L1 page table entry covers 2MiB of address space, the
+ * maximum page size is 2MiB.
+ *
+ * .. flat-table::
+ * :widths: 1 1 1 1
+ * :header-rows: 1
+ * :stub-columns: 1
+ *
+ * * - Page size
+ * - L0 page table base address bits
+ * - Number of L0 page table entries
+ * - Size of L0 page table
+ *
+ * * - 4KiB
+ * - 39..12
+ * - 512
+ * - 4KiB
+ *
+ * * - 16KiB
+ * - 39..10
+ * - 128
+ * - 1KiB
+ *
+ * * - 64KiB
+ * - 39..8
+ * - 32
+ * - 256B
+ *
+ * * - 256KiB
+ * - 39..6
+ * - 8
+ * - 64B
+ *
+ * * - 1MiB
+ * - 39..5 (4 = '0')
+ * - 2
+ * - 16B
+ *
+ * * - 2MiB
+ * - 39..5 (4..3 = '00')
+ * - 1
+ * - 8B
+ *
+ * * - 4
+ * - *(reserved)*
+ *
+ * * - 3..1
+ * - **Page Size:** Sets the page size, from 4KiB to 2MiB.
+ *
+ * * - 0
+ * - **Valid:** Indicates that the entry contains a valid L0 page table.
+ * If the valid bit is not set, then an attempted use of the page would
+ * result in a page fault.
+ */
+struct pvr_page_table_l1_entry_raw {
+ u64 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l1_entry_raw) * 8 ==
+ ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE);
+
+static bool
+pvr_page_table_l1_entry_raw_is_valid(struct pvr_page_table_l1_entry_raw entry)
+{
+ return PVR_PAGE_TABLE_FIELD_GET(1, PD, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l1_entry_raw_set() - Write a valid entry into a raw level 1
+ * page table.
+ * @entry: Target raw level 1 page table entry.
+ * @child_table_dma_addr: DMA address of the level 0 page table to be
+ * associated with @entry.
+ *
+ * When calling this function, @child_table_dma_addr must be a valid DMA
+ * address and a multiple of 4 KiB.
+ */
+static void
+pvr_page_table_l1_entry_raw_set(struct pvr_page_table_l1_entry_raw *entry,
+ dma_addr_t child_table_dma_addr)
+{
+ WRITE_ONCE(entry->val,
+ PVR_PAGE_TABLE_FIELD_PREP(1, PD, VALID, true) |
+ PVR_PAGE_TABLE_FIELD_PREP(1, PD, ENTRY_PENDING, false) |
+ PVR_PAGE_TABLE_FIELD_PREP(1, PD, PAGE_SIZE, ROGUE_MMUCTRL_PAGE_SIZE_X) |
+ /*
+ * The use of a 4K-specific macro here is correct. Allocating
+ * sub-host-page-sized blocks for individual tables is a future
+ * optimization; until then, every page table address is aligned to
+ * the size of the largest (4KiB) table, so this mask holds.
+ */
+ (child_table_dma_addr & ~ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK));
+}
+
+static void
+pvr_page_table_l1_entry_raw_clear(struct pvr_page_table_l1_entry_raw *entry)
+{
+ WRITE_ONCE(entry->val, 0);
+}
+
+/**
+ * struct pvr_page_table_l0_entry_raw - A single entry in a level 0 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ * :widths: 1 5
+ * :stub-columns: 1
+ *
+ * * - 63
+ * - *(reserved)*
+ *
+ * * - 62
+ * - **PM/FW Protect:** Indicates a protected region which only the
+ * Parameter Manager (PM) or firmware processor can write to.
+ *
+ * * - 61..40
+ * - **VP Page (High):** Virtual-physical page used for Parameter Manager
+ * (PM) memory. This field is only used if the additional level of PB
+ * virtualization is enabled. The VP Page field is needed by the PM in
+ * order to correctly reconstitute the free lists after render
+ * completion. This (High) field holds bits 39..18 of the value; the
+ * Low field holds bits 17..12. Bits 11..0 are always zero because the
+ * value is always aligned to the 4KiB page size.
+ *
+ * * - 39..12
+ * - **Physical Page Address:** The way this value is interpreted depends
+ * on the page size. Bits not specified in the table below (e.g. bits
+ * 20..12 for page size 2MiB) should be considered reserved.
+ *
+ * This table shows the bits used in an L0 page table entry to represent
+ * the Physical Page Address for a given page size (as defined in the
+ * associated L1 page table entry).
+ *
+ * .. flat-table::
+ * :widths: 1 1
+ * :header-rows: 1
+ * :stub-columns: 1
+ *
+ * * - Page size
+ * - Physical address bits
+ *
+ * * - 4KiB
+ * - 39..12
+ *
+ * * - 16KiB
+ * - 39..14
+ *
+ * * - 64KiB
+ * - 39..16
+ *
+ * * - 256KiB
+ * - 39..18
+ *
+ * * - 1MiB
+ * - 39..20
+ *
+ * * - 2MiB
+ * - 39..21
+ *
+ * * - 11..6
+ * - **VP Page (Low):** Continuation of VP Page (High).
+ *
+ * * - 5
+ * - **Pending:** When valid bit is not set, indicates that a valid entry
+ * is pending and the MMU should wait for the driver to map the entry.
+ * This is used to support page demand mapping of memory.
+ *
+ * * - 4
+ * - **PM Src:** Set on Parameter Manager (PM) allocated page table
+ * entries when indicated by the PM. Note that this bit will only be set
+ * by the PM, not by the device driver.
+ *
+ * * - 3
+ * - **SLC Bypass Control:** Specifies requests to this page should bypass
+ * the System Level Cache (SLC), if enabled in SLC configuration.
+ *
+ * * - 2
+ * - **Cache Coherency:** Indicates that the page is coherent (i.e. it
+ * does not require a cache flush between operations on the CPU and the
+ * device).
+ *
+ * * - 1
+ * - **Read Only:** If set, this bit indicates that the page is read only.
+ * An attempted write to this page would result in a write-protection
+ * fault.
+ *
+ * * - 0
+ * - **Valid:** Indicates that the entry contains a valid page. If the
+ * valid bit is not set, then an attempted use of the page would result
+ * in a page fault.
+ */
+struct pvr_page_table_l0_entry_raw {
+ u64 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l0_entry_raw) * 8 ==
+ ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE);
+
+/**
+ * struct pvr_page_flags_raw - The configurable flags from a single entry in a
+ * level 0 page table.
+ * @val: The raw value of these flags. Since these are a strict subset of
+ * &struct pvr_page_table_l0_entry_raw, use that type for our member here.
+ *
+ * The flags stored in this type are: PM/FW Protect; SLC Bypass Control; Cache
+ * Coherency, and Read Only (bits 62, 3, 2 and 1 respectively).
+ *
+ * This type should never be instantiated directly; instead use
+ * pvr_page_flags_raw_create() to ensure only valid bits of @val are set.
+ */
+struct pvr_page_flags_raw {
+ struct pvr_page_table_l0_entry_raw val;
+} __packed;
+static_assert(sizeof(struct pvr_page_flags_raw) ==
+ sizeof(struct pvr_page_table_l0_entry_raw));
+
+static bool
+pvr_page_table_l0_entry_raw_is_valid(struct pvr_page_table_l0_entry_raw entry)
+{
+ return PVR_PAGE_TABLE_FIELD_GET(0, PT, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l0_entry_raw_set() - Write a valid entry into a raw level 0
+ * page table.
+ * @entry: Target raw level 0 page table entry.
+ * @dma_addr: DMA address of the physical page to be associated with @entry.
+ * @flags: Options to be set on @entry.
+ *
+ * When calling this function, @dma_addr must be a valid DMA
+ * address and a multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ * The @flags parameter is directly assigned into @entry. It is the caller's
+ * responsibility to ensure that only bits specified in
+ * &struct pvr_page_flags_raw are set in @flags.
+ */
+static void
+pvr_page_table_l0_entry_raw_set(struct pvr_page_table_l0_entry_raw *entry,
+ dma_addr_t dma_addr,
+ struct pvr_page_flags_raw flags)
+{
+ WRITE_ONCE(entry->val, PVR_PAGE_TABLE_FIELD_PREP(0, PT, VALID, true) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, ENTRY_PENDING, false) |
+ (dma_addr & ~ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK) |
+ flags.val.val);
+}
+
+static void
+pvr_page_table_l0_entry_raw_clear(struct pvr_page_table_l0_entry_raw *entry)
+{
+ WRITE_ONCE(entry->val, 0);
+}
+
+/**
+ * pvr_page_flags_raw_create() - Initialize the flag bits of a raw level 0 page
+ * table entry.
+ * @read_only: This page is read-only (see: Read Only).
+ * @cache_coherent: This page does not require cache flushes (see: Cache
+ * Coherency).
+ * @slc_bypass: This page bypasses the device cache (see: SLC Bypass Control).
+ * @pm_fw_protect: This page is only for use by the firmware or Parameter
+ * Manager (see PM/FW Protect).
+ *
+ * For more details on the use of these four options, see their respective
+ * entries in the table under &struct pvr_page_table_l0_entry_raw.
+ *
+ * Return:
+ * A new &struct pvr_page_flags_raw instance which can be passed directly to
+ * pvr_page_table_l0_entry_raw_set() or pvr_page_table_l0_insert().
+ */
+static struct pvr_page_flags_raw
+pvr_page_flags_raw_create(bool read_only, bool cache_coherent, bool slc_bypass,
+ bool pm_fw_protect)
+{
+ struct pvr_page_flags_raw flags;
+
+ flags.val.val =
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, READ_ONLY, read_only) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, CC, cache_coherent) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, SLC_BYPASS_CTRL, slc_bypass) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, PM_META_PROTECT, pm_fw_protect);
+
+ return flags;
+}
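Editorial aside: a minimal sketch tying pvr_page_flags_raw_create() and pvr_page_table_l0_entry_raw_set() together. The entry pointer and DMA address are assumed to come from the surrounding map path.

static void example_write_ro_entry(struct pvr_page_table_l0_entry_raw *entry,
				   dma_addr_t dma_addr)
{
	/* Read-only and cache-coherent; no SLC bypass, no PM/FW protection. */
	struct pvr_page_flags_raw flags =
		pvr_page_flags_raw_create(true, true, false, false);

	pvr_page_table_l0_entry_raw_set(entry, dma_addr, flags);
}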
+
+/**
+ * struct pvr_page_table_l2_raw - The raw data of a level 2 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_MMU_BACKING_PAGE_SIZE.
+ */
+struct pvr_page_table_l2_raw {
+ /** @entries: The raw values of this table. */
+ struct pvr_page_table_l2_entry_raw
+ entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l2_raw) == PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_page_table_l1_raw - The raw data of a level 1 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_MMU_BACKING_PAGE_SIZE.
+ */
+struct pvr_page_table_l1_raw {
+ /** @entries: The raw values of this table. */
+ struct pvr_page_table_l1_entry_raw
+ entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l1_raw) == PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_page_table_l0_raw - The raw data of a level 0 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_MMU_BACKING_PAGE_SIZE.
+ *
+ * .. caution::
+ *
+ * The size of level 0 page tables is variable depending on the page size
+ * specified in the associated level 1 page table entry. Since the device
+ * page size in use is pegged to the host page size, it cannot vary at
+ * runtime. This structure is therefore only defined to contain the required
+ * number of entries for the current device page size. **You should never
+ * read or write beyond the last supported entry.**
+ */
+struct pvr_page_table_l0_raw {
+ /** @entries: The raw values of this table. */
+ struct pvr_page_table_l0_entry_raw
+ entries[ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l0_raw) <= PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * DOC: Mirror page tables
+ */
+
+/*
+ * We pre-declare these types because they cross-depend on pointers to each
+ * other.
+ */
+struct pvr_page_table_l1;
+struct pvr_page_table_l0;
+
+/**
+ * struct pvr_page_table_l2 - A wrapped level 2 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l2_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l2_get_entry_raw().
+ *
+ * A level 2 page table forms the root of the page table tree structure, so
+ * this type has no &parent or &parent_idx members.
+ */
+struct pvr_page_table_l2 {
+ /**
+ * @entries: The children of this node in the page table tree
+ * structure. These are also mirror tables. The indexing of this array
+ * is identical to that of the raw equivalent
+ * (&pvr_page_table_l1_raw.entries).
+ */
+ struct pvr_page_table_l1 *entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE];
+
+ /**
+ * @backing_page: A handle to the memory which holds the raw
+ * equivalent of this table. **For internal use only.**
+ */
+ struct pvr_mmu_backing_page backing_page;
+
+ /**
+ * @entry_count: The current number of valid entries (that we know of)
+ * in this table. This value is essentially a refcount - the table is
+ * destroyed when this value is decremented to zero by
+ * pvr_page_table_l2_remove().
+ */
+ u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l2_init() - Initialize a level 2 page table.
+ * @table: Target level 2 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l2_init(struct pvr_page_table_l2 *table,
+ struct pvr_device *pvr_dev)
+{
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l2_fini() - Teardown a level 2 page table.
+ * @table: Target level 2 page table.
+ *
+ * It is an error to attempt to use @table after calling this function.
+ */
+static void
+pvr_page_table_l2_fini(struct pvr_page_table_l2 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l2_sync() - Flush a level 2 page table from the CPU to the
+ * device.
+ * @table: Target level 2 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l2_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child level 1 page tables of @table also need to be flushed, this should
+ * be done first using pvr_page_table_l1_sync() *before* calling this function.
+ */
+static void
+pvr_page_table_l2_sync(struct pvr_page_table_l2 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_2_FLAGS);
+}
+
+/**
+ * pvr_page_table_l2_get_raw() - Access the raw equivalent of a mirror level 2
+ * page table.
+ * @table: Target level 2 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l2_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l2_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l2_raw *
+pvr_page_table_l2_get_raw(struct pvr_page_table_l2 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l2_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 2 page table.
+ * @table: Target level 2 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 2 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l2_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer.
+ *
+ * Return:
+ * A pointer to the requested raw level 2 page table entry.
+ */
+static struct pvr_page_table_l2_entry_raw *
+pvr_page_table_l2_get_entry_raw(struct pvr_page_table_l2 *table, u16 idx)
+{
+ return &pvr_page_table_l2_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l2_entry_is_valid() - Check if a level 2 page table entry is
+ * marked as valid.
+ * @table: Target level 2 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l2_entry_is_valid(struct pvr_page_table_l2 *table, u16 idx)
+{
+ struct pvr_page_table_l2_entry_raw entry_raw =
+ *pvr_page_table_l2_get_entry_raw(table, idx);
+
+ return pvr_page_table_l2_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_page_table_l1 - A wrapped level 1 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l1_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l1_get_entry_raw().
+ */
+struct pvr_page_table_l1 {
+ /**
+ * @entries: The children of this node in the page table tree
+ * structure. These are also mirror tables. The indexing of this array
+ * is identical to that of the raw equivalent
+ * (&pvr_page_table_l0_raw.entries).
+ */
+ struct pvr_page_table_l0 *entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE];
+
+ /**
+ * @backing_page: A handle to the memory which holds the raw
+ * equivalent of this table. **For internal use only.**
+ */
+ struct pvr_mmu_backing_page backing_page;
+
+ union {
+ /**
+ * @parent: The parent of this node in the page table tree structure.
+ *
+ * This is also a mirror table.
+ *
+ * Only valid when the L1 page table is active. When the L1 page table
+ * has been removed and queued for destruction, the next_free field
+ * should be used instead.
+ */
+ struct pvr_page_table_l2 *parent;
+
+ /**
+ * @next_free: Pointer to the next L1 page table to take/free.
+ *
+ * Used to form a linked list of L1 page tables. This is used
+ * when preallocating tables and when the page table has been
+ * removed and queued for destruction.
+ */
+ struct pvr_page_table_l1 *next_free;
+ };
+
+ /**
+ * @parent_idx: The index of the entry in the parent table (see
+ * @parent) which corresponds to this table.
+ */
+ u16 parent_idx;
+
+ /**
+ * @entry_count: The current number of valid entries (that we know of)
+ * in this table. This value is essentially a refcount - the table is
+ * destroyed when this value is decremented to zero by
+ * pvr_page_table_l1_remove().
+ */
+ u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l1_init() - Initialize a level 1 page table.
+ * @table: Target level 1 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l2_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l1_init(struct pvr_page_table_l1 *table,
+ struct pvr_device *pvr_dev)
+{
+ table->parent_idx = PVR_IDX_INVALID;
+
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l1_free() - Teardown a level 1 page table.
+ * @table: Target level 1 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l2_remove(), which must
+ * be called *before* pvr_page_table_l1_free().
+ */
+static void
+pvr_page_table_l1_free(struct pvr_page_table_l1 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+ kfree(table);
+}
+
+/**
+ * pvr_page_table_l1_sync() - Flush a level 1 page table from the CPU to the
+ * device.
+ * @table: Target level 1 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l1_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child level 0 page tables of @table also need to be flushed, this should
+ * be done first using pvr_page_table_l0_sync() *before* calling this function.
+ */
+static void
+pvr_page_table_l1_sync(struct pvr_page_table_l1 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_1_FLAGS);
+}
+
+/**
+ * pvr_page_table_l1_get_raw() - Access the raw equivalent of a mirror level 1
+ * page table.
+ * @table: Target level 1 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l1_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l1_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l1_raw *
+pvr_page_table_l1_get_raw(struct pvr_page_table_l1 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l1_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 1 page table.
+ * @table: Target level 1 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 1 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l1_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer.
+ *
+ * Return:
+ * A pointer to the requested raw level 1 page table entry.
+ */
+static struct pvr_page_table_l1_entry_raw *
+pvr_page_table_l1_get_entry_raw(struct pvr_page_table_l1 *table, u16 idx)
+{
+ return &pvr_page_table_l1_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l1_entry_is_valid() - Check if a level 1 page table entry is
+ * marked as valid.
+ * @table: Target level 1 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l1_entry_is_valid(struct pvr_page_table_l1 *table, u16 idx)
+{
+ struct pvr_page_table_l1_entry_raw entry_raw =
+ *pvr_page_table_l1_get_entry_raw(table, idx);
+
+ return pvr_page_table_l1_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_page_table_l0 - A wrapped level 0 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l0_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l0_get_entry_raw().
+ *
+ * There is no mirror representation of an individual page, so this type has no
+ * &entries member.
+ */
+struct pvr_page_table_l0 {
+ /**
+ * @backing_page: A handle to the memory which holds the raw
+ * equivalent of this table. **For internal use only.**
+ */
+ struct pvr_mmu_backing_page backing_page;
+
+ union {
+ /**
+ * @parent: The parent of this node in the page table tree structure.
+ *
+ * This is also a mirror table.
+ *
+ * Only valid when the L0 page table is active. When the L0 page table
+ * has been removed and queued for destruction, the next_free field
+ * should be used instead.
+ */
+ struct pvr_page_table_l1 *parent;
+
+ /**
+ * @next_free: Pointer to the next L0 page table to take/free.
+ *
+ * Used to form a linked list of L0 page tables. This is used
+ * when preallocating tables and when the page table has been
+ * removed and queued for destruction.
+ */
+ struct pvr_page_table_l0 *next_free;
+ };
+
+ /**
+ * @parent_idx: The index of the entry in the parent table (see
+ * @parent) which corresponds to this table.
+ */
+ u16 parent_idx;
+
+ /**
+ * @entry_count: The current number of valid entries (that we know of)
+ * in this table. This value is essentially a refcount - the table is
+ * destroyed when this value is decremented to zero by
+ * pvr_page_table_l0_remove().
+ */
+ u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l0_init() - Initialize a level 0 page table.
+ * @table: Target level 0 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l1_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l0_init(struct pvr_page_table_l0 *table,
+ struct pvr_device *pvr_dev)
+{
+ table->parent_idx = PVR_IDX_INVALID;
+
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l0_free() - Teardown a level 0 page table.
+ * @table: Target level 0 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l1_remove(), which must
+ * be called *before* pvr_page_table_l0_free().
+ */
+static void
+pvr_page_table_l0_free(struct pvr_page_table_l0 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+ kfree(table);
+}
+
+/**
+ * pvr_page_table_l0_sync() - Flush a level 0 page table from the CPU to the
+ * device.
+ * @table: Target level 0 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l0_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child pages of @table also need to be flushed, this should be done first
+ * using a DMA sync function (e.g. dma_sync_sg_for_device()) *before* calling
+ * this function.
+ */
+static void
+pvr_page_table_l0_sync(struct pvr_page_table_l0 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_0_FLAGS);
+}
+
+/**
+ * pvr_page_table_l0_get_raw() - Access the raw equivalent of a mirror level 0
+ * page table.
+ * @table: Target level 0 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l0_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l0_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l0_raw *
+pvr_page_table_l0_get_raw(struct pvr_page_table_l0 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l0_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 0 page table.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 0 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l0_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer. This is especially important for level 0 page tables, which
+ * can have a variable number of entries.
+ *
+ * Return:
+ * A pointer to the requested raw level 0 page table entry.
+ */
+static struct pvr_page_table_l0_entry_raw *
+pvr_page_table_l0_get_entry_raw(struct pvr_page_table_l0 *table, u16 idx)
+{
+ return &pvr_page_table_l0_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l0_entry_is_valid() - Check if a level 0 page table entry is
+ * marked as valid.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l0_entry_is_valid(struct pvr_page_table_l0 *table, u16 idx)
+{
+ struct pvr_page_table_l0_entry_raw entry_raw =
+ *pvr_page_table_l0_get_entry_raw(table, idx);
+
+ return pvr_page_table_l0_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_mmu_context - context holding data for operations at page
+ * catalogue level, intended for use with a VM context.
+ */
+struct pvr_mmu_context {
+ /** @pvr_dev: The PVR device associated with the owning VM context. */
+ struct pvr_device *pvr_dev;
+
+ /** @page_table_l2: The MMU table root. */
+ struct pvr_page_table_l2 page_table_l2;
+};
+
+/**
+ * struct pvr_page_table_ptr - A reference to a single physical page as indexed
+ * by the page table structure.
+ *
+ * Intended for embedding in a &struct pvr_mmu_op_context.
+ */
+struct pvr_page_table_ptr {
+ /**
+ * @l1_table: A cached handle to the level 1 page table the
+ * context is currently traversing.
+ */
+ struct pvr_page_table_l1 *l1_table;
+
+ /**
+ * @l0_table: A cached handle to the level 0 page table the
+ * context is currently traversing.
+ */
+ struct pvr_page_table_l0 *l0_table;
+
+ /**
+ * @l2_idx: Index into the level 2 page table the context is
+ * currently referencing.
+ */
+ u16 l2_idx;
+
+ /**
+ * @l1_idx: Index into the level 1 page table the context is
+ * currently referencing.
+ */
+ u16 l1_idx;
+
+ /**
+ * @l0_idx: Index into the level 0 page table the context is
+ * currently referencing.
+ */
+ u16 l0_idx;
+};
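Editorial aside: how a device-virtual address is expected to break down into the three indices above for 4KiB device pages. The bit positions are inferred from the table sizes described earlier in this file and should be treated as assumptions; the authoritative shift/mask macros live in pvr_rogue_mmu_defs.h.

/*
 *   l2_idx = bits 39..30 of the address (page catalogue entry)
 *   l1_idx = bits 29..21 of the address (page directory entry)
 *   l0_idx = bits 20..12 of the address (page table entry, 4KiB pages)
 */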
+
+/**
+ * struct pvr_mmu_op_context - context holding data for individual
+ * device-virtual mapping operations. Intended for use with a VM bind operation.
+ */
+struct pvr_mmu_op_context {
+ /** @mmu_ctx: The MMU context associated with the owning VM context. */
+ struct pvr_mmu_context *mmu_ctx;
+
+ /** @map: Data specifically for map operations. */
+ struct {
+ /**
+ * @sgt: Scatter gather table containing pages pinned for use by
+ * this context - these are currently pinned when initialising
+ * the VM bind operation.
+ */
+ struct sg_table *sgt;
+
+ /** @sgt_offset: Start address of the device-virtual mapping. */
+ u64 sgt_offset;
+
+ /**
+ * @l1_prealloc_tables: Preallocated l1 page table objects
+ * used by this context when creating a page mapping. Linked list
+ * fully created during initialisation.
+ */
+ struct pvr_page_table_l1 *l1_prealloc_tables;
+
+ /**
+ * @l0_prealloc_tables: Preallocated l0 page table objects
+ * used by this context when creating a page mapping. Linked list
+ * fully created during initialisation.
+ */
+ struct pvr_page_table_l0 *l0_prealloc_tables;
+ } map;
+
+ /** @unmap: Data specifically for unmap operations. */
+ struct {
+ /**
+ * @l1_free_tables: Collects page table objects freed by unmap
+ * ops. Linked list empty at creation.
+ */
+ struct pvr_page_table_l1 *l1_free_tables;
+
+ /**
+ * @l0_free_tables: Collects page table objects freed by unmap
+ * ops. Linked list empty at creation.
+ */
+ struct pvr_page_table_l0 *l0_free_tables;
+ } unmap;
+
+ /**
+ * @curr_page: A reference to a single physical page as indexed by the
+ * page table structure.
+ */
+ struct pvr_page_table_ptr curr_page;
+
+ /**
+ * @sync_level_required: The maximum level of the page table tree
+ * structure which has (possibly) been modified since it was last
+ * flushed to the device.
+ *
+ * This field should only be set with pvr_mmu_op_context_require_sync()
+ * or indirectly by pvr_mmu_op_context_sync_partial().
+ */
+ enum pvr_mmu_sync_level sync_level_required;
+};
+
+/**
+ * pvr_page_table_l2_insert() - Insert an entry referring to a level 1 page
+ * table into a level 2 page table.
+ * @op_ctx: Target MMU op context pointing at the entry to insert the L1 page
+ * table into.
+ * @child_table: Target level 1 page table to be referenced by the new entry.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L2 entry.
+ *
+ * It is the caller's responsibility to execute any memory barriers to ensure
+ * that the creation of @child_table is ordered before the L2 entry is inserted.
+ */
+static void
+pvr_page_table_l2_insert(struct pvr_mmu_op_context *op_ctx,
+ struct pvr_page_table_l1 *child_table)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l2_entry_raw *entry_raw =
+ pvr_page_table_l2_get_entry_raw(l2_table,
+ op_ctx->curr_page.l2_idx);
+
+ pvr_page_table_l2_entry_raw_set(entry_raw,
+ child_table->backing_page.dma_addr);
+
+ child_table->parent = l2_table;
+ child_table->parent_idx = op_ctx->curr_page.l2_idx;
+ l2_table->entries[op_ctx->curr_page.l2_idx] = child_table;
+ ++l2_table->entry_count;
+ op_ctx->curr_page.l1_table = child_table;
+}
+
+/**
+ * pvr_page_table_l2_remove() - Remove a level 1 page table from a level 2 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L2 entry to remove.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L2 entry.
+ */
+static void
+pvr_page_table_l2_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l2_entry_raw *entry_raw =
+ pvr_page_table_l2_get_entry_raw(l2_table,
+ op_ctx->curr_page.l1_table->parent_idx);
+
+ WARN_ON(op_ctx->curr_page.l1_table->parent != l2_table);
+
+ pvr_page_table_l2_entry_raw_clear(entry_raw);
+
+ l2_table->entries[op_ctx->curr_page.l1_table->parent_idx] = NULL;
+ op_ctx->curr_page.l1_table->parent_idx = PVR_IDX_INVALID;
+ op_ctx->curr_page.l1_table->next_free = op_ctx->unmap.l1_free_tables;
+ op_ctx->unmap.l1_free_tables = op_ctx->curr_page.l1_table;
+ op_ctx->curr_page.l1_table = NULL;
+
+ --l2_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l1_insert() - Insert an entry referring to a level 0 page
+ * table into a level 1 page table.
+ * @op_ctx: Target MMU op context pointing at the entry to insert the L0 page
+ * table into.
+ * @child_table: L0 page table to insert.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L1 entry.
+ *
+ * It is the caller's responsibility to execute any memory barriers to ensure
+ * that the creation of @child_table is ordered before the L1 entry is inserted.
+ */
+static void
+pvr_page_table_l1_insert(struct pvr_mmu_op_context *op_ctx,
+ struct pvr_page_table_l0 *child_table)
+{
+ struct pvr_page_table_l1_entry_raw *entry_raw =
+ pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l1_table,
+ op_ctx->curr_page.l1_idx);
+
+ pvr_page_table_l1_entry_raw_set(entry_raw,
+ child_table->backing_page.dma_addr);
+
+ child_table->parent = op_ctx->curr_page.l1_table;
+ child_table->parent_idx = op_ctx->curr_page.l1_idx;
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx] = child_table;
+ ++op_ctx->curr_page.l1_table->entry_count;
+ op_ctx->curr_page.l0_table = child_table;
+}
+
+/**
+ * pvr_page_table_l1_remove() - Remove a level 0 page table from a level 1 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L1 entry to remove.
+ *
+ * If this function results in the L1 table becoming empty, it will be removed
+ * from its parent level 2 page table and destroyed.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L1 entry.
+ */
+static void
+pvr_page_table_l1_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l1_entry_raw *entry_raw =
+ pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l0_table->parent,
+ op_ctx->curr_page.l0_table->parent_idx);
+
+ WARN_ON(op_ctx->curr_page.l0_table->parent !=
+ op_ctx->curr_page.l1_table);
+
+ pvr_page_table_l1_entry_raw_clear(entry_raw);
+
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l0_table->parent_idx] = NULL;
+ op_ctx->curr_page.l0_table->parent_idx = PVR_IDX_INVALID;
+ op_ctx->curr_page.l0_table->next_free = op_ctx->unmap.l0_free_tables;
+ op_ctx->unmap.l0_free_tables = op_ctx->curr_page.l0_table;
+ op_ctx->curr_page.l0_table = NULL;
+
+ if (--op_ctx->curr_page.l1_table->entry_count == 0) {
+ /* Clear the parent L2 page table entry. */
+ if (op_ctx->curr_page.l1_table->parent_idx != PVR_IDX_INVALID)
+ pvr_page_table_l2_remove(op_ctx);
+ }
+}
+
+/**
+ * pvr_page_table_l0_insert() - Insert an entry referring to a physical page
+ * into a level 0 page table.
+ * @op_ctx: Target MMU op context pointing at the L0 entry to insert.
+ * @dma_addr: Target DMA address to be referenced by the new entry.
+ * @flags: Page options to be stored in the new entry.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L0 entry.
+ */
+static void
+pvr_page_table_l0_insert(struct pvr_mmu_op_context *op_ctx,
+ dma_addr_t dma_addr, struct pvr_page_flags_raw flags)
+{
+ struct pvr_page_table_l0_entry_raw *entry_raw =
+ pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx);
+
+ pvr_page_table_l0_entry_raw_set(entry_raw, dma_addr, flags);
+
+ /*
+ * There is no entry to set here - we don't keep a mirror of
+ * individual pages.
+ */
+
+ ++op_ctx->curr_page.l0_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l0_remove() - Remove a physical page from a level 0 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L0 entry to remove.
+ *
+ * If this function results in the L0 table becoming empty, it will be removed
+ * from its parent L1 page table and destroyed.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L0 entry.
+ */
+static void
+pvr_page_table_l0_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l0_entry_raw *entry_raw =
+ pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx);
+
+ pvr_page_table_l0_entry_raw_clear(entry_raw);
+
+ /*
+ * There is no entry to clear here - we don't keep a mirror of
+ * individual pages.
+ */
+
+ if (--op_ctx->curr_page.l0_table->entry_count == 0) {
+ /* Clear the parent L1 page table entry. */
+ if (op_ctx->curr_page.l0_table->parent_idx != PVR_IDX_INVALID)
+ pvr_page_table_l1_remove(op_ctx);
+ }
+}
+
+/**
+ * DOC: Page table index utilities
+ */
+
+/**
+ * pvr_page_table_l2_idx() - Calculate the level 2 page table index for a
+ * device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 2 page table corresponding to @device_addr.
+ */
+static u16
+pvr_page_table_l2_idx(u64 device_addr)
+{
+ return (device_addr & ~ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >>
+ ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+}
+
+/**
+ * pvr_page_table_l1_idx() - Calculate the level 1 page table index for a
+ * device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 1 page table corresponding to @device_addr.
+ */
+static u16
+pvr_page_table_l1_idx(u64 device_addr)
+{
+ return (device_addr & ~ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK) >>
+ ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+}
+
+/**
+ * pvr_page_table_l0_idx() - Calculate the level 0 page table index for a
+ * device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 0 page table corresponding to @device_addr.
+ */
+static u16
+pvr_page_table_l0_idx(u64 device_addr)
+{
+ return (device_addr & ~ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK) >>
+ ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT;
+}
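+
+/*
+ * For illustration: assuming the default 4KiB device page size, where an L0
+ * (page table) entry maps 4KiB, an L1 (page directory) entry maps 2MiB and an
+ * L2 (page catalogue) entry maps 1GiB, a device-virtual address would
+ * decompose as follows:
+ *
+ *   u64 device_addr = 0x40801000ull;          // 1GiB + 8MiB + 4KiB
+ *
+ *   pvr_page_table_l2_idx(device_addr) == 1;  // second page catalogue entry
+ *   pvr_page_table_l1_idx(device_addr) == 4;  // fifth page directory entry
+ *   pvr_page_table_l0_idx(device_addr) == 1;  // second page table entry
+ *
+ * The real bit ranges come from the ROGUE_MMUCTRL_VADDR_*_CLRMSK and *_SHIFT
+ * definitions, so they follow the hardware layout rather than the sizes
+ * assumed above.
+ */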
+
+/**
+ * DOC: High-level page table operations
+ */
+
+/**
+ * pvr_page_table_l1_get_or_insert() - Retrieves (optionally inserting if
+ * necessary) a level 1 page table from the specified level 2 page table entry.
+ * @op_ctx: Target MMU op context.
+ * @should_insert: [IN] Specifies whether new page tables should be inserted
+ * when empty page table entries are encountered during traversal.
+ *
+ * Return:
+ * * 0 on success, or
+ *
+ * If @should_insert is %false:
+ * * -%ENXIO if a level 1 page table would have been inserted.
+ *
+ * If @should_insert is %true:
+ * * Any error encountered while inserting the level 1 page table.
+ */
+static int
+pvr_page_table_l1_get_or_insert(struct pvr_mmu_op_context *op_ctx,
+ bool should_insert)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l1 *table;
+
+ if (pvr_page_table_l2_entry_is_valid(l2_table,
+ op_ctx->curr_page.l2_idx)) {
+ op_ctx->curr_page.l1_table =
+ l2_table->entries[op_ctx->curr_page.l2_idx];
+ return 0;
+ }
+
+ if (!should_insert)
+ return -ENXIO;
+
+ /* Take a prealloced table. */
+ table = op_ctx->map.l1_prealloc_tables;
+ if (!table)
+ return -ENOMEM;
+
+ /* Pop */
+ op_ctx->map.l1_prealloc_tables = table->next_free;
+ table->next_free = NULL;
+
+ /* Ensure new table is fully written out before adding to L2 page table. */
+ wmb();
+
+ pvr_page_table_l2_insert(op_ctx, table);
+
+ return 0;
+}
+
+/**
+ * pvr_page_table_l0_get_or_insert() - Retrieves (optionally inserting if
+ * necessary) a level 0 page table from the specified level 1 page table entry.
+ * @op_ctx: Target MMU op context.
+ * @should_insert: [IN] Specifies whether new page tables should be inserted
+ * when empty page table entries are encountered during traversal.
+ *
+ * Return:
+ * * 0 on success,
+ *
+ * If @should_insert is %false:
+ * * -%ENXIO if a level 0 page table would have been inserted.
+ *
+ * If @should_insert is %true:
+ * * Any error encountered while inserting the level 0 page table.
+ */
+static int
+pvr_page_table_l0_get_or_insert(struct pvr_mmu_op_context *op_ctx,
+ bool should_insert)
+{
+ struct pvr_page_table_l0 *table;
+
+ if (pvr_page_table_l1_entry_is_valid(op_ctx->curr_page.l1_table,
+ op_ctx->curr_page.l1_idx)) {
+ op_ctx->curr_page.l0_table =
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx];
+ return 0;
+ }
+
+ if (!should_insert)
+ return -ENXIO;
+
+ /* Take a prealloced table. */
+ table = op_ctx->map.l0_prealloc_tables;
+ if (!table)
+ return -ENOMEM;
+
+ /* Pop */
+ op_ctx->map.l0_prealloc_tables = table->next_free;
+ table->next_free = NULL;
+
+ /* Ensure new table is fully written out before adding to L1 page table. */
+ wmb();
+
+ pvr_page_table_l1_insert(op_ctx, table);
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_context_create() - Create an MMU context.
+ * @pvr_dev: PVR device associated with owning VM context.
+ *
+ * Returns:
+ * * Newly created MMU context object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l2_init().
+ */
+struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev)
+{
+ struct pvr_mmu_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ int err;
+
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l2_init(&ctx->page_table_l2, pvr_dev);
+ if (err) {
+ kfree(ctx);
+ return ERR_PTR(err);
+ }
+
+ ctx->pvr_dev = pvr_dev;
+
+ return ctx;
+}
+
+/**
+ * pvr_mmu_context_destroy() - Destroy an MMU context.
+ * @ctx: Target MMU context.
+ */
+void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx)
+{
+ pvr_page_table_l2_fini(&ctx->page_table_l2);
+ kfree(ctx);
+}
+
+/**
+ * pvr_mmu_get_root_table_dma_addr() - Get the DMA address of the root of the
+ * page table structure behind a VM context.
+ * @ctx: Target MMU context.
+ */
+dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx)
+{
+ return ctx->page_table_l2.backing_page.dma_addr;
+}
+
+/**
+ * pvr_page_table_l1_alloc() - Allocate a level 1 page table object.
+ * @ctx: MMU context of owning VM context.
+ *
+ * Returns:
+ * * Newly created page table object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l1_init().
+ */
+static struct pvr_page_table_l1 *
+pvr_page_table_l1_alloc(struct pvr_mmu_context *ctx)
+{
+ int err;
+
+ struct pvr_page_table_l1 *table =
+ kzalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l1_init(table, ctx->pvr_dev);
+ if (err) {
+ kfree(table);
+ return ERR_PTR(err);
+ }
+
+ return table;
+}
+
+/**
+ * pvr_page_table_l0_alloc() - Allocate a level 0 page table object.
+ * @ctx: MMU context of owning VM context.
+ *
+ * Returns:
+ * * Newly created page table object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l0_init().
+ */
+static struct pvr_page_table_l0 *
+pvr_page_table_l0_alloc(struct pvr_mmu_context *ctx)
+{
+ int err;
+
+ struct pvr_page_table_l0 *table =
+ kzalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l0_init(table, ctx->pvr_dev);
+ if (err) {
+ kfree(table);
+ return ERR_PTR(err);
+ }
+
+ return table;
+}
+
+/**
+ * pvr_mmu_op_context_require_sync() - Mark an MMU op context as requiring a
+ * sync operation for the referenced page tables up to a specified level.
+ * @op_ctx: Target MMU op context.
+ * @level: Maximum page table level for which a sync is required.
+ */
+static void
+pvr_mmu_op_context_require_sync(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ if (op_ctx->sync_level_required < level)
+ op_ctx->sync_level_required = level;
+}
+
+/**
+ * pvr_mmu_op_context_sync_manual() - Trigger a sync of some or all of the
+ * page tables referenced by a MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @level: Maximum page table level to sync.
+ *
+ * Do not call this function directly. Instead use
+ * pvr_mmu_op_context_sync_partial() which is checked against the current
+ * value of &op_ctx->sync_level_required as set by
+ * pvr_mmu_op_context_require_sync().
+ */
+static void
+pvr_mmu_op_context_sync_manual(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ /*
+ * We sync the page table levels in ascending order (starting from the
+ * leaf node) to ensure consistency.
+ */
+
+ WARN_ON(level < PVR_MMU_SYNC_LEVEL_NONE);
+
+ if (level <= PVR_MMU_SYNC_LEVEL_NONE)
+ return;
+
+ if (op_ctx->curr_page.l0_table)
+ pvr_page_table_l0_sync(op_ctx->curr_page.l0_table);
+
+ if (level < PVR_MMU_SYNC_LEVEL_1)
+ return;
+
+ if (op_ctx->curr_page.l1_table)
+ pvr_page_table_l1_sync(op_ctx->curr_page.l1_table);
+
+ if (level < PVR_MMU_SYNC_LEVEL_2)
+ return;
+
+ pvr_page_table_l2_sync(&op_ctx->mmu_ctx->page_table_l2);
+}
+
+/**
+ * pvr_mmu_op_context_sync_partial() - Trigger a sync of some or all of the
+ * page tables referenced by a MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @level: Requested page table level to sync up to (inclusive).
+ *
+ * If @level is greater than the maximum level recorded by @op_ctx as requiring
+ * a sync operation, only the previously recorded maximum will be used.
+ *
+ * Additionally, if @level is greater than or equal to the maximum level
+ * recorded by @op_ctx as requiring a sync operation, that maximum level will be
+ * reset as a full sync will be performed. This is equivalent to calling
+ * pvr_mmu_op_context_sync().
+ */
+static void
+pvr_mmu_op_context_sync_partial(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ /*
+ * If the requested sync level is greater than or equal to the
+ * currently required sync level, we do two things:
+ * * Don't waste time syncing levels we haven't previously marked as
+ * requiring a sync, and
+ * * Reset the required sync level since we are about to sync
+ * everything that was previously marked as requiring a sync.
+ */
+ if (level >= op_ctx->sync_level_required) {
+ level = op_ctx->sync_level_required;
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+ }
+
+ pvr_mmu_op_context_sync_manual(op_ctx, level);
+}
+
+/**
+ * pvr_mmu_op_context_sync() - Trigger a sync of every page table referenced by
+ * a MMU op context.
+ * @op_ctx: Target MMU op context.
+ *
+ * The maximum level marked internally as requiring a sync will be reset so
+ * that subsequent calls to this function will be no-ops unless @op_ctx is
+ * otherwise updated.
+ */
+static void
+pvr_mmu_op_context_sync(struct pvr_mmu_op_context *op_ctx)
+{
+ pvr_mmu_op_context_sync_manual(op_ctx, op_ctx->sync_level_required);
+
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+}
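+
+/*
+ * For illustration: if a map operation causes a new L1 table to be inserted,
+ * pvr_mmu_op_context_load_tables() marks the op context with
+ * PVR_MMU_SYNC_LEVEL_2, so a later call to
+ *
+ *   pvr_mmu_op_context_sync(op_ctx);
+ *
+ * flushes the current L0 table, then the current L1 table and finally the L2
+ * root, in that leaf-to-root order, and resets sync_level_required to
+ * PVR_MMU_SYNC_LEVEL_NONE so repeated calls become no-ops.
+ */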
+
+/**
+ * pvr_mmu_op_context_load_tables() - Load pointers to tables in each level of
+ * the page table tree structure needed to reference the physical page
+ * referenced by a MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @should_create: Specifies whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ * @load_level_required: Maximum page table level to load.
+ *
+ * If @should_create is %true, this function may modify the stored required
+ * sync level of @op_ctx as new page tables are created and inserted into their
+ * respective parents.
+ *
+ * Since there is only one root page table, it is technically incorrect to call
+ * this function with a value of @load_level_required greater than or equal to
+ * the root level number. However, this is not explicitly disallowed here.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_page_table_l1_get_or_insert() if
+ *   @load_level_required >= 1 except -%ENXIO, or
+ * * Any error returned by pvr_page_table_l0_get_or_insert() if
+ *   @load_level_required >= 0 except -%ENXIO.
+ */
+static int
+pvr_mmu_op_context_load_tables(struct pvr_mmu_op_context *op_ctx,
+ bool should_create,
+ enum pvr_mmu_sync_level load_level_required)
+{
+ const struct pvr_page_table_l1 *l1_head_before =
+ op_ctx->map.l1_prealloc_tables;
+ const struct pvr_page_table_l0 *l0_head_before =
+ op_ctx->map.l0_prealloc_tables;
+ int err;
+
+ /* Clear tables we're about to fetch in case of error states. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_1)
+ op_ctx->curr_page.l1_table = NULL;
+
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_0)
+ op_ctx->curr_page.l0_table = NULL;
+
+ /* Get or create L1 page table. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_1) {
+ err = pvr_page_table_l1_get_or_insert(op_ctx, should_create);
+ if (err) {
+ /*
+ * If @should_create is %false and no L1 page table was
+ * found, return early but without an error. Since
+ * pvr_page_table_l1_get_or_insert() can only return
+ * -%ENXIO if @should_create is %false, there is no
+ * need to check it here.
+ */
+ if (err == -ENXIO)
+ err = 0;
+
+ return err;
+ }
+ }
+
+ /* Get or create L0 page table. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_0) {
+ err = pvr_page_table_l0_get_or_insert(op_ctx, should_create);
+ if (err) {
+ /*
+ * If @should_create is %false and no L0 page table was
+ * found, return early but without an error. Since
+ * pvr_page_table_l0_get_or_insert() can only return
+ * -%ENXIO if @should_create is %false, there is no
+ * need to check it here.
+ */
+ if (err == -ENXIO)
+ err = 0;
+
+ /*
+ * At this point, an L1 page table could have been
+ * inserted but is now empty due to the failed attempt
+ * at inserting an L0 page table. In this instance, we
+ * must remove the empty L1 page table ourselves as
+ * pvr_page_table_l1_remove() is never called as part
+ * of the error path in
+ * pvr_page_table_l0_get_or_insert().
+ */
+ if (l1_head_before != op_ctx->map.l1_prealloc_tables) {
+ pvr_page_table_l2_remove(op_ctx);
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ }
+
+ return err;
+ }
+ }
+
+ /*
+ * A sync is only needed if table objects were inserted. This can be
+ * inferred by checking if the pointer at the head of the linked list
+ * has changed.
+ */
+ if (l1_head_before != op_ctx->map.l1_prealloc_tables)
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ else if (l0_head_before != op_ctx->map.l0_prealloc_tables)
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_1);
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_op_context_set_curr_page() - Reassign the current page of an MMU op
+ * context, syncing any page tables previously assigned to it which are no
+ * longer relevant.
+ * @op_ctx: Target MMU op context.
+ * @device_addr: New pointer target.
+ * @should_create: Specify whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ *
+ * This function performs a full sync on the op context's current page
+ * tables, regardless of which levels are modified.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_op_context_load_tables().
+ */
+static int
+pvr_mmu_op_context_set_curr_page(struct pvr_mmu_op_context *op_ctx,
+ u64 device_addr, bool should_create)
+{
+ pvr_mmu_op_context_sync(op_ctx);
+
+ op_ctx->curr_page.l2_idx = pvr_page_table_l2_idx(device_addr);
+ op_ctx->curr_page.l1_idx = pvr_page_table_l1_idx(device_addr);
+ op_ctx->curr_page.l0_idx = pvr_page_table_l0_idx(device_addr);
+ op_ctx->curr_page.l1_table = NULL;
+ op_ctx->curr_page.l0_table = NULL;
+
+ return pvr_mmu_op_context_load_tables(op_ctx, should_create,
+ PVR_MMU_SYNC_LEVEL_1);
+}
+
+/**
+ * pvr_mmu_op_context_next_page() - Advance the current page of an MMU op
+ * context.
+ * @op_ctx: Target MMU op context.
+ * @should_create: Specify whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ *
+ * If @should_create is %false, it is the caller's responsibility to verify that
+ * the state of the table references in @op_ctx is valid on return. If -%ENXIO
+ * is returned, at least one of the table references is invalid. It should be
+ * noted that @op_ctx as a whole will be left in a valid state if -%ENXIO is
+ * returned, unlike other error codes. The caller should check which references
+ * are invalid by comparing them to %NULL. Only the L2 root page table, held
+ * in &op_ctx->mmu_ctx, is guaranteed to be valid, since it represents the
+ * root of the page table tree structure.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EPERM if the operation would wrap at the top of the page table
+ * hierarchy,
+ * * -%ENXIO if @should_create is %false and a page table of any level would
+ * have otherwise been created, or
+ * * Any error returned while attempting to create missing page tables if
+ * @should_create is %true.
+ */
+static int
+pvr_mmu_op_context_next_page(struct pvr_mmu_op_context *op_ctx,
+ bool should_create)
+{
+ s8 load_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+
+ if (++op_ctx->curr_page.l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X)
+ goto load_tables;
+
+ op_ctx->curr_page.l0_idx = 0;
+ load_level_required = PVR_MMU_SYNC_LEVEL_0;
+
+ if (++op_ctx->curr_page.l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE)
+ goto load_tables;
+
+ op_ctx->curr_page.l1_idx = 0;
+ load_level_required = PVR_MMU_SYNC_LEVEL_1;
+
+ if (++op_ctx->curr_page.l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE)
+ goto load_tables;
+
+ /*
+ * If the pattern continued, we would set &op_ctx->curr_page.l2_idx to
+ * zero here. However, that would wrap the top layer of the page table
+ * hierarchy which is not a valid operation. Instead, we warn and return
+ * an error.
+ */
+ WARN(true,
+ "%s(%p) attempted to loop the top of the page table hierarchy",
+ __func__, op_ctx);
+ return -EPERM;
+
+ /* If indices have wrapped, we need to load new tables. */
+load_tables:
+ /* First, flush tables which will be unloaded. */
+ pvr_mmu_op_context_sync_partial(op_ctx, load_level_required);
+
+ /* Then load tables from the required level down. */
+ return pvr_mmu_op_context_load_tables(op_ctx, should_create,
+ load_level_required);
+}
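+
+/*
+ * Illustrative trace: assuming, for example, 512 entries per L0 page table,
+ * advancing from l0_idx == 511 resets l0_idx to 0, increments l1_idx and sets
+ * load_level_required to PVR_MMU_SYNC_LEVEL_0, so the outgoing L0 table is
+ * flushed by pvr_mmu_op_context_sync_partial() before the table for the new
+ * position is loaded (or created, when @should_create is %true).
+ */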
+
+/**
+ * DOC: Single page operations
+ */
+
+/**
+ * pvr_page_create() - Create a device-virtual memory page and insert it into
+ * a level 0 page table.
+ * @op_ctx: Target MMU op context pointing at the device-virtual address of the
+ * target page.
+ * @dma_addr: DMA address of the physical page backing the created page.
+ * @flags: Page options saved on the level 0 page table entry for reading by
+ * the device.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EEXIST if the requested page already exists.
+ */
+static int
+pvr_page_create(struct pvr_mmu_op_context *op_ctx, dma_addr_t dma_addr,
+ struct pvr_page_flags_raw flags)
+{
+ /* Do not create a new page if one already exists. */
+ if (pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx)) {
+ return -EEXIST;
+ }
+
+ pvr_page_table_l0_insert(op_ctx, dma_addr, flags);
+
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+
+ return 0;
+}
+
+/**
+ * pvr_page_destroy() - Destroy a device page after removing it from its
+ * parent level 0 page table.
+ * @op_ctx: Target MMU op context.
+ */
+static void
+pvr_page_destroy(struct pvr_mmu_op_context *op_ctx)
+{
+ /* Do nothing if the page does not exist. */
+ if (!pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx)) {
+ return;
+ }
+
+ /* Clear the parent L0 page table entry. */
+ pvr_page_table_l0_remove(op_ctx);
+
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+}
+
+/**
+ * pvr_mmu_op_context_destroy() - Destroy an MMU op context.
+ * @op_ctx: Target MMU op context.
+ */
+void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx)
+{
+ const bool flush_caches =
+ op_ctx->sync_level_required != PVR_MMU_SYNC_LEVEL_NONE;
+
+ pvr_mmu_op_context_sync(op_ctx);
+
+ /* Unmaps should be flushed immediately. Map flushes can be deferred. */
+ if (flush_caches && !op_ctx->map.sgt)
+ pvr_mmu_flush_exec(op_ctx->mmu_ctx->pvr_dev, true);
+
+ while (op_ctx->map.l0_prealloc_tables) {
+ struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables;
+
+ op_ctx->map.l0_prealloc_tables =
+ op_ctx->map.l0_prealloc_tables->next_free;
+ pvr_page_table_l0_free(tmp);
+ }
+
+ while (op_ctx->map.l1_prealloc_tables) {
+ struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables;
+
+ op_ctx->map.l1_prealloc_tables =
+ op_ctx->map.l1_prealloc_tables->next_free;
+ pvr_page_table_l1_free(tmp);
+ }
+
+ while (op_ctx->unmap.l0_free_tables) {
+ struct pvr_page_table_l0 *tmp = op_ctx->unmap.l0_free_tables;
+
+ op_ctx->unmap.l0_free_tables =
+ op_ctx->unmap.l0_free_tables->next_free;
+ pvr_page_table_l0_free(tmp);
+ }
+
+ while (op_ctx->unmap.l1_free_tables) {
+ struct pvr_page_table_l1 *tmp = op_ctx->unmap.l1_free_tables;
+
+ op_ctx->unmap.l1_free_tables =
+ op_ctx->unmap.l1_free_tables->next_free;
+ pvr_page_table_l1_free(tmp);
+ }
+
+ kfree(op_ctx);
+}
+
+/**
+ * pvr_mmu_op_context_create() - Create an MMU op context.
+ * @ctx: MMU context associated with owning VM context.
+ * @sgt: Scatter gather table containing pages pinned for use by this context.
+ * @sgt_offset: Start offset of the requested device-virtual memory mapping.
+ * @size: Size in bytes of the requested device-virtual memory mapping. For an
+ * unmapping, this should be zero so that no page tables are allocated.
+ *
+ * Returns:
+ * * Newly created MMU op context object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l1_alloc() or
+ *   pvr_page_table_l0_alloc().
+ */
+struct pvr_mmu_op_context *
+pvr_mmu_op_context_create(struct pvr_mmu_context *ctx, struct sg_table *sgt,
+ u64 sgt_offset, u64 size)
+{
+ int err;
+
+ struct pvr_mmu_op_context *op_ctx =
+ kzalloc(sizeof(*op_ctx), GFP_KERNEL);
+
+ if (!op_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ op_ctx->mmu_ctx = ctx;
+ op_ctx->map.sgt = sgt;
+ op_ctx->map.sgt_offset = sgt_offset;
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+
+ if (size) {
+ /*
+ * The number of page table objects we need to prealloc is
+ * indicated by the mapping size, start offset and the sizes
+ * of the areas mapped per PT or PD. The range calculation is
+ * identical to that for the index into a table for a device
+ * address, so we reuse those functions here.
+ */
+ const u32 l1_start_idx = pvr_page_table_l2_idx(sgt_offset);
+ const u32 l1_end_idx = pvr_page_table_l2_idx(sgt_offset + size);
+ const u32 l1_count = l1_end_idx - l1_start_idx + 1;
+ const u32 l0_start_idx = pvr_page_table_l1_idx(sgt_offset);
+ const u32 l0_end_idx = pvr_page_table_l1_idx(sgt_offset + size);
+ const u32 l0_count = l0_end_idx - l0_start_idx + 1;
+
+ /*
+ * Alloc and push page table entries until we have enough of
+ * each type, ending with linked lists of l0 and l1 entries in
+ * reverse order.
+ */
+ for (int i = 0; i < l1_count; i++) {
+ struct pvr_page_table_l1 *l1_tmp =
+ pvr_page_table_l1_alloc(ctx);
+
+ err = PTR_ERR_OR_ZERO(l1_tmp);
+ if (err)
+ goto err_cleanup;
+
+ l1_tmp->next_free = op_ctx->map.l1_prealloc_tables;
+ op_ctx->map.l1_prealloc_tables = l1_tmp;
+ }
+
+ for (int i = 0; i < l0_count; i++) {
+ struct pvr_page_table_l0 *l0_tmp =
+ pvr_page_table_l0_alloc(ctx);
+
+ err = PTR_ERR_OR_ZERO(l0_tmp);
+ if (err)
+ goto err_cleanup;
+
+ l0_tmp->next_free = op_ctx->map.l0_prealloc_tables;
+ op_ctx->map.l0_prealloc_tables = l0_tmp;
+ }
+ }
+
+ return op_ctx;
+
+err_cleanup:
+ pvr_mmu_op_context_destroy(op_ctx);
+
+ return ERR_PTR(err);
+}
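+
+/*
+ * Worked example (assuming the default 4KiB device page size, where an L1
+ * (page directory) entry spans 2MiB and an L2 (page catalogue) entry spans
+ * 1GiB): a request with sgt_offset = 0x1ff000 and size = 0x2000 gives
+ *
+ *   l0_start_idx = pvr_page_table_l1_idx(0x1ff000) = 0;
+ *   l0_end_idx   = pvr_page_table_l1_idx(0x201000) = 1;
+ *   l0_count     = 2;  // the 8KiB range straddles a 2MiB boundary
+ *
+ *   l1_count     = 1;  // both ends fall within the same 1GiB L2 entry
+ *
+ * so two L0 tables and one L1 table are preallocated even though only two
+ * device pages are being mapped.
+ */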
+
+/**
+ * pvr_mmu_op_context_unmap_curr_page() - Unmap pages from a memory context
+ * starting from the current page of an MMU op context.
+ * @op_ctx: Target MMU op context pointing at the first page to unmap.
+ * @nr_pages: Number of pages to unmap.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while advancing @op_ctx.curr_page with
+ * pvr_mmu_op_context_next_page() (except -%ENXIO).
+ */
+static int
+pvr_mmu_op_context_unmap_curr_page(struct pvr_mmu_op_context *op_ctx,
+ u64 nr_pages)
+{
+ int err;
+
+ if (nr_pages == 0)
+ return 0;
+
+ /*
+ * Destroy first page outside loop, as it doesn't require a page
+ * advance beforehand. If the L0 page table reference in
+ * @op_ctx.curr_page is %NULL, there cannot be a mapped page at
+ * @op_ctx.curr_page (so skip ahead).
+ */
+ if (op_ctx->curr_page.l0_table)
+ pvr_page_destroy(op_ctx);
+
+ for (u64 page = 1; page < nr_pages; ++page) {
+ err = pvr_mmu_op_context_next_page(op_ctx, false);
+ /*
+ * If the page table tree structure at @op_ctx.curr_page is
+ * incomplete, skip ahead. We don't care about unmapping pages
+ * that cannot exist.
+ *
+ * FIXME: This could be made more efficient by jumping ahead
+ * using pvr_mmu_op_context_set_curr_page().
+ */
+ if (err == -ENXIO)
+ continue;
+ else if (err)
+ return err;
+
+ pvr_page_destroy(op_ctx);
+ }
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_unmap() - Unmap pages from a memory context.
+ * @op_ctx: Target MMU op context.
+ * @device_addr: First device-virtual address to unmap.
+ * @size: Size in bytes to unmap.
+ *
+ * The number of device pages unmapped is @size / %PVR_DEVICE_PAGE_SIZE.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_mmu_op_context_set_curr_page(), or
+ * * Any error code returned by pvr_mmu_op_context_unmap_curr_page().
+ */
+int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size)
+{
+ int err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, false);
+
+ if (err)
+ return err;
+
+ return pvr_mmu_op_context_unmap_curr_page(op_ctx,
+ size >> PVR_DEVICE_PAGE_SHIFT);
+}
+
+/**
+ * pvr_mmu_map_sgl() - Map part of a scatter-gather table entry to
+ * device-virtual memory.
+ * @op_ctx: Target MMU op context pointing to the first page that should be
+ * mapped.
+ * @sgl: Target scatter-gather table entry.
+ * @offset: Offset into @sgl to map from. Must result in a starting address
+ * from @sgl which is CPU page-aligned.
+ * @size: Size of the memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @page_flags: Page options to be applied to every device-virtual memory page
+ * in the created mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the range specified by @offset and @size is not completely
+ * within @sgl, or
+ * * Any error encountered while creating a page with pvr_page_create(), or
+ * * Any error encountered while advancing @op_ctx.curr_page with
+ * pvr_mmu_op_context_next_page().
+ */
+static int
+pvr_mmu_map_sgl(struct pvr_mmu_op_context *op_ctx, struct scatterlist *sgl,
+ u64 offset, u64 size, struct pvr_page_flags_raw page_flags)
+{
+ const unsigned int pages = size >> PVR_DEVICE_PAGE_SHIFT;
+ dma_addr_t dma_addr = sg_dma_address(sgl) + offset;
+ const unsigned int dma_len = sg_dma_len(sgl);
+ struct pvr_page_table_ptr ptr_copy;
+ unsigned int page;
+ int err;
+
+ if (size > dma_len || offset > dma_len - size)
+ return -EINVAL;
+
+ /*
+ * Before progressing, save a copy of the start pointer so we can use
+ * it again if we enter an error state and have to destroy pages.
+ */
+ memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
+
+ /*
+ * Create first page outside loop, as it doesn't require a page advance
+ * beforehand.
+ */
+ err = pvr_page_create(op_ctx, dma_addr, page_flags);
+ if (err)
+ return err;
+
+ for (page = 1; page < pages; ++page) {
+ err = pvr_mmu_op_context_next_page(op_ctx, true);
+ if (err)
+ goto err_destroy_pages;
+
+ dma_addr += PVR_DEVICE_PAGE_SIZE;
+
+ err = pvr_page_create(op_ctx, dma_addr, page_flags);
+ if (err)
+ goto err_destroy_pages;
+ }
+
+ return 0;
+
+err_destroy_pages:
+ memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
+ err = pvr_mmu_op_context_unmap_curr_page(op_ctx, page);
+
+ return err;
+}
+
+/**
+ * pvr_mmu_map() - Map an object's virtual memory to physical memory.
+ * @op_ctx: Target MMU op context.
+ * @size: Size of memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @flags: Flags from pvr_gem_object associated with the mapping.
+ * @device_addr: Virtual device address to map to. Must be device page-aligned.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if @size or @op_ctx->map.sgt_offset is not device page-aligned,
+ *   or if the page tables for @device_addr could not be loaded, or
+ * * Any error code returned by pvr_mmu_map_sgl(), or
+ * * Any error code returned by pvr_mmu_op_context_next_page().
+ */
+int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
+ u64 device_addr)
+{
+ struct pvr_page_table_ptr ptr_copy;
+ struct pvr_page_flags_raw flags_raw;
+ struct scatterlist *sgl;
+ u64 mapped_size = 0;
+ unsigned int count;
+ int err;
+
+ if (!size)
+ return 0;
+
+ if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK)
+ return -EINVAL;
+
+ err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, true);
+ if (err)
+ return -EINVAL;
+
+ memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
+
+ flags_raw = pvr_page_flags_raw_create(false, false,
+ flags & DRM_PVR_BO_BYPASS_DEVICE_CACHE,
+ flags & DRM_PVR_BO_PM_FW_PROTECT);
+
+ /* Map scatter gather table */
+ for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) {
+ const size_t sgl_len = sg_dma_len(sgl);
+ u64 sgl_offset, map_sgl_len;
+
+ if (sgl_len <= op_ctx->map.sgt_offset) {
+ op_ctx->map.sgt_offset -= sgl_len;
+ continue;
+ }
+
+ sgl_offset = op_ctx->map.sgt_offset;
+ map_sgl_len = min_t(u64, sgl_len - sgl_offset, size - mapped_size);
+
+ err = pvr_mmu_map_sgl(op_ctx, sgl, sgl_offset, map_sgl_len,
+ flags_raw);
+ if (err)
+ break;
+
+ /*
+ * Flag the L0 page table as requiring a flush when the MMU op
+ * context is destroyed.
+ */
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+
+ op_ctx->map.sgt_offset = 0;
+ mapped_size += map_sgl_len;
+
+ if (mapped_size >= size)
+ break;
+
+ err = pvr_mmu_op_context_next_page(op_ctx, true);
+ if (err)
+ break;
+ }
+
+ if (err && mapped_size) {
+ memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
+ pvr_mmu_op_context_unmap_curr_page(op_ctx,
+ mapped_size >> PVR_DEVICE_PAGE_SHIFT);
+ }
+
+ return err;
+}
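+
+/*
+ * Illustrative sketch of a typical caller (the names mmu_ctx, sgt, obj_size,
+ * bo_flags and device_addr are placeholders, not driver symbols):
+ *
+ *   op_ctx = pvr_mmu_op_context_create(mmu_ctx, sgt, 0, obj_size);
+ *   if (IS_ERR(op_ctx))
+ *           return PTR_ERR(op_ctx);
+ *
+ *   err = pvr_mmu_map(op_ctx, obj_size, bo_flags, device_addr);
+ *
+ *   pvr_mmu_op_context_destroy(op_ctx);  // triggers any deferred MMU flush
+ *
+ * with obj_size a non-zero multiple of PVR_DEVICE_PAGE_SIZE and device_addr
+ * device page-aligned, as required by the checks at the top of pvr_mmu_map().
+ */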
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.h b/drivers/gpu/drm/imagination/pvr_mmu.h
new file mode 100644
index 000000000000..a8ecd460168d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_mmu.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_MMU_H
+#define PVR_MMU_H
+
+#include <linux/memory.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+
+/* Forward declaration from "pvr_mmu.c" */
+struct pvr_mmu_context;
+struct pvr_mmu_op_context;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <linux/scatterlist.h> */
+struct sg_table;
+
+/**
+ * DOC: Public API (constants)
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SIZE
+ *
+ * Fixed page size referenced by leaf nodes in the page table tree
+ * structure. In the current implementation, this value is pegged to the
+ * CPU page size (%PAGE_SIZE). It is therefore an error to specify a CPU
+ * page size which is not also a supported device page size. The supported
+ * device page sizes are: 4KiB, 16KiB, 64KiB, 256KiB, 1MiB and 2MiB.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SHIFT
+ *
+ * Shift value used to efficiently multiply or divide by
+ * %PVR_DEVICE_PAGE_SIZE.
+ *
+ * This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_MASK
+ *
+ * Mask used to round a value down to the nearest multiple of
+ * %PVR_DEVICE_PAGE_SIZE. When bitwise negated, it will indicate whether a
+ * value is already a multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ * This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ */
+
+/* PVR_DEVICE_PAGE_SIZE determines the page size */
+#define PVR_DEVICE_PAGE_SIZE (PAGE_SIZE)
+#define PVR_DEVICE_PAGE_SHIFT (PAGE_SHIFT)
+#define PVR_DEVICE_PAGE_MASK (PAGE_MASK)
+
+/**
+ * DOC: Page table index utilities (constants)
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_SPACE_SIZE
+ *
+ * Size of device-virtual address space which can be represented in the page
+ * table structure.
+ *
+ * This value is checked at runtime against
+ * &pvr_device_features.virtual_address_space_bits by
+ * pvr_vm_create_context(), which will return an error if the feature value
+ * does not match this constant.
+ *
+ * .. admonition:: Future work
+ *
+ * It should be possible to support other values of
+ * &pvr_device_features.virtual_address_space_bits, but so far no
+ * hardware has been created which advertises an unsupported value.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_BITS
+ *
+ * Number of bits needed to represent any value less than
+ * %PVR_PAGE_TABLE_ADDR_SPACE_SIZE exactly.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_MASK
+ *
+ * Bitmask of device-virtual addresses which are valid in the page table
+ * structure.
+ *
+ * This value is derived from %PVR_PAGE_TABLE_ADDR_SPACE_SIZE, so the same
+ * notes on that constant apply here.
+ */
+#define PVR_PAGE_TABLE_ADDR_SPACE_SIZE SZ_1T
+#define PVR_PAGE_TABLE_ADDR_BITS __ffs(PVR_PAGE_TABLE_ADDR_SPACE_SIZE)
+#define PVR_PAGE_TABLE_ADDR_MASK (PVR_PAGE_TABLE_ADDR_SPACE_SIZE - 1)
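+
+/*
+ * For illustration, one way a caller could validate a device-virtual range
+ * against these constants before walking page tables (overflow of
+ * device_addr + size is assumed to have been ruled out separately):
+ *
+ *   bool aligned = !((device_addr | size) & ~PVR_DEVICE_PAGE_MASK);
+ *   bool in_range = !((device_addr + size - 1) & ~PVR_PAGE_TABLE_ADDR_MASK);
+ */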
+
+void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev);
+int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait);
+
+struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev);
+void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx);
+
+dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx);
+
+void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx);
+struct pvr_mmu_op_context *
+pvr_mmu_op_context_create(struct pvr_mmu_context *ctx,
+ struct sg_table *sgt, u64 sgt_offset, u64 size);
+
+int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
+ u64 device_addr);
+int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size);
+
+#endif /* PVR_MMU_H */
diff --git a/drivers/gpu/drm/imagination/pvr_params.c b/drivers/gpu/drm/imagination/pvr_params.c
new file mode 100644
index 000000000000..b91759f362c5
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_params.h"
+
+#include <linux/cache.h>
+#include <linux/moduleparam.h>
+
+static struct pvr_device_params pvr_device_param_defaults __read_mostly = {
+#define X(type_, name_, value_, desc_, ...) .name_ = (value_),
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+#define PVR_DEVICE_PARAM_NAMED(name_, type_, desc_) \
+ module_param_named(name_, pvr_device_param_defaults.name_, type_, \
+ 0400); \
+ MODULE_PARM_DESC(name_, desc_);
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_MODPARAM uint
+
+#define X(type_, name_, value_, desc_, ...) \
+ PVR_DEVICE_PARAM_NAMED(name_, PVR_PARAM_TYPE_##type_##_MODPARAM, desc_);
+PVR_DEVICE_PARAMS
+#undef X
+
+int
+pvr_device_params_init(struct pvr_device_params *params)
+{
+ /*
+ * If heap-allocated parameters are added in the future (e.g.
+ * modparam's charp type), they must be handled specially here (via
+ * kstrdup() in the case of charp). Since that's not necessary yet,
+ * a straight copy will do for now. This change will also require a
+ * pvr_device_params_fini() function to free any heap-allocated copies.
+ */
+
+ *params = pvr_device_param_defaults;
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#include "pvr_device.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/stddef.h>
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_FMT "0x%08llx"
+
+#define X_SET(name_, mode_) X_SET_##mode_(name_)
+#define X_SET_DEF(name_, update_, mode_) X_SET_DEF_##mode_(name_, update_)
+
+#define X_SET_RO(name_) NULL
+#define X_SET_RW(name_) __pvr_device_param_##name_##set
+
+#define X_SET_DEF_RO(name_, update_)
+#define X_SET_DEF_RW(name_, update_) \
+ static int \
+ X_SET_RW(name_)(void *data, u64 val) \
+ { \
+ struct pvr_device *pvr_dev = data; \
+ /* This is not just (update_) to suppress -Waddress. */ \
+ if ((void *)(update_) != NULL) \
+ (update_)(pvr_dev, pvr_dev->params.name_, val); \
+ pvr_dev->params.name_ = val; \
+ return 0; \
+ }
+
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ static int \
+ __pvr_device_param_##name_##_get(void *data, u64 *val) \
+ { \
+ struct pvr_device *pvr_dev = data; \
+ *val = pvr_dev->params.name_; \
+ return 0; \
+ } \
+ X_SET_DEF(name_, update_, mode_) \
+ static int \
+ __pvr_device_param_##name_##_open(struct inode *inode, \
+ struct file *file) \
+ { \
+ __simple_attr_check_format(PVR_PARAM_TYPE_##type_##_FMT, \
+ 0ull); \
+ return simple_attr_open(inode, file, \
+ __pvr_device_param_##name_##_get, \
+ X_SET(name_, mode_), \
+ PVR_PARAM_TYPE_##type_##_FMT); \
+ }
+PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_SET
+#undef X_SET_RO
+#undef X_SET_RW
+#undef X_SET_DEF
+#undef X_SET_DEF_RO
+#undef X_SET_DEF_RW
+
+static struct {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ const struct file_operations name_;
+ PVR_DEVICE_PARAMS
+#undef X
+} pvr_device_param_debugfs_fops = {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ .name_ = { \
+ .owner = THIS_MODULE, \
+ .open = __pvr_device_param_##name_##_open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+ .write = simple_attr_write, \
+ .llseek = generic_file_llseek, \
+ },
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+void
+pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+#define X_MODE(mode_) X_MODE_##mode_
+#define X_MODE_RO 0400
+#define X_MODE_RW 0600
+
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ debugfs_create_file(#name_, X_MODE(mode_), dir, pvr_dev, \
+ &pvr_device_param_debugfs_fops.name_);
+ PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_MODE
+#undef X_MODE_RO
+#undef X_MODE_RW
+}
+#endif
diff --git a/drivers/gpu/drm/imagination/pvr_params.h b/drivers/gpu/drm/imagination/pvr_params.h
new file mode 100644
index 000000000000..5807915b456b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_PARAMS_H
+#define PVR_PARAMS_H
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/cache.h>
+#include <linux/compiler_attributes.h>
+
+/*
+ * This is the definitive list of types allowed in the definition of
+ * %PVR_DEVICE_PARAMS.
+ */
+#define PVR_PARAM_TYPE_X32_C u32
+
+/*
+ * This macro defines all device-specific parameters; that is parameters which
+ * are set independently per device.
+ *
+ * The X-macro accepts the following arguments. Arguments marked with [debugfs]
+ * are ignored when debugfs is disabled; values used for these arguments may
+ * safely be gated behind CONFIG_DEBUG_FS.
+ *
+ * @type_: The definitive list of allowed values is PVR_PARAM_TYPE_*_C.
+ * @name_: Name of the parameter. This is used both as the field name in C and
+ * stringified as the parameter name.
+ * @value_: Initial/default value.
+ * @desc_: String literal used as help text to describe the usage of this
+ * parameter.
+ * @mode_: [debugfs] One of {RO,RW}. The access mode of the debugfs entry for
+ * this parameter.
+ * @update_: [debugfs] When debugfs support is enabled, parameters may be
+ * updated at runtime. When this happens, this function will be
+ * called to allow changes to propagate. The signature of this
+ * function is:
+ *
+ * void (*)(struct pvr_device *pvr_dev, T old_val, T new_val)
+ *
+ * Where T is the C type associated with @type_.
+ *
+ * If @mode_ does not allow write access, this function will never be
+ * called. In this case, or if no update callback is required, you
+ * should specify NULL for this argument.
+ */
+#define PVR_DEVICE_PARAMS \
+ X(X32, fw_trace_mask, ROGUE_FWIF_LOG_TYPE_NONE, \
+ "Enable FW trace for the specified groups. Specifying 0 disables " \
+ "all FW tracing.", \
+ RW, pvr_fw_trace_mask_update)
+
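+/*
+ * For illustration, with the single fw_trace_mask entry above, the X-macro in
+ * struct pvr_device_params below expands to roughly:
+ *
+ *   struct pvr_device_params {
+ *           u32 fw_trace_mask;
+ *   };
+ *
+ * and the module-parameter expansion in pvr_params.c becomes approximately:
+ *
+ *   module_param_named(fw_trace_mask,
+ *                      pvr_device_param_defaults.fw_trace_mask, uint, 0400);
+ *   MODULE_PARM_DESC(fw_trace_mask, "Enable FW trace for the specified "
+ *                    "groups. Specifying 0 disables all FW tracing.");
+ */
+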
+struct pvr_device_params {
+#define X(type_, name_, value_, desc_, ...) \
+ PVR_PARAM_TYPE_##type_##_C name_;
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+int pvr_device_params_init(struct pvr_device_params *params);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_PARAMS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
new file mode 100644
index 000000000000..ba7816fd28ec
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_power.h"
+#include "pvr_queue.h"
+#include "pvr_rogue_fwif.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */
+
+#define WATCHDOG_TIME_MS (500)
+
+/**
+ * pvr_device_lost() - Mark GPU device as lost
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This will cause the DRM device to be unplugged.
+ */
+void
+pvr_device_lost(struct pvr_device *pvr_dev)
+{
+ if (!pvr_dev->lost) {
+ pvr_dev->lost = true;
+ drm_dev_unplug(from_pvr_device(pvr_dev));
+ }
+}
+
+static int
+pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ u32 slot_nr;
+ u32 value;
+ int err;
+
+ WRITE_ONCE(*fw_dev->power_sync, 0);
+
+ err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
+ if (err)
+ return err;
+
+ /* Wait for FW to acknowledge. */
+ return readl_poll_timeout(pvr_dev->fw_dev.power_sync, value, value != 0, 100,
+ POWER_SYNC_TIMEOUT_US);
+}
+
+static int
+pvr_power_request_idle(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_kccb_cmd pow_cmd;
+
+ /* Send FORCED_IDLE request to FW. */
+ pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+ pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
+ pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;
+
+ return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_kccb_cmd pow_cmd;
+
+ /* Send POW_OFF request to firmware. */
+ pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+ pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
+ pow_cmd.cmd_data.pow_data.power_req_data.forced = true;
+
+ return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
+{
+ if (!hard_reset) {
+ int err;
+
+ cancel_delayed_work_sync(&pvr_dev->watchdog.work);
+
+ err = pvr_power_request_idle(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_power_request_pwr_off(pvr_dev);
+ if (err)
+ return err;
+ }
+
+ return pvr_fw_stop(pvr_dev);
+}
+
+static int
+pvr_power_fw_enable(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_fw_start(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_wait_for_fw_boot(pvr_dev);
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
+ pvr_fw_stop(pvr_dev);
+ return err;
+ }
+
+ queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
+ msecs_to_jiffies(WATCHDOG_TIME_MS));
+
+ return 0;
+}
+
+bool
+pvr_power_is_idle(struct pvr_device *pvr_dev)
+{
+ /*
+ * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
+ * started processing it yet. So also check the KCCB status.
+ */
+ enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
+ bool kccb_idle = pvr_kccb_is_idle(pvr_dev);
+
+ return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
+}
+
+static bool
+pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
+{
+ /* Check KCCB commands are progressing. */
+ u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
+ bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);
+
+ if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
+ pvr_dev->watchdog.kccb_stall_count++;
+
+ /*
+ * If we have commands pending with no progress for 2 consecutive polls then
+ * consider KCCB command processing stalled.
+ */
+ if (pvr_dev->watchdog.kccb_stall_count == 2) {
+ pvr_dev->watchdog.kccb_stall_count = 0;
+ return true;
+ }
+ } else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
+ bool has_active_contexts;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ has_active_contexts = !list_empty(&pvr_dev->queues.active);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ if (has_active_contexts) {
+ /* Send a HEALTH_CHECK command so we can verify FW is still alive. */
+ struct rogue_fwif_kccb_cmd health_check_cmd;
+
+ health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;
+
+ pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
+ }
+ } else {
+ pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
+ pvr_dev->watchdog.kccb_stall_count = 0;
+ }
+
+ return false;
+}
+
+static void
+pvr_watchdog_worker(struct work_struct *work)
+{
+ struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
+ watchdog.work.work);
+ bool stalled;
+
+ if (pvr_dev->lost)
+ return;
+
+ if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
+ goto out_requeue;
+
+ if (!pvr_dev->fw_dev.booted)
+ goto out_pm_runtime_put;
+
+ stalled = pvr_watchdog_kccb_stalled(pvr_dev);
+
+ if (stalled) {
+ drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
+
+ pvr_power_reset(pvr_dev, true);
+ /* Device may be lost at this point. */
+ }
+
+out_pm_runtime_put:
+ pm_runtime_put(from_pvr_device(pvr_dev)->dev);
+
+out_requeue:
+ if (!pvr_dev->lost) {
+ queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
+ msecs_to_jiffies(WATCHDOG_TIME_MS));
+ }
+}
+
+/**
+ * pvr_watchdog_init() - Initialise watchdog for device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ENOMEM on out of memory.
+ */
+int
+pvr_watchdog_init(struct pvr_device *pvr_dev)
+{
+ INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);
+
+ return 0;
+}
+
+int
+pvr_power_device_suspend(struct device *dev)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ int err = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ if (pvr_dev->fw_dev.booted) {
+ err = pvr_power_fw_disable(pvr_dev, false);
+ if (err)
+ goto err_drm_dev_exit;
+ }
+
+ clk_disable_unprepare(pvr_dev->mem_clk);
+ clk_disable_unprepare(pvr_dev->sys_clk);
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+int
+pvr_power_device_resume(struct device *dev)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ err = clk_prepare_enable(pvr_dev->core_clk);
+ if (err)
+ goto err_drm_dev_exit;
+
+ err = clk_prepare_enable(pvr_dev->sys_clk);
+ if (err)
+ goto err_core_clk_disable;
+
+ err = clk_prepare_enable(pvr_dev->mem_clk);
+ if (err)
+ goto err_sys_clk_disable;
+
+ if (pvr_dev->fw_dev.booted) {
+ err = pvr_power_fw_enable(pvr_dev);
+ if (err)
+ goto err_mem_clk_disable;
+ }
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_mem_clk_disable:
+ clk_disable_unprepare(pvr_dev->mem_clk);
+
+err_sys_clk_disable:
+ clk_disable_unprepare(pvr_dev->sys_clk);
+
+err_core_clk_disable:
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+int
+pvr_power_device_idle(struct device *dev)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+
+ return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
+}
+
+/**
+ * pvr_power_reset() - Reset the GPU
+ * @pvr_dev: Device pointer
+ * @hard_reset: %true for hard reset, %false for soft reset
+ *
+ * If @hard_reset is %false and the FW processor fails to respond during the reset process, this
+ * function will attempt a hard reset.
+ *
+ * If a hard reset fails then the GPU device is reported as lost.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_power_get(), pvr_power_fw_disable() or
+ *   pvr_power_fw_enable().
+ */
+int
+pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
+{
+ bool queues_disabled = false;
+ int err;
+
+ /*
+ * Take a power reference during the reset. This should prevent any interference with the
+ * power state during reset.
+ */
+ WARN_ON(pvr_power_get(pvr_dev));
+
+ down_write(&pvr_dev->reset_sem);
+
+ if (pvr_dev->lost) {
+ err = -EIO;
+ goto err_up_write;
+ }
+
+ /* Disable IRQs for the duration of the reset. */
+ disable_irq(pvr_dev->irq);
+
+ do {
+ if (hard_reset) {
+ pvr_queue_device_pre_reset(pvr_dev);
+ queues_disabled = true;
+ }
+
+ err = pvr_power_fw_disable(pvr_dev, hard_reset);
+ if (!err) {
+ if (hard_reset) {
+ pvr_dev->fw_dev.booted = false;
+ WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev));
+
+ err = pvr_fw_hard_reset(pvr_dev);
+ if (err)
+ goto err_device_lost;
+
+ err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev);
+ pvr_dev->fw_dev.booted = true;
+ if (err)
+ goto err_device_lost;
+ } else {
+ /* Clear the FW faulted flags. */
+ pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &=
+ ~(ROGUE_FWIF_HWR_FW_FAULT |
+ ROGUE_FWIF_HWR_RESTART_REQUESTED);
+ }
+
+ pvr_fw_irq_clear(pvr_dev);
+
+ err = pvr_power_fw_enable(pvr_dev);
+ }
+
+ if (err && hard_reset)
+ goto err_device_lost;
+
+ if (err && !hard_reset) {
+ drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
+ hard_reset = true;
+ }
+ } while (err);
+
+ if (queues_disabled)
+ pvr_queue_device_post_reset(pvr_dev);
+
+ enable_irq(pvr_dev->irq);
+
+ up_write(&pvr_dev->reset_sem);
+
+ pvr_power_put(pvr_dev);
+
+ return 0;
+
+err_device_lost:
+ drm_err(from_pvr_device(pvr_dev), "GPU device lost");
+ pvr_device_lost(pvr_dev);
+
+ /* Leave IRQs disabled if the device is lost. */
+
+ if (queues_disabled)
+ pvr_queue_device_post_reset(pvr_dev);
+
+err_up_write:
+ up_write(&pvr_dev->reset_sem);
+
+ pvr_power_put(pvr_dev);
+
+ return err;
+}
+
+/**
+ * pvr_watchdog_fini() - Shutdown watchdog for device
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_watchdog_fini(struct pvr_device *pvr_dev)
+{
+ cancel_delayed_work_sync(&pvr_dev->watchdog.work);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
new file mode 100644
index 000000000000..9a9312dcb2da
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_POWER_H
+#define PVR_POWER_H
+
+#include "pvr_device.h"
+
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+int pvr_watchdog_init(struct pvr_device *pvr_dev);
+void pvr_watchdog_fini(struct pvr_device *pvr_dev);
+
+void pvr_device_lost(struct pvr_device *pvr_dev);
+
+bool pvr_power_is_idle(struct pvr_device *pvr_dev);
+
+int pvr_power_device_suspend(struct device *dev);
+int pvr_power_device_resume(struct device *dev);
+int pvr_power_device_idle(struct device *dev);
+
+int pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset);
+
+static __always_inline int
+pvr_power_get(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ return pm_runtime_resume_and_get(drm_dev->dev);
+}
+
+static __always_inline int
+pvr_power_put(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ return pm_runtime_put(drm_dev->dev);
+}
+
+#endif /* PVR_POWER_H */
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
new file mode 100644
index 000000000000..5ed9c98fb599
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -0,0 +1,1432 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+
+#include "pvr_cccb.h"
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_job.h"
+#include "pvr_queue.h"
+#include "pvr_vm.h"
+
+#include "pvr_rogue_fwif_client.h"
+
+#define MAX_DEADLINE_MS 30000
+
+#define CTX_COMPUTE_CCCB_SIZE_LOG2 15
+#define CTX_FRAG_CCCB_SIZE_LOG2 15
+#define CTX_GEOM_CCCB_SIZE_LOG2 15
+#define CTX_TRANSFER_CCCB_SIZE_LOG2 15
+
+static int get_xfer_ctx_state_size(struct pvr_device *pvr_dev)
+{
+ u32 num_isp_store_registers;
+
+ if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) {
+ num_isp_store_registers = 1;
+ } else {
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers);
+ if (WARN_ON(err))
+ return err;
+ }
+
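+	/* Total size is the fixed frag context state header plus one ISP store
+	 * register entry per pipe; the sizeof() on a member of a NULL pointer is
+	 * evaluated at compile time and never dereferenced.
+	 */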
+ return sizeof(struct rogue_fwif_frag_ctx_state) +
+ (num_isp_store_registers *
+ sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0]));
+}
+
+static int get_frag_ctx_state_size(struct pvr_device *pvr_dev)
+{
+ u32 num_isp_store_registers;
+ int err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) {
+ err = PVR_FEATURE_VALUE(pvr_dev, num_raster_pipes, &num_isp_store_registers);
+ if (WARN_ON(err))
+ return err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support)) {
+ u32 xpu_max_slaves;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, xpu_max_slaves, &xpu_max_slaves);
+ if (WARN_ON(err))
+ return err;
+
+ num_isp_store_registers *= (1 + xpu_max_slaves);
+ }
+ } else {
+ err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers);
+ if (WARN_ON(err))
+ return err;
+ }
+
+ return sizeof(struct rogue_fwif_frag_ctx_state) +
+ (num_isp_store_registers *
+ sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0]));
+}
+
+static int get_ctx_state_size(struct pvr_device *pvr_dev, enum drm_pvr_job_type type)
+{
+ switch (type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return sizeof(struct rogue_fwif_geom_ctx_state);
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return get_frag_ctx_state_size(pvr_dev);
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return sizeof(struct rogue_fwif_compute_ctx_state);
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return get_xfer_ctx_state_size(pvr_dev);
+ }
+
+ WARN(1, "Invalid queue type");
+ return -EINVAL;
+}
+
+static u32 get_ctx_offset(enum drm_pvr_job_type type)
+{
+ switch (type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return offsetof(struct rogue_fwif_fwrendercontext, geom_context);
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return offsetof(struct rogue_fwif_fwrendercontext, frag_context);
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return offsetof(struct rogue_fwif_fwcomputecontext, cdm_context);
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return offsetof(struct rogue_fwif_fwtransfercontext, tq_context);
+ }
+
+ return 0;
+}
+
+static const char *
+pvr_queue_fence_get_driver_name(struct dma_fence *f)
+{
+ return PVR_DRIVER_NAME;
+}
+
+static void pvr_queue_fence_release(struct dma_fence *f)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(f);
+}
+
+static const char *
+pvr_queue_job_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ switch (fence->queue->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return "geometry";
+
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return "fragment";
+
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return "compute";
+
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return "transfer";
+ }
+
+ WARN(1, "Invalid queue type");
+ return "invalid";
+}
+
+static const char *
+pvr_queue_cccb_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ switch (fence->queue->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return "geometry-cccb";
+
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return "fragment-cccb";
+
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return "compute-cccb";
+
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return "transfer-cccb";
+ }
+
+ WARN(1, "Invalid queue type");
+ return "invalid";
+}
+
+static const struct dma_fence_ops pvr_queue_job_fence_ops = {
+ .get_driver_name = pvr_queue_fence_get_driver_name,
+ .get_timeline_name = pvr_queue_job_fence_get_timeline_name,
+ .release = pvr_queue_fence_release,
+};
+
+/**
+ * to_pvr_queue_job_fence() - Return a pvr_queue_fence object if the fence is
+ * backed by a UFO.
+ * @f: The dma_fence to turn into a pvr_queue_fence.
+ *
+ * Return:
+ * * A non-NULL pvr_queue_fence object if the dma_fence is backed by a UFO, or
+ * * NULL otherwise.
+ */
+static struct pvr_queue_fence *
+to_pvr_queue_job_fence(struct dma_fence *f)
+{
+ struct drm_sched_fence *sched_fence = to_drm_sched_fence(f);
+
+ if (sched_fence)
+ f = sched_fence->parent;
+
+ if (f && f->ops == &pvr_queue_job_fence_ops)
+ return container_of(f, struct pvr_queue_fence, base);
+
+ return NULL;
+}
+
+static const struct dma_fence_ops pvr_queue_cccb_fence_ops = {
+ .get_driver_name = pvr_queue_fence_get_driver_name,
+ .get_timeline_name = pvr_queue_cccb_fence_get_timeline_name,
+ .release = pvr_queue_fence_release,
+};
+
+/**
+ * pvr_queue_fence_put() - Put wrapper for pvr_queue_fence objects.
+ * @f: The dma_fence object to put.
+ *
+ * If the pvr_queue_fence has been initialized, we call dma_fence_put(),
+ * otherwise we free the object with dma_fence_free(). This allows us
+ * to do the right thing before and after pvr_queue_fence_init() has been
+ * called.
+ */
+static void pvr_queue_fence_put(struct dma_fence *f)
+{
+ if (!f)
+ return;
+
+ if (WARN_ON(f->ops &&
+ f->ops != &pvr_queue_cccb_fence_ops &&
+ f->ops != &pvr_queue_job_fence_ops))
+ return;
+
+ /* If the fence hasn't been initialized yet, free the object directly. */
+ if (f->ops)
+ dma_fence_put(f);
+ else
+ dma_fence_free(f);
+}
+
+/**
+ * pvr_queue_fence_alloc() - Allocate a pvr_queue_fence fence object
+ *
+ * Call this function to allocate job CCCB and done fences. This only
+ * allocates the objects. Initialization happens when the underlying
+ * dma_fence object is to be returned to drm_sched (in prepare_job() or
+ * run_job()).
+ *
+ * Return:
+ * * A valid pointer if the allocation succeeds, or
+ * * NULL if the allocation fails.
+ */
+static struct dma_fence *
+pvr_queue_fence_alloc(void)
+{
+ struct pvr_queue_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ return &fence->base;
+}
+
+/**
+ * pvr_queue_fence_init() - Initializes a pvr_queue_fence object.
+ * @f: The fence to initialize
+ * @queue: The queue this fence belongs to.
+ * @fence_ops: The fence operations.
+ * @fence_ctx: The fence context.
+ *
+ * Wrapper around dma_fence_init() that takes care of initializing the
+ * pvr_queue_fence::queue field too.
+ */
+static void
+pvr_queue_fence_init(struct dma_fence *f,
+ struct pvr_queue *queue,
+ const struct dma_fence_ops *fence_ops,
+ struct pvr_queue_fence_ctx *fence_ctx)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ pvr_context_get(queue->ctx);
+ fence->queue = queue;
+ dma_fence_init(&fence->base, fence_ops,
+ &fence_ctx->lock, fence_ctx->id,
+ atomic_inc_return(&fence_ctx->seqno));
+}
+
+/**
+ * pvr_queue_cccb_fence_init() - Initializes a CCCB fence object.
+ * @fence: The fence to initialize.
+ * @queue: The queue this fence belongs to.
+ *
+ * Initializes a fence that can be used to wait for CCCB space.
+ *
+ * Should be called in the ::prepare_job() path, so the fence returned to
+ * drm_sched is valid.
+ */
+static void
+pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+{
+ pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops,
+ &queue->cccb_fence_ctx.base);
+}
+
+/**
+ * pvr_queue_job_fence_init() - Initializes a job done fence object.
+ * @fence: The fence to initialize.
+ * @queue: The queue this fence belongs to.
+ *
+ * Initializes a fence that will be signaled when the GPU is done executing
+ * a job.
+ *
+ * Should be called *before* the ::run_job() path, so the fence is initialised
+ * before being placed in the pending_list.
+ */
+static void
+pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+{
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
+}
+
+/**
+ * pvr_queue_fence_ctx_init() - Queue fence context initialization.
+ * @fence_ctx: The context to initialize
+ */
+static void
+pvr_queue_fence_ctx_init(struct pvr_queue_fence_ctx *fence_ctx)
+{
+ spin_lock_init(&fence_ctx->lock);
+ fence_ctx->id = dma_fence_context_alloc(1);
+ atomic_set(&fence_ctx->seqno, 0);
+}
+
+static u32 ufo_cmds_size(u32 elem_count)
+{
+	/* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS UFOs per UFO-related command. */
+ u32 full_cmd_count = elem_count / ROGUE_FWIF_CCB_CMD_MAX_UFOS;
+ u32 remaining_elems = elem_count % ROGUE_FWIF_CCB_CMD_MAX_UFOS;
+ u32 size = full_cmd_count *
+ pvr_cccb_get_size_of_cmd_with_hdr(ROGUE_FWIF_CCB_CMD_MAX_UFOS *
+ sizeof(struct rogue_fwif_ufo));
+
+ if (remaining_elems) {
+ size += pvr_cccb_get_size_of_cmd_with_hdr(remaining_elems *
+ sizeof(struct rogue_fwif_ufo));
+ }
+
+ return size;
+}
+
+static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
+{
+	/* One UFO cmd for the fence signaling, one UFO cmd per native fence wait,
+ * and a command for the job itself.
+ */
+ return ufo_cmds_size(1) + ufo_cmds_size(ufo_wait_count) +
+ pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
+}
+
+/**
+ * job_count_remaining_native_deps() - Count the number of non-signaled native dependencies.
+ * @job: Job to operate on.
+ *
+ * Returns: Number of non-signaled native deps remaining.
+ */
+static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
+{
+ unsigned long remaining_count = 0;
+ struct dma_fence *fence = NULL;
+ unsigned long index;
+
+ xa_for_each(&job->base.dependencies, index, fence) {
+ struct pvr_queue_fence *jfence;
+
+ jfence = to_pvr_queue_job_fence(fence);
+ if (!jfence)
+ continue;
+
+ if (!dma_fence_is_signaled(&jfence->base))
+ remaining_count++;
+ }
+
+ return remaining_count;
+}
+
+/**
+ * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
+ * @queue: The queue this job will be submitted to.
+ * @job: The job to get the CCCB fence on.
+ *
+ * The CCCB fence is a synchronization primitive allowing us to delay job
+ * submission until there's enough space in the CCCB to submit the job.
+ *
+ * Return:
+ * * NULL if there's enough space in the CCCB to submit this job, or
+ * * A valid dma_fence object otherwise.
+ */
+static struct dma_fence *
+pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
+{
+ struct pvr_queue_fence *cccb_fence;
+ unsigned int native_deps_remaining;
+
+ /* If the fence is NULL, that means we already checked that we had
+ * enough space in the cccb for our job.
+ */
+ if (!job->cccb_fence)
+ return NULL;
+
+ mutex_lock(&queue->cccb_fence_ctx.job_lock);
+
+ /* Count remaining native dependencies and check if the job fits in the CCCB. */
+ native_deps_remaining = job_count_remaining_native_deps(job);
+ if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
+ pvr_queue_fence_put(job->cccb_fence);
+ job->cccb_fence = NULL;
+ goto out_unlock;
+ }
+
+ /* There should be no job attached to the CCCB fence context:
+ * drm_sched_entity guarantees that jobs are submitted one at a time.
+ */
+ if (WARN_ON(queue->cccb_fence_ctx.job))
+ pvr_job_put(queue->cccb_fence_ctx.job);
+
+ queue->cccb_fence_ctx.job = pvr_job_get(job);
+
+ /* Initialize the fence before returning it. */
+ cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
+ if (!WARN_ON(cccb_fence->queue))
+ pvr_queue_cccb_fence_init(job->cccb_fence, queue);
+
+out_unlock:
+ mutex_unlock(&queue->cccb_fence_ctx.job_lock);
+
+ return dma_fence_get(job->cccb_fence);
+}
+
+/**
+ * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job.
+ * @queue: The queue this job will be submitted to.
+ * @job: The job to get the KCCB fence on.
+ *
+ * The KCCB fence is a synchronization primitive allowing us to delay job
+ * submission until there's enough space in the KCCB to submit the job.
+ *
+ * Return:
+ * * NULL if there's enough space in the KCCB to submit this job, or
+ * * A valid dma_fence object otherwise.
+ */
+static struct dma_fence *
+pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job)
+{
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+ struct dma_fence *kccb_fence = NULL;
+
+ /* If the fence is NULL, that means we already checked that we had
+ * enough space in the KCCB for our job.
+ */
+ if (!job->kccb_fence)
+ return NULL;
+
+ if (!WARN_ON(job->kccb_fence->ops)) {
+ kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence);
+ job->kccb_fence = NULL;
+ }
+
+ return kccb_fence;
+}
+
+static struct dma_fence *
+pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job)
+{
+ struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
+ job->paired_job : NULL;
+ struct dma_fence *f;
+ unsigned long index;
+
+ if (!frag_job)
+ return NULL;
+
+ xa_for_each(&frag_job->base.dependencies, index, f) {
+ /* Skip already signaled fences. */
+ if (dma_fence_is_signaled(f))
+ continue;
+
+ /* Skip our own fence. */
+ if (f == &job->base.s_fence->scheduled)
+ continue;
+
+ return dma_fence_get(f);
+ }
+
+ return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity);
+}
+
+/**
+ * pvr_queue_prepare_job() - Return the next internal dependency expressed as a dma_fence.
+ * @sched_job: The job to query the next internal dependency on
+ * @s_entity: The entity this job is queued on.
+ *
+ * After iterating over drm_sched_job::dependencies, drm_sched lets the driver return
+ * its own internal dependencies. We use this function to return our internal dependencies.
+ */
+static struct dma_fence *
+pvr_queue_prepare_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+ struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity);
+ struct dma_fence *internal_dep = NULL;
+
+ /*
+ * Initialize the done_fence, so we can signal it. This must be done
+ * here because otherwise by the time of run_job() the job will end up
+ * in the pending list without a valid fence.
+ */
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) {
+ /*
+		 * This will be called on a paired fragment job after it has been
+		 * submitted to the firmware. We can tell whether that is the case
+		 * from whether run_job() has already been called on the geometry
+		 * job (which issues the PM ref), and bail out early if so.
+ */
+ if (job->paired_job->has_pm_ref)
+ return NULL;
+
+ /*
+ * In this case we need to use the job's own ctx to initialise
+ * the done_fence. The other steps are done in the ctx of the
+ * paired geometry job.
+ */
+ pvr_queue_job_fence_init(job->done_fence,
+ job->ctx->queues.fragment);
+ } else {
+ pvr_queue_job_fence_init(job->done_fence, queue);
+ }
+
+ /* CCCB fence is used to make sure we have enough space in the CCCB to
+ * submit our commands.
+ */
+ internal_dep = pvr_queue_get_job_cccb_fence(queue, job);
+
+ /* KCCB fence is used to make sure we have a KCCB slot to queue our
+ * CMD_KICK.
+ */
+ if (!internal_dep)
+ internal_dep = pvr_queue_get_job_kccb_fence(queue, job);
+
+ /* Any extra internal dependency should be added here, using the following
+ * pattern:
+ *
+ * if (!internal_dep)
+ * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job);
+ */
+
+ /* The paired job fence should come last, when everything else is ready. */
+ if (!internal_dep)
+ internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job);
+
+ return internal_dep;
+}
+
+/**
+ * pvr_queue_update_active_state_locked() - Update the queue active state.
+ * @queue: Queue to update the state on.
+ *
+ * Locked version of pvr_queue_update_active_state(). Must be called with
+ * pvr_device::queues::lock held.
+ */
+static void pvr_queue_update_active_state_locked(struct pvr_queue *queue)
+{
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+
+ lockdep_assert_held(&pvr_dev->queues.lock);
+
+	/* The queue is temporarily out of any list when it's being reset,
+ * we don't want a call to pvr_queue_update_active_state_locked()
+ * to re-insert it behind our back.
+ */
+ if (list_empty(&queue->node))
+ return;
+
+ if (!atomic_read(&queue->in_flight_job_count))
+ list_move_tail(&queue->node, &pvr_dev->queues.idle);
+ else
+ list_move_tail(&queue->node, &pvr_dev->queues.active);
+}
+
+/**
+ * pvr_queue_update_active_state() - Update the queue active state.
+ * @queue: Queue to update the state on.
+ *
+ * Active state is based on the in_flight_job_count value.
+ *
+ * Updating the active state implies moving the queue in or out of the
+ * active queue list, which also defines whether the queue is checked
+ * or not when a FW event is received.
+ *
+ * This function should be called any time a job is submitted or its done
+ * fence is signaled.
+ */
+static void pvr_queue_update_active_state(struct pvr_queue *queue)
+{
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ pvr_queue_update_active_state_locked(queue);
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+static void pvr_queue_submit_job_to_cccb(struct pvr_job *job)
+{
+ struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
+ struct rogue_fwif_ufo ufos[ROGUE_FWIF_CCB_CMD_MAX_UFOS];
+ struct pvr_cccb *cccb = &queue->cccb;
+ struct pvr_queue_fence *jfence;
+ struct dma_fence *fence;
+ unsigned long index;
+ u32 ufo_count = 0;
+
+ /* We need to add the queue to the active list before updating the CCCB,
+ * otherwise we might miss the FW event informing us that something
+ * happened on this queue.
+ */
+ atomic_inc(&queue->in_flight_job_count);
+ pvr_queue_update_active_state(queue);
+
+ xa_for_each(&job->base.dependencies, index, fence) {
+ jfence = to_pvr_queue_job_fence(fence);
+ if (!jfence)
+ continue;
+
+ /* Skip the partial render fence, we will place it at the end. */
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job &&
+ &job->paired_job->base.s_fence->scheduled == fence)
+ continue;
+
+ if (dma_fence_is_signaled(&jfence->base))
+ continue;
+
+ pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj,
+ &ufos[ufo_count].addr);
+ ufos[ufo_count++].value = jfence->base.seqno;
+
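+		/* Flush a full batch of UFO waits; any remainder is written out
+		 * after the loop.
+		 */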
+ if (ufo_count == ARRAY_SIZE(ufos)) {
+ pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR,
+ sizeof(ufos), ufos, 0, 0);
+ ufo_count = 0;
+ }
+ }
+
+ /* Partial render fence goes last. */
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) {
+ jfence = to_pvr_queue_job_fence(job->paired_job->done_fence);
+ if (!WARN_ON(!jfence)) {
+ pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj,
+ &ufos[ufo_count].addr);
+ ufos[ufo_count++].value = job->paired_job->done_fence->seqno;
+ }
+ }
+
+ if (ufo_count) {
+ pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR,
+ sizeof(ufos[0]) * ufo_count, ufos, 0, 0);
+ }
+
+ if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) {
+ struct rogue_fwif_cmd_geom *cmd = job->cmd;
+
+ /* Reference value for the partial render test is the current queue fence
+ * seqno minus one.
+ */
+ pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj,
+ &cmd->partial_render_geom_frag_fence.addr);
+ cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1;
+ }
+
+ /* Submit job to FW */
+ pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd,
+ job->id, job->id);
+
+ /* Signal the job fence. */
+ pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr);
+ ufos[0].value = job->done_fence->seqno;
+ pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_UPDATE,
+ sizeof(ufos[0]), ufos, 0, 0);
+}
+
+/**
+ * pvr_queue_run_job() - Submit a job to the FW.
+ * @sched_job: The job to submit.
+ *
+ * This function is called when all non-native dependencies have been met and
+ * when the commands resulting from this job are guaranteed to fit in the CCCB.
+ */
+static struct dma_fence *pvr_queue_run_job(struct drm_sched_job *sched_job)
+{
+ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+ struct pvr_device *pvr_dev = job->pvr_dev;
+ int err;
+
+ /* The fragment job is issued along the geometry job when we use combined
+ * geom+frag kicks. When we get there, we should simply return the
+ * done_fence that's been initialized earlier.
+ */
+ if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT &&
+ job->done_fence->ops) {
+ return dma_fence_get(job->done_fence);
+ }
+
+ /* The only kind of jobs that can be paired are geometry and fragment, and
+	 * we bail out early if we see a fragment job that's paired with a geometry
+ * job.
+ * Paired jobs must also target the same context and point to the same
+ * HWRT.
+ */
+ if (WARN_ON(job->paired_job &&
+ (job->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
+ job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
+ job->hwrt != job->paired_job->hwrt ||
+ job->ctx != job->paired_job->ctx)))
+ return ERR_PTR(-EINVAL);
+
+ err = pvr_job_get_pm_ref(job);
+ if (WARN_ON(err))
+ return ERR_PTR(err);
+
+ if (job->paired_job) {
+ err = pvr_job_get_pm_ref(job->paired_job);
+ if (WARN_ON(err))
+ return ERR_PTR(err);
+ }
+
+ /* Submit our job to the CCCB */
+ pvr_queue_submit_job_to_cccb(job);
+
+ if (job->paired_job) {
+ struct pvr_job *geom_job = job;
+ struct pvr_job *frag_job = job->paired_job;
+ struct pvr_queue *geom_queue = job->ctx->queues.geometry;
+ struct pvr_queue *frag_queue = job->ctx->queues.fragment;
+
+ /* Submit the fragment job along the geometry job and send a combined kick. */
+ pvr_queue_submit_job_to_cccb(frag_job);
+ pvr_cccb_send_kccb_combined_kick(pvr_dev,
+ &geom_queue->cccb, &frag_queue->cccb,
+ pvr_context_get_fw_addr(geom_job->ctx) +
+ geom_queue->ctx_offset,
+ pvr_context_get_fw_addr(frag_job->ctx) +
+ frag_queue->ctx_offset,
+ job->hwrt,
+ frag_job->fw_ccb_cmd_type ==
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR);
+ } else {
+ struct pvr_queue *queue = container_of(job->base.sched,
+ struct pvr_queue, scheduler);
+
+ pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb,
+ pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset,
+ job->hwrt);
+ }
+
+ return dma_fence_get(job->done_fence);
+}
+
+static void pvr_queue_stop(struct pvr_queue *queue, struct pvr_job *bad_job)
+{
+ drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
+}
+
+static void pvr_queue_start(struct pvr_queue *queue)
+{
+ struct pvr_job *job;
+
+ /* Make sure we CPU-signal the UFO object, so other queues don't get
+ * blocked waiting on it.
+ */
+ *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno);
+
+ list_for_each_entry(job, &queue->scheduler.pending_list, base.list) {
+ if (dma_fence_is_signaled(job->done_fence)) {
+ /* Jobs might have completed after drm_sched_stop() was called.
+ * In that case, re-assign the parent field to the done_fence.
+ */
+ WARN_ON(job->base.s_fence->parent);
+ job->base.s_fence->parent = dma_fence_get(job->done_fence);
+ } else {
+ /* If we had unfinished jobs, flag the entity as guilty so no
+ * new job can be submitted.
+ */
+ atomic_set(&queue->ctx->faulty, 1);
+ }
+ }
+
+ drm_sched_start(&queue->scheduler, true);
+}
+
+/**
+ * pvr_queue_timedout_job() - Handle a job timeout event.
+ * @s_job: The job this timeout occurred on.
+ *
+ * FIXME: We don't do anything here to unblock the situation, we just stop+start
+ * the scheduler, and re-assign parent fences in the middle.
+ *
+ * Return:
+ * * DRM_GPU_SCHED_STAT_NOMINAL.
+ */
+static enum drm_gpu_sched_stat
+pvr_queue_timedout_job(struct drm_sched_job *s_job)
+{
+ struct drm_gpu_scheduler *sched = s_job->sched;
+ struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler);
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+ struct pvr_job *job;
+ u32 job_count = 0;
+
+ dev_err(sched->dev, "Job timeout\n");
+
+ /* Before we stop the scheduler, make sure the queue is out of any list, so
+ * any call to pvr_queue_update_active_state_locked() that might happen
+ * until the scheduler is really stopped doesn't end up re-inserting the
+ * queue in the active list. This would cause
+ * pvr_queue_signal_done_fences() and drm_sched_stop() to race with each
+ * other when accessing the pending_list, since drm_sched_stop() doesn't
+ * grab the job_list_lock when modifying the list (it's assuming the
+ * only other accessor is the scheduler, and it's safe to not grab the
+ * lock since it's stopped).
+ */
+ mutex_lock(&pvr_dev->queues.lock);
+ list_del_init(&queue->node);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ drm_sched_stop(sched, s_job);
+
+ /* Re-assign job parent fences. */
+ list_for_each_entry(job, &sched->pending_list, base.list) {
+ job->base.s_fence->parent = dma_fence_get(job->done_fence);
+ job_count++;
+ }
+ WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count);
+
+ /* Re-insert the queue in the proper list, and kick a queue processing
+ * operation if there were jobs pending.
+ */
+ mutex_lock(&pvr_dev->queues.lock);
+ if (!job_count) {
+ list_move_tail(&queue->node, &pvr_dev->queues.idle);
+ } else {
+ atomic_set(&queue->in_flight_job_count, job_count);
+ list_move_tail(&queue->node, &pvr_dev->queues.active);
+ pvr_queue_process(queue);
+ }
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ drm_sched_start(sched, true);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+/**
+ * pvr_queue_free_job() - Release the reference the scheduler had on a job object.
+ * @sched_job: Job object to free.
+ */
+static void pvr_queue_free_job(struct drm_sched_job *sched_job)
+{
+ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+
+ drm_sched_job_cleanup(sched_job);
+ job->paired_job = NULL;
+ pvr_job_put(job);
+}
+
+static const struct drm_sched_backend_ops pvr_queue_sched_ops = {
+ .prepare_job = pvr_queue_prepare_job,
+ .run_job = pvr_queue_run_job,
+ .timedout_job = pvr_queue_timedout_job,
+ .free_job = pvr_queue_free_job,
+};
+
+/**
+ * pvr_queue_fence_is_ufo_backed() - Check if a dma_fence is backed by a UFO object
+ * @f: Fence to test.
+ *
+ * A UFO-backed fence is a fence that can be signaled or waited upon FW-side.
+ * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue
+ * they are pushed to, but those fences are not directly exposed to the outside
+ * world, so we also need to check if the fence we're being passed is a
+ * drm_sched_fence that was coming from our driver.
+ */
+bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f)
+{
+ struct drm_sched_fence *sched_fence = f ? to_drm_sched_fence(f) : NULL;
+
+ if (sched_fence &&
+ sched_fence->sched->ops == &pvr_queue_sched_ops)
+ return true;
+
+ if (f && f->ops == &pvr_queue_job_fence_ops)
+ return true;
+
+ return false;
+}
+
+/**
+ * pvr_queue_signal_done_fences() - Signal done fences.
+ * @queue: Queue to check.
+ *
+ * Signal done fences of jobs whose seqno is less than the current value of
+ * the UFO object attached to the queue.
+ */
+static void
+pvr_queue_signal_done_fences(struct pvr_queue *queue)
+{
+ struct pvr_job *job, *tmp_job;
+ u32 cur_seqno;
+
+ spin_lock(&queue->scheduler.job_list_lock);
+ cur_seqno = *queue->timeline_ufo.value;
+ list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) {
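+		/* Signed, wrap-safe comparison on the lower 32 bits: pending jobs
+		 * are ordered by seqno, so stop at the first fence the UFO value
+		 * hasn't reached yet.
+		 */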
+ if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0)
+ break;
+
+ if (!dma_fence_is_signaled(job->done_fence)) {
+ dma_fence_signal(job->done_fence);
+ pvr_job_release_pm_ref(job);
+ atomic_dec(&queue->in_flight_job_count);
+ }
+ }
+ spin_unlock(&queue->scheduler.job_list_lock);
+}
+
+/**
+ * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB space
+ * can be unblocked and pushed to the CCCB.
+ * @queue: Queue to check
+ *
+ * If we have a job waiting for CCCB, and this job now fits in the CCCB, we signal
+ * its CCCB fence, which should kick drm_sched.
+ */
+static void
+pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue)
+{
+ struct pvr_queue_fence *cccb_fence;
+ u32 native_deps_remaining;
+ struct pvr_job *job;
+
+ mutex_lock(&queue->cccb_fence_ctx.job_lock);
+ job = queue->cccb_fence_ctx.job;
+ if (!job)
+ goto out_unlock;
+
+ /* If we have a job attached to the CCCB fence context, its CCCB fence
+ * shouldn't be NULL.
+ */
+ if (WARN_ON(!job->cccb_fence)) {
+ job = NULL;
+ goto out_unlock;
+ }
+
+ /* If we get there, CCCB fence has to be initialized. */
+ cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
+ if (WARN_ON(!cccb_fence->queue)) {
+ job = NULL;
+ goto out_unlock;
+ }
+
+ /* Evict signaled dependencies before checking for CCCB space.
+ * If the job fits, signal the CCCB fence, this should unblock
+ * the drm_sched_entity.
+ */
+ native_deps_remaining = job_count_remaining_native_deps(job);
+ if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
+ job = NULL;
+ goto out_unlock;
+ }
+
+ dma_fence_signal(job->cccb_fence);
+ pvr_queue_fence_put(job->cccb_fence);
+ job->cccb_fence = NULL;
+ queue->cccb_fence_ctx.job = NULL;
+
+out_unlock:
+ mutex_unlock(&queue->cccb_fence_ctx.job_lock);
+
+ pvr_job_put(job);
+}
+
+/**
+ * pvr_queue_process() - Process events that happened on a queue.
+ * @queue: Queue to check
+ *
+ * Signal job fences and check if jobs waiting for CCCB space can be unblocked.
+ */
+void pvr_queue_process(struct pvr_queue *queue)
+{
+ lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock);
+
+ pvr_queue_check_job_waiting_for_cccb_space(queue);
+ pvr_queue_signal_done_fences(queue);
+ pvr_queue_update_active_state_locked(queue);
+}
+
+static u32 get_dm_type(struct pvr_queue *queue)
+{
+ switch (queue->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return PVR_FWIF_DM_GEOM;
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return PVR_FWIF_DM_FRAG;
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return PVR_FWIF_DM_CDM;
+ }
+
+ return ~0;
+}
+
+/**
+ * init_fw_context() - Initializes the queue part of a FW context.
+ * @queue: Queue object to initialize the FW context for.
+ * @fw_ctx_map: The FW context CPU mapping.
+ *
+ * FW contexts contain various states, one of them being a per-queue state
+ * that needs to be initialized for each queue being exposed by a context. This
+ * function takes care of that.
+ */
+static void init_fw_context(struct pvr_queue *queue, void *fw_ctx_map)
+{
+ struct pvr_context *ctx = queue->ctx;
+ struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx);
+ struct rogue_fwif_fwcommoncontext *cctx_fw;
+ struct pvr_cccb *cccb = &queue->cccb;
+
+ cctx_fw = fw_ctx_map + queue->ctx_offset;
+ cctx_fw->ccbctl_fw_addr = cccb->ctrl_fw_addr;
+ cctx_fw->ccb_fw_addr = cccb->cccb_fw_addr;
+
+ cctx_fw->dm = get_dm_type(queue);
+ cctx_fw->priority = ctx->priority;
+ cctx_fw->priority_seq_num = 0;
+ cctx_fw->max_deadline_ms = MAX_DEADLINE_MS;
+ cctx_fw->pid = task_tgid_nr(current);
+ cctx_fw->server_common_context_id = ctx->ctx_id;
+
+ pvr_fw_object_get_fw_addr(fw_mem_ctx_obj, &cctx_fw->fw_mem_context_fw_addr);
+
+ pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr);
+}
+
+/**
+ * pvr_queue_cleanup_fw_context() - Wait for the FW context to be idle and clean it up.
+ * @queue: Queue on FW context to clean up.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_fw_structure_cleanup() otherwise.
+ */
+static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
+{
+ if (!queue->ctx->fw_obj)
+ return 0;
+
+ return pvr_fw_structure_cleanup(queue->ctx->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
+ queue->ctx->fw_obj, queue->ctx_offset);
+}
+
+/**
+ * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
+ * @job: The job to initialize.
+ *
+ * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
+ * and pvr_queue_job_push() can't fail. We also make sure the context type is
+ * valid and the job can fit in the CCCB.
+ *
+ * Return:
+ * * 0 on success, or
+ * * An error code if something failed.
+ */
+int pvr_queue_job_init(struct pvr_job *job)
+{
+ /* Fragment jobs need at least one native fence wait on the geometry job fence. */
+ u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
+ struct pvr_queue *queue;
+ int err;
+
+ if (atomic_read(&job->ctx->faulty))
+ return -EIO;
+
+ queue = pvr_context_get_queue_for_job(job->ctx, job->type);
+ if (!queue)
+ return -EINVAL;
+
+ if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
+ return -E2BIG;
+
+ err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
+ if (err)
+ return err;
+
+ job->cccb_fence = pvr_queue_fence_alloc();
+ job->kccb_fence = pvr_kccb_fence_alloc();
+ job->done_fence = pvr_queue_fence_alloc();
+ if (!job->cccb_fence || !job->kccb_fence || !job->done_fence)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * pvr_queue_job_arm() - Arm a job object.
+ * @job: The job to arm.
+ *
+ * Initializes fences and returns the drm_sched finished fence so it can
+ * be exposed to the outside world. Once this function is called, you should
+ * make sure the job is pushed using pvr_queue_job_push(), or guarantee that
+ * no one grabbed a reference to the returned fence. The latter can happen if
+ * we do multi-job submission, and something failed when creating/initializing
+ * a job. In that case, we know the fence didn't leave the driver, and we
+ * can thus guarantee nobody will wait on a dead fence object.
+ *
+ * Return:
+ * * A dma_fence object.
+ */
+struct dma_fence *pvr_queue_job_arm(struct pvr_job *job)
+{
+ drm_sched_job_arm(&job->base);
+
+ return &job->base.s_fence->finished;
+}
+
+/**
+ * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
+ * @job: The job to cleanup.
+ *
+ * Should be called in the job release path.
+ */
+void pvr_queue_job_cleanup(struct pvr_job *job)
+{
+ pvr_queue_fence_put(job->done_fence);
+ pvr_queue_fence_put(job->cccb_fence);
+ pvr_kccb_fence_put(job->kccb_fence);
+
+ if (job->base.s_fence)
+ drm_sched_job_cleanup(&job->base);
+}
+
+/**
+ * pvr_queue_job_push() - Push a job to its queue.
+ * @job: The job to push.
+ *
+ * Must be called after pvr_queue_job_init() and after all dependencies
+ * have been added to the job. This will effectively queue the job to
+ * the drm_sched_entity attached to the queue. We grab a reference on
+ * the job object, so the caller is free to drop its reference when it's
+ * done accessing the job object.
+ */
+void pvr_queue_job_push(struct pvr_job *job)
+{
+ struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
+
+ /* Keep track of the last queued job scheduled fence for combined submit. */
+ dma_fence_put(queue->last_queued_job_scheduled_fence);
+ queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled);
+
+ pvr_job_get(job);
+ drm_sched_entity_push_job(&job->base);
+}
+
+static void reg_state_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_queue *queue = priv;
+
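+	/* Only geometry queues need their register state seeded: the initial VDM
+	 * call stack pointer comes from the context creation arguments.
+	 */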
+ if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) {
+ struct rogue_fwif_geom_ctx_state *geom_ctx_state_fw = cpu_ptr;
+
+ geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init =
+ queue->callstack_addr;
+ }
+}
+
+/**
+ * pvr_queue_create() - Create a queue object.
+ * @ctx: The context this queue will be attached to.
+ * @type: The type of jobs being pushed to this queue.
+ * @args: The arguments passed to the context creation function.
+ * @fw_ctx_map: CPU mapping of the FW context object.
+ *
+ * Create a queue object that will be used to queue and track jobs.
+ *
+ * Return:
+ * * A valid pointer to a pvr_queue object, or
+ * * An error pointer if the creation/initialization failed.
+ */
+struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
+ enum drm_pvr_job_type type,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ static const struct {
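+		/* CCCB size is expressed as a log2 value (see CTX_*_CCCB_SIZE_LOG2). */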
+ u32 cccb_size;
+ const char *name;
+ } props[] = {
+ [DRM_PVR_JOB_TYPE_GEOMETRY] = {
+ .cccb_size = CTX_GEOM_CCCB_SIZE_LOG2,
+ .name = "geometry",
+ },
+ [DRM_PVR_JOB_TYPE_FRAGMENT] = {
+ .cccb_size = CTX_FRAG_CCCB_SIZE_LOG2,
+ .name = "fragment"
+ },
+ [DRM_PVR_JOB_TYPE_COMPUTE] = {
+ .cccb_size = CTX_COMPUTE_CCCB_SIZE_LOG2,
+ .name = "compute"
+ },
+ [DRM_PVR_JOB_TYPE_TRANSFER_FRAG] = {
+ .cccb_size = CTX_TRANSFER_CCCB_SIZE_LOG2,
+ .name = "transfer_frag"
+ },
+ };
+ struct pvr_device *pvr_dev = ctx->pvr_dev;
+ struct drm_gpu_scheduler *sched;
+ struct pvr_queue *queue;
+ int ctx_state_size, err;
+ void *cpu_map;
+
+	if (WARN_ON(type >= ARRAY_SIZE(props)))
+ return ERR_PTR(-EINVAL);
+
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ if (type != DRM_PVR_JOB_TYPE_GEOMETRY &&
+ type != DRM_PVR_JOB_TYPE_FRAGMENT)
+ return ERR_PTR(-EINVAL);
+ break;
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ if (type != DRM_PVR_JOB_TYPE_COMPUTE)
+ return ERR_PTR(-EINVAL);
+ break;
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ if (type != DRM_PVR_JOB_TYPE_TRANSFER_FRAG)
+ return ERR_PTR(-EINVAL);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx_state_size = get_ctx_state_size(pvr_dev, type);
+ if (ctx_state_size < 0)
+ return ERR_PTR(ctx_state_size);
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return ERR_PTR(-ENOMEM);
+
+ queue->type = type;
+ queue->ctx_offset = get_ctx_offset(type);
+ queue->ctx = ctx;
+ queue->callstack_addr = args->callstack_addr;
+ sched = &queue->scheduler;
+ INIT_LIST_HEAD(&queue->node);
+ mutex_init(&queue->cccb_fence_ctx.job_lock);
+ pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base);
+ pvr_queue_fence_ctx_init(&queue->job_fence_ctx);
+
+ err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name);
+ if (err)
+ goto err_free_queue;
+
+ err = pvr_fw_object_create(pvr_dev, ctx_state_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ reg_state_init, queue, &queue->reg_state_obj);
+ if (err)
+ goto err_cccb_fini;
+
+ init_fw_context(queue, fw_ctx_map);
+
+ if (type != DRM_PVR_JOB_TYPE_GEOMETRY && type != DRM_PVR_JOB_TYPE_FRAGMENT &&
+ args->callstack_addr) {
+ err = -EINVAL;
+ goto err_release_reg_state;
+ }
+
+ cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &queue->timeline_ufo.fw_obj);
+ if (IS_ERR(cpu_map)) {
+ err = PTR_ERR(cpu_map);
+ goto err_release_reg_state;
+ }
+
+ queue->timeline_ufo.value = cpu_map;
+
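+	/* Job timeout is 500ms; scheduler work runs on the device-wide sched_wq. */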
+ err = drm_sched_init(&queue->scheduler,
+ &pvr_queue_sched_ops,
+ pvr_dev->sched_wq, 1, 64 * 1024, 1,
+ msecs_to_jiffies(500),
+ pvr_dev->sched_wq, NULL, "pvr-queue",
+ pvr_dev->base.dev);
+ if (err)
+ goto err_release_ufo;
+
+ err = drm_sched_entity_init(&queue->entity,
+ DRM_SCHED_PRIORITY_KERNEL,
+ &sched, 1, &ctx->faulty);
+ if (err)
+ goto err_sched_fini;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ list_add_tail(&queue->node, &pvr_dev->queues.idle);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ return queue;
+
+err_sched_fini:
+ drm_sched_fini(&queue->scheduler);
+
+err_release_ufo:
+ pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj);
+
+err_release_reg_state:
+ pvr_fw_object_destroy(queue->reg_state_obj);
+
+err_cccb_fini:
+ pvr_cccb_fini(&queue->cccb);
+
+err_free_queue:
+ mutex_destroy(&queue->cccb_fence_ctx.job_lock);
+ kfree(queue);
+
+ return ERR_PTR(err);
+}
+
+void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev)
+{
+ struct pvr_queue *queue;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ list_for_each_entry(queue, &pvr_dev->queues.idle, node)
+ pvr_queue_stop(queue, NULL);
+ list_for_each_entry(queue, &pvr_dev->queues.active, node)
+ pvr_queue_stop(queue, NULL);
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+void pvr_queue_device_post_reset(struct pvr_device *pvr_dev)
+{
+ struct pvr_queue *queue;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ list_for_each_entry(queue, &pvr_dev->queues.active, node)
+ pvr_queue_start(queue);
+ list_for_each_entry(queue, &pvr_dev->queues.idle, node)
+ pvr_queue_start(queue);
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+/**
+ * pvr_queue_kill() - Kill a queue.
+ * @queue: The queue to kill.
+ *
+ * Kill the queue so no new jobs can be pushed. Should be called when the
+ * context handle is destroyed. The queue object might last longer if jobs
+ * are still in flight and holding a reference to the context this queue
+ * belongs to.
+ */
+void pvr_queue_kill(struct pvr_queue *queue)
+{
+ drm_sched_entity_destroy(&queue->entity);
+ dma_fence_put(queue->last_queued_job_scheduled_fence);
+ queue->last_queued_job_scheduled_fence = NULL;
+}
+
+/**
+ * pvr_queue_destroy() - Destroy a queue.
+ * @queue: The queue to destroy.
+ *
+ * Cleanup the queue and free the resources attached to it. Should be
+ * called from the context release function.
+ */
+void pvr_queue_destroy(struct pvr_queue *queue)
+{
+ if (!queue)
+ return;
+
+ mutex_lock(&queue->ctx->pvr_dev->queues.lock);
+ list_del_init(&queue->node);
+ mutex_unlock(&queue->ctx->pvr_dev->queues.lock);
+
+ drm_sched_fini(&queue->scheduler);
+ drm_sched_entity_fini(&queue->entity);
+
+ if (WARN_ON(queue->last_queued_job_scheduled_fence))
+ dma_fence_put(queue->last_queued_job_scheduled_fence);
+
+ pvr_queue_cleanup_fw_context(queue);
+
+ pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj);
+ pvr_fw_object_destroy(queue->reg_state_obj);
+ pvr_cccb_fini(&queue->cccb);
+ mutex_destroy(&queue->cccb_fence_ctx.job_lock);
+ kfree(queue);
+}
+
+/**
+ * pvr_queue_device_init() - Device-level initialization of queue related fields.
+ * @pvr_dev: The device to initialize.
+ *
+ * Initializes all fields related to queue management in pvr_device.
+ *
+ * Return:
+ * * 0 on success, or
+ * * An error code on failure.
+ */
+int pvr_queue_device_init(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ INIT_LIST_HEAD(&pvr_dev->queues.active);
+ INIT_LIST_HEAD(&pvr_dev->queues.idle);
+ err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock);
+ if (err)
+ return err;
+
+ pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0);
+ if (!pvr_dev->sched_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
+ * @pvr_dev: The device to cleanup.
+ *
+ * Cleanup/free all queue-related resources attached to a pvr_device object.
+ */
+void pvr_queue_device_fini(struct pvr_device *pvr_dev)
+{
+ destroy_workqueue(pvr_dev->sched_wq);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
new file mode 100644
index 000000000000..e06ced69302f
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_QUEUE_H
+#define PVR_QUEUE_H
+
+#include <drm/gpu_scheduler.h>
+
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+
+struct pvr_context;
+struct pvr_queue;
+
+/**
+ * struct pvr_queue_fence_ctx - Queue fence context
+ *
+ * Used to implement dma_fence_ops for pvr_job::{done,cccb}_fence.
+ */
+struct pvr_queue_fence_ctx {
+ /** @id: Fence context ID allocated with dma_fence_context_alloc(). */
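+/* For a geometry job paired with a fragment job, return the fragment job's next
+ * unsignaled dependency (skipping our own scheduled fence), falling back to the
+ * fragment job's own prepare_job() fences, so a combined geom+frag kick is only
+ * issued once both jobs are ready.
+ */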
+ u64 id;
+
+ /** @seqno: Sequence number incremented each time a fence is created. */
+ atomic_t seqno;
+
+ /** @lock: Lock used to synchronize access to fences allocated by this context. */
+ spinlock_t lock;
+};
+
+/**
+ * struct pvr_queue_cccb_fence_ctx - CCCB fence context
+ *
+ * Context used to manage fences controlling access to the CCCB. No fences are
+ * issued if there's enough space in the CCCB to push job commands.
+ */
+struct pvr_queue_cccb_fence_ctx {
+ /** @base: Base queue fence context. */
+ struct pvr_queue_fence_ctx base;
+
+ /**
+ * @job: Job waiting for CCCB space.
+ *
+	 * Thanks to the serialization done at the drm_sched_entity level,
+ * there's no more than one job waiting for CCCB at a given time.
+ *
+ * This field is NULL if no jobs are currently waiting for CCCB space.
+ *
+ * Must be accessed with @job_lock held.
+ */
+ struct pvr_job *job;
+
+ /** @job_lock: Lock protecting access to the job object. */
+ struct mutex job_lock;
+};
+
+/**
+ * struct pvr_queue_fence - Queue fence object
+ */
+struct pvr_queue_fence {
+ /** @base: Base dma_fence. */
+ struct dma_fence base;
+
+ /** @queue: Queue that created this fence. */
+ struct pvr_queue *queue;
+};
+
+/**
+ * struct pvr_queue - Job queue
+ *
+ * Used to queue and track execution of pvr_job objects.
+ */
+struct pvr_queue {
+	/** @scheduler: Single entity scheduler used to push jobs to this queue. */
+ struct drm_gpu_scheduler scheduler;
+
+ /** @entity: Scheduling entity backing this queue. */
+ struct drm_sched_entity entity;
+
+ /** @type: Type of jobs queued to this queue. */
+ enum drm_pvr_job_type type;
+
+ /** @ctx: Context object this queue is bound to. */
+ struct pvr_context *ctx;
+
+ /** @node: Used to add the queue to the active/idle queue list. */
+ struct list_head node;
+
+ /**
+ * @in_flight_job_count: Number of jobs submitted to the CCCB that
+ * have not been processed yet.
+ */
+ atomic_t in_flight_job_count;
+
+ /**
+ * @cccb_fence_ctx: CCCB fence context.
+ *
+	 * Used to control access to the CCCB when it is full, such that we don't
+ * end up trying to push commands to the CCCB if there's not enough
+ * space to receive all commands needed for a job to complete.
+ */
+ struct pvr_queue_cccb_fence_ctx cccb_fence_ctx;
+
+ /** @job_fence_ctx: Job fence context object. */
+ struct pvr_queue_fence_ctx job_fence_ctx;
+
+ /** @timeline_ufo: Timeline UFO for the context queue. */
+ struct {
+ /** @fw_obj: FW object representing the UFO value. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @value: CPU mapping of the UFO value. */
+ u32 *value;
+ } timeline_ufo;
+
+ /**
+ * @last_queued_job_scheduled_fence: The scheduled fence of the last
+ * job queued to this queue.
+ *
+ * We use it to insert frag -> geom dependencies when issuing combined
+ * geom+frag jobs, to guarantee that the fragment job that's part of
+ * the combined operation comes after all fragment jobs that were queued
+ * before it.
+ */
+ struct dma_fence *last_queued_job_scheduled_fence;
+
+ /** @cccb: Client Circular Command Buffer. */
+ struct pvr_cccb cccb;
+
+ /** @reg_state_obj: FW object representing the register state of this queue. */
+ struct pvr_fw_object *reg_state_obj;
+
+ /** @ctx_offset: Offset of the queue context in the FW context object. */
+ u32 ctx_offset;
+
+ /** @callstack_addr: Initial call stack address for register state object. */
+ u64 callstack_addr;
+};
+
+bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);
+
+int pvr_queue_job_init(struct pvr_job *job);
+
+void pvr_queue_job_cleanup(struct pvr_job *job);
+
+void pvr_queue_job_push(struct pvr_job *job);
+
+struct dma_fence *pvr_queue_job_arm(struct pvr_job *job);
+
+struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
+ enum drm_pvr_job_type type,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map);
+
+void pvr_queue_kill(struct pvr_queue *queue);
+
+void pvr_queue_destroy(struct pvr_queue *queue);
+
+void pvr_queue_process(struct pvr_queue *queue);
+
+void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev);
+
+void pvr_queue_device_post_reset(struct pvr_device *pvr_dev);
+
+int pvr_queue_device_init(struct pvr_device *pvr_dev);
+
+void pvr_queue_device_fini(struct pvr_device *pvr_dev);
+
+#endif /* PVR_QUEUE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
new file mode 100644
index 000000000000..2a90d02796d3
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -0,0 +1,6193 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+/* *** Autogenerated C -- do not edit *** */
+
+#ifndef PVR_ROGUE_CR_DEFS_H
+#define PVR_ROGUE_CR_DEFS_H
+
+/* clang-format off */
+
+#define ROGUE_CR_DEFS_REVISION 1
+
+/* Register ROGUE_CR_RASTERISATION_INDIRECT */
+#define ROGUE_CR_RASTERISATION_INDIRECT 0x8238U
+#define ROGUE_CR_RASTERISATION_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_PBE_INDIRECT */
+#define ROGUE_CR_PBE_INDIRECT 0x83E0U
+#define ROGUE_CR_PBE_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_PBE_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_PBE_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_PBE_PERF_INDIRECT */
+#define ROGUE_CR_PBE_PERF_INDIRECT 0x83D8U
+#define ROGUE_CR_PBE_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_TPU_PERF_INDIRECT */
+#define ROGUE_CR_TPU_PERF_INDIRECT 0x83F0U
+#define ROGUE_CR_TPU_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_INDIRECT */
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT 0x8318U
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT */
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT 0x8028U
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_USC_PERF_INDIRECT */
+#define ROGUE_CR_USC_PERF_INDIRECT 0x8030U
+#define ROGUE_CR_USC_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_BLACKPEARL_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_INDIRECT 0x8388U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT 0x83F8U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_TEXAS3_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT 0x83D0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_TEXAS_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS_PERF_INDIRECT 0x8288U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BX_TU_PERF_INDIRECT */
+#define ROGUE_CR_BX_TU_PERF_INDIRECT 0xC900U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_CLK_CTRL */
+#define ROGUE_CR_CLK_CTRL 0x0000U
+#define ROGUE_CR_CLK_CTRL__PBE2_XE__MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL__S7_TOP__MASKFULL 0xCFCF03000F3F3F0FULL
+#define ROGUE_CR_CLK_CTRL_MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_SHIFT 62U
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_CLRMSK 0x3FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_ON 0x4000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_AUTO 0x8000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_SHIFT 60U
+#define ROGUE_CR_CLK_CTRL_IPP_CLRMSK 0xCFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_IPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_ON 0x1000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_AUTO 0x2000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_SHIFT 58U
+#define ROGUE_CR_CLK_CTRL_FBC_CLRMSK 0xF3FFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_ON 0x0400000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_AUTO 0x0800000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_SHIFT 56U
+#define ROGUE_CR_CLK_CTRL_FBDC_CLRMSK 0xFCFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBDC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_ON 0x0100000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_AUTO 0x0200000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_SHIFT 54U
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_CLRMSK 0xFF3FFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_ON 0x0040000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_AUTO 0x0080000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_SHIFT 52U
+#define ROGUE_CR_CLK_CTRL_USCS_CLRMSK 0xFFCFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USCS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_ON 0x0010000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_AUTO 0x0020000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_SHIFT 50U
+#define ROGUE_CR_CLK_CTRL_PBE_CLRMSK 0xFFF3FFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_PBE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_ON 0x0004000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_AUTO 0x0008000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_SHIFT 48U
+#define ROGUE_CR_CLK_CTRL_MCU_L1_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_ON 0x0001000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_AUTO 0x0002000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_SHIFT 46U
+#define ROGUE_CR_CLK_CTRL_CDM_CLRMSK 0xFFFF3FFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_CDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_ON 0x0000400000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_AUTO 0x0000800000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_SHIFT 44U
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_CLRMSK 0xFFFFCFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_ON 0x0000100000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_AUTO 0x0000200000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT 42U
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK 0xFFFFF3FFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_ON 0x0000040000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_AUTO 0x0000080000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SHIFT 40U
+#define ROGUE_CR_CLK_CTRL_BIF_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_ON 0x0000010000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_AUTO 0x0000020000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT 28U
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_SHIFT 26U
+#define ROGUE_CR_CLK_CTRL_MCU_L0_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_SHIFT 24U
+#define ROGUE_CR_CLK_CTRL_TPU_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_SHIFT 20U
+#define ROGUE_CR_CLK_CTRL_USC_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_CTRL_USC_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_SHIFT 18U
+#define ROGUE_CR_CLK_CTRL_TLA_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_CTRL_TLA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_SHIFT 16U
+#define ROGUE_CR_CLK_CTRL_SLC_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_CTRL_SLC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_AUTO 0x0000000000020000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_SHIFT 14U
+#define ROGUE_CR_CLK_CTRL_UVS_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_CLK_CTRL_UVS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_ON 0x0000000000004000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_AUTO 0x0000000000008000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_SHIFT 12U
+#define ROGUE_CR_CLK_CTRL_PDS_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_CLK_CTRL_PDS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_ON 0x0000000000001000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_AUTO 0x0000000000002000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL_VDM_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL_VDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL_VDM_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL_PM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL_PM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL_PM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL_PM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL_GPP_SHIFT 6U
+#define ROGUE_CR_CLK_CTRL_GPP_CLRMSK 0xFFFFFFFFFFFFFF3FULL
+#define ROGUE_CR_CLK_CTRL_GPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_GPP_ON 0x0000000000000040ULL
+#define ROGUE_CR_CLK_CTRL_GPP_AUTO 0x0000000000000080ULL
+#define ROGUE_CR_CLK_CTRL_TE_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL_TE_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL_TE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TE_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL_TE_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL_TSP_SHIFT 2U
+#define ROGUE_CR_CLK_CTRL_TSP_CLRMSK 0xFFFFFFFFFFFFFFF3ULL
+#define ROGUE_CR_CLK_CTRL_TSP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TSP_ON 0x0000000000000004ULL
+#define ROGUE_CR_CLK_CTRL_TSP_AUTO 0x0000000000000008ULL
+#define ROGUE_CR_CLK_CTRL_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL_ISP_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL_ISP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_ISP_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL_ISP_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS */
+#define ROGUE_CR_CLK_STATUS 0x0008U
+#define ROGUE_CR_CLK_STATUS__PBE2_XE__MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS__S7_TOP__MASKFULL 0x00000001B3101773ULL
+#define ROGUE_CR_CLK_STATUS_MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_SHIFT 32U
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_RUNNING 0x0000000100000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_SHIFT 31U
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_RUNNING 0x0000000080000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_SHIFT 30U
+#define ROGUE_CR_CLK_STATUS_IPP_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_IPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_RUNNING 0x0000000040000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_SHIFT 29U
+#define ROGUE_CR_CLK_STATUS_FBC_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_RUNNING 0x0000000020000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_SHIFT 28U
+#define ROGUE_CR_CLK_STATUS_FBDC_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBDC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_RUNNING 0x0000000010000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_SHIFT 27U
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_RUNNING 0x0000000008000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_SHIFT 26U
+#define ROGUE_CR_CLK_STATUS_USCS_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_USCS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_RUNNING 0x0000000004000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_SHIFT 25U
+#define ROGUE_CR_CLK_STATUS_PBE_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_PBE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_RUNNING 0x0000000002000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_SHIFT 24U
+#define ROGUE_CR_CLK_STATUS_MCU_L1_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_RUNNING 0x0000000001000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_SHIFT 23U
+#define ROGUE_CR_CLK_STATUS_CDM_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_CLK_STATUS_CDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_RUNNING 0x0000000000800000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_SHIFT 22U
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_RUNNING 0x0000000000400000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT 21U
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING 0x0000000000200000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SHIFT 20U
+#define ROGUE_CR_CLK_STATUS_BIF_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_RUNNING 0x0000000000100000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT 14U
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING 0x0000000000004000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_SHIFT 13U
+#define ROGUE_CR_CLK_STATUS_MCU_L0_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_RUNNING 0x0000000000002000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_SHIFT 12U
+#define ROGUE_CR_CLK_STATUS_TPU_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_RUNNING 0x0000000000001000ULL
+#define ROGUE_CR_CLK_STATUS_USC_SHIFT 10U
+#define ROGUE_CR_CLK_STATUS_USC_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_STATUS_USC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USC_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_STATUS_TLA_SHIFT 9U
+#define ROGUE_CR_CLK_STATUS_TLA_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_STATUS_TLA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TLA_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_STATUS_SLC_SHIFT 8U
+#define ROGUE_CR_CLK_STATUS_SLC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_STATUS_SLC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SLC_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_STATUS_UVS_SHIFT 7U
+#define ROGUE_CR_CLK_STATUS_UVS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_STATUS_UVS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_UVS_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_STATUS_PDS_SHIFT 6U
+#define ROGUE_CR_CLK_STATUS_PDS_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_STATUS_PDS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PDS_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_STATUS_VDM_SHIFT 5U
+#define ROGUE_CR_CLK_STATUS_VDM_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_STATUS_VDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_VDM_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_STATUS_PM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS_PM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS_PM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS_GPP_SHIFT 3U
+#define ROGUE_CR_CLK_STATUS_GPP_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_STATUS_GPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_GPP_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_STATUS_TE_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS_TE_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS_TE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TE_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS_TSP_SHIFT 1U
+#define ROGUE_CR_CLK_STATUS_TSP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_STATUS_TSP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TSP_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_STATUS_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS_ISP_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS_ISP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_ISP_RUNNING 0x0000000000000001ULL
+
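/*
 * Editorial sketch, not part of the patch above: the SHIFT/CLRMSK/value
 * macros in this header follow one pattern. CLRMSK is the register value
 * with the field's bits cleared, so ~CLRMSK isolates the field, and the
 * _RUNNING/_ON/_EN constants are field values already shifted into place.
 * A minimal, assumed helper (readq() from <linux/io.h>; "regs" is the
 * ioremapped register bank, a placeholder name) to test whether the SLC
 * clock reports as running via ROGUE_CR_CLK_STATUS:
 */
#include <linux/io.h>
#include <linux/types.h>

static bool rogue_slc_clock_running(void __iomem *regs)
{
	u64 status = readq(regs + ROGUE_CR_CLK_STATUS);

	/* Clear every other field, then compare against the RUNNING value. */
	return (status & ~ROGUE_CR_CLK_STATUS_SLC_CLRMSK) ==
	       ROGUE_CR_CLK_STATUS_SLC_RUNNING;
}
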
+/* Register ROGUE_CR_CORE_ID */
+#define ROGUE_CR_CORE_ID__PBVNC 0x0020U
+#define ROGUE_CR_CORE_ID__PBVNC__MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT 48U
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT 32U
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT 16U
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT 0U
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_CORE_ID */
+#define ROGUE_CR_CORE_ID 0x0018U
+#define ROGUE_CR_CORE_ID_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID_ID_SHIFT 16U
+#define ROGUE_CR_CORE_ID_ID_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_CORE_ID_CONFIG_SHIFT 0U
+#define ROGUE_CR_CORE_ID_CONFIG_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CORE_REVISION */
+#define ROGUE_CR_CORE_REVISION 0x0020U
+#define ROGUE_CR_CORE_REVISION_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_REVISION_DESIGNER_SHIFT 24U
+#define ROGUE_CR_CORE_REVISION_DESIGNER_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CORE_REVISION_MAJOR_SHIFT 16U
+#define ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CORE_REVISION_MINOR_SHIFT 8U
+#define ROGUE_CR_CORE_REVISION_MINOR_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_SHIFT 0U
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_DESIGNER_REV_FIELD1 */
+#define ROGUE_CR_DESIGNER_REV_FIELD1 0x0028U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_DESIGNER_REV_FIELD2 */
+#define ROGUE_CR_DESIGNER_REV_FIELD2 0x0030U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CHANGESET_NUMBER */
+#define ROGUE_CR_CHANGESET_NUMBER 0x0040U
+#define ROGUE_CR_CHANGESET_NUMBER_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT 0U
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_CTRL */
+#define ROGUE_CR_CLK_XTPLUS_CTRL 0x0080U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_MASKFULL 0x0000003FFFFF0000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_SHIFT 36U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK 0xFFFFFFCFFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_ON 0x0000001000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_AUTO 0x0000002000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT 34U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK 0xFFFFFFF3FFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_ON 0x0000000400000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_AUTO 0x0000000800000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_SHIFT 32U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK 0xFFFFFFFCFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_ON 0x0000000100000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_AUTO 0x0000000200000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT 30U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK 0xFFFFFFFF3FFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_ON 0x0000000040000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO 0x0000000080000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT 28U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT 26U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT 24U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT 22U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK 0xFFFFFFFFFF3FFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON 0x0000000000400000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO 0x0000000000800000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT 20U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT 16U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO 0x0000000000020000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_STATUS */
+#define ROGUE_CR_CLK_XTPLUS_STATUS 0x0088U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_SHIFT 10U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_SHIFT 9U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT 8U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT 7U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT 6U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT 5U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT 4U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT 3U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT 2U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT 1U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT 0U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SOFT_RESET */
+#define ROGUE_CR_SOFT_RESET 0x0100U
+#define ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL 0xFFEFFFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_MASKFULL 0x00E7FFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT 63U
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_EN 0x8000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT 62U
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_EN 0x4000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_SHIFT 61U
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_EN 0x2000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_SHIFT 60U
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_EN 0x1000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_SHIFT 59U
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_EN 0x0800000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TE3_SHIFT 58U
+#define ROGUE_CR_SOFT_RESET_TE3_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE3_EN 0x0400000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VCE_SHIFT 57U
+#define ROGUE_CR_SOFT_RESET_VCE_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VCE_EN 0x0200000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VBS_SHIFT 56U
+#define ROGUE_CR_SOFT_RESET_VBS_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VBS_EN 0x0100000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_SHIFT 55U
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_EN 0x0080000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_SHIFT 54U
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FBA_SHIFT 53U
+#define ROGUE_CR_SOFT_RESET_FBA_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBA_EN 0x0020000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_SHIFT 51U
+#define ROGUE_CR_SOFT_RESET_FB_CDC_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_EN 0x0008000000000000ULL
+#define ROGUE_CR_SOFT_RESET_SH_SHIFT 50U
+#define ROGUE_CR_SOFT_RESET_SH_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SH_EN 0x0004000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VRDM_SHIFT 49U
+#define ROGUE_CR_SOFT_RESET_VRDM_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VRDM_EN 0x0002000000000000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_SHIFT 48U
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_EN 0x0001000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT 47U
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_EN 0x0000800000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT 46U
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_EN 0x0000400000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_SHIFT 45U
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_EN 0x0000200000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_SHIFT 44U
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SOFT_RESET_IPP_SHIFT 43U
+#define ROGUE_CR_SOFT_RESET_IPP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_IPP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_SHIFT 42U
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_EN 0x0000040000000000ULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_SHIFT 41U
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_EN 0x0000020000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_SHIFT 40U
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN 0x0000010000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_SHIFT 39U
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_SHIFT 38U
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN 0x0000004000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_SHIFT 37U
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN 0x0000002000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_SHIFT 36U
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN 0x0000001000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_SHIFT 35U
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN 0x0000000800000000ULL
+#define ROGUE_CR_SOFT_RESET_MMU_SHIFT 34U
+#define ROGUE_CR_SOFT_RESET_MMU_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MMU_EN 0x0000000400000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF1_SHIFT 33U
+#define ROGUE_CR_SOFT_RESET_BIF1_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF1_EN 0x0000000200000000ULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_GARTEN_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_CPU_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_CPU_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_CPU_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_SHIFT 31U
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN 0x0000000080000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_SHIFT 30U
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN 0x0000000040000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_SHIFT 29U
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_SHIFT 28U
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SOFT_RESET_SLC_SHIFT 27U
+#define ROGUE_CR_SOFT_RESET_SLC_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SLC_EN 0x0000000008000000ULL
+#define ROGUE_CR_SOFT_RESET_TLA_SHIFT 26U
+#define ROGUE_CR_SOFT_RESET_TLA_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TLA_EN 0x0000000004000000ULL
+#define ROGUE_CR_SOFT_RESET_UVS_SHIFT 25U
+#define ROGUE_CR_SOFT_RESET_UVS_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_UVS_EN 0x0000000002000000ULL
+#define ROGUE_CR_SOFT_RESET_TE_SHIFT 24U
+#define ROGUE_CR_SOFT_RESET_TE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SOFT_RESET_GPP_SHIFT 23U
+#define ROGUE_CR_SOFT_RESET_GPP_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SOFT_RESET_GPP_EN 0x0000000000800000ULL
+#define ROGUE_CR_SOFT_RESET_FBDC_SHIFT 22U
+#define ROGUE_CR_SOFT_RESET_FBDC_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBDC_EN 0x0000000000400000ULL
+#define ROGUE_CR_SOFT_RESET_FBC_SHIFT 21U
+#define ROGUE_CR_SOFT_RESET_FBC_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBC_EN 0x0000000000200000ULL
+#define ROGUE_CR_SOFT_RESET_PM_SHIFT 20U
+#define ROGUE_CR_SOFT_RESET_PM_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PM_EN 0x0000000000100000ULL
+#define ROGUE_CR_SOFT_RESET_PBE_SHIFT 19U
+#define ROGUE_CR_SOFT_RESET_PBE_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SOFT_RESET_PBE_EN 0x0000000000080000ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_EN 0x0000000000040000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_SHIFT 17U
+#define ROGUE_CR_SOFT_RESET_MCU_L1_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_EN 0x0000000000020000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_SHIFT 16U
+#define ROGUE_CR_SOFT_RESET_BIF_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_EN 0x0000000000010000ULL
+#define ROGUE_CR_SOFT_RESET_CDM_SHIFT 15U
+#define ROGUE_CR_SOFT_RESET_CDM_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SOFT_RESET_CDM_EN 0x0000000000008000ULL
+#define ROGUE_CR_SOFT_RESET_VDM_SHIFT 14U
+#define ROGUE_CR_SOFT_RESET_VDM_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SOFT_RESET_VDM_EN 0x0000000000004000ULL
+#define ROGUE_CR_SOFT_RESET_TESS_SHIFT 13U
+#define ROGUE_CR_SOFT_RESET_TESS_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SOFT_RESET_TESS_EN 0x0000000000002000ULL
+#define ROGUE_CR_SOFT_RESET_PDS_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET_PDS_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SOFT_RESET_PDS_EN 0x0000000000001000ULL
+#define ROGUE_CR_SOFT_RESET_ISP_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET_ISP_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SOFT_RESET_ISP_EN 0x0000000000000800ULL
+#define ROGUE_CR_SOFT_RESET_TSP_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET_TSP_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SOFT_RESET_TSP_EN 0x0000000000000400ULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET_SYSARB_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_EN 0x0000000000000020ULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_EN 0x0000000000000010ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET_MCU_L0_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_EN 0x0000000000000008ULL
+#define ROGUE_CR_SOFT_RESET_TPU_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET_TPU_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SOFT_RESET_TPU_EN 0x0000000000000004ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET_USC_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SOFT_RESET_USC_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SOFT_RESET2 */
+#define ROGUE_CR_SOFT_RESET2 0x0108U
+#define ROGUE_CR_SOFT_RESET2_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_SOFT_RESET2_TDM_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET2_TDM_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SOFT_RESET2_TDM_EN 0x00000800U
+#define ROGUE_CR_SOFT_RESET2_ASTC_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET2_ASTC_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SOFT_RESET2_ASTC_EN 0x00000400U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_SHIFT 9U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN 0x00000200U
+#define ROGUE_CR_SOFT_RESET2_USCPS_SHIFT 8U
+#define ROGUE_CR_SOFT_RESET2_USCPS_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SOFT_RESET2_USCPS_EN 0x00000100U
+#define ROGUE_CR_SOFT_RESET2_IPF_SHIFT 7U
+#define ROGUE_CR_SOFT_RESET2_IPF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SOFT_RESET2_IPF_EN 0x00000080U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_SHIFT 6U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_EN 0x00000040U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_EN 0x00000020U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_EN 0x00000010U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_EN 0x00000008U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SOFT_RESET2_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SOFT_RESET2_CDM_SHIFT 1U
+#define ROGUE_CR_SOFT_RESET2_CDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SOFT_RESET2_CDM_EN 0x00000002U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SOFT_RESET2_VERTEX_EN 0x00000001U
+
+/* Register ROGUE_CR_EVENT_STATUS */
+#define ROGUE_CR_EVENT_STATUS 0x0130U
+#define ROGUE_CR_EVENT_STATUS__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_STATUS__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_TIMER */
+#define ROGUE_CR_TIMER 0x0160U
+#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_SHIFT 63U
+#define ROGUE_CR_TIMER_BIT31_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_EN 0x8000000000000000ULL
+#define ROGUE_CR_TIMER_VALUE_SHIFT 0U
+#define ROGUE_CR_TIMER_VALUE_CLRMSK 0xFFFF000000000000ULL
+
+/* Register ROGUE_CR_TLA_STATUS */
+#define ROGUE_CR_TLA_STATUS 0x0178U
+#define ROGUE_CR_TLA_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_SHIFT 39U
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_CLRMSK 0x0000007FFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_REQUEST_SHIFT 7U
+#define ROGUE_CR_TLA_STATUS_REQUEST_CLRMSK 0xFFFFFF800000007FULL
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT 1U
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK 0xFFFFFFFFFFFFFF81ULL
+#define ROGUE_CR_TLA_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_TLA_STATUS_BUSY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_TLA_STATUS_BUSY_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_PM_PARTIAL_RENDER_ENABLE */
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE 0x0338U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT 0U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN 0x00000001U
+
+/* Register ROGUE_CR_SIDEKICK_IDLE */
+#define ROGUE_CR_SIDEKICK_IDLE 0x03C8U
+#define ROGUE_CR_SIDEKICK_IDLE_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_SHIFT 6U
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_EN 0x00000040U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_SHIFT 5U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_EN 0x00000020U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_SHIFT 4U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_EN 0x00000010U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_SHIFT 3U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_EN 0x00000008U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_SHIFT 2U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN 0x00000004U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_SHIFT 1U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN 0x00000002U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_MARS_IDLE */
+#define ROGUE_CR_MARS_IDLE 0x08F8U
+#define ROGUE_CR_MARS_IDLE_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_SHIFT 2U
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_EN 0x00000004U
+#define ROGUE_CR_MARS_IDLE_CPU_SHIFT 1U
+#define ROGUE_CR_MARS_IDLE_CPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MARS_IDLE_CPU_EN 0x00000002U
+#define ROGUE_CR_MARS_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_MARS_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MARS_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS 0x0430U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL 0x00000000000000F3ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT 4U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK 0xFFFFFF0FU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0 0x0438U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1 0x0440U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2 0x0448U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 0x0450U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 0x0458U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 0x0460U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_CDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS 0x04A0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_PDS0 0x04A8U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_PDS1 0x04B0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS */
+#define ROGUE_CR_CDM_TERMINATE_PDS 0x04B8U
+#define ROGUE_CR_CDM_TERMINATE_PDS_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS1 */
+#define ROGUE_CR_CDM_TERMINATE_PDS1 0x04C0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 0x04D8U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 0x04E0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_CONFIG */
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG 0x0810U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_MASKFULL 0x000001030F01FFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT 40U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN 0x0000010000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT 33U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN 0x0000000200000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT 32U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT 25U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK 0xFFFFFFFFF1FFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT 24U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN 0x0000000001000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT 16U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS 0x0000000000010000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 0x0818U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 0x0820U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1 0x0828U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2 0x0830U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1 0x0838U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 0x0840U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 0x0848U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 0x0850U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 0x0858U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 0x0860U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS */
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS 0x0868U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL 0x00000001FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR */
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR 0x0870U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG 0x0878U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL 0xFFFFFFF7FFFFFFBFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT 36U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT 11U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN 0x0000000000000800ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT 7U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB 0x0000000000000080ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB 0x0000000000000100ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB 0x0000000000000180ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB 0x0000000000000200ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB 0x0000000000000280ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB 0x0000000000000300ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB 0x0000000000000380ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB 0x0000000000000400ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT 1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK 0xFFFFFFFFFFFFFFC1ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN 0x0000000000000001ULL
+
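Note that the REGION_SIZE_* encodings above step by 0x80 (one unit in the 4-bit field at bit 7) while the selected region grows by a factor of four each step, i.e. a field value of n selects a 4 KiB * 4^n region. As a minimal, hypothetical sketch (not part of this patch) of composing the 64-bit config word from these constants; the exact ADDR_OUT and BASE_ADDR_IN address encodings are not spelled out in this header, so those fields are deliberately left out:

	u64 cfg = ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB |
		  ((u64)entry << ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT) |
		  ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN;
	/* 'entry' (0..31) selects the remap entry; the input/output page addresses would be
	 * OR-ed in at BASE_ADDR_IN_SHIFT (12) and ADDR_OUT_SHIFT (36) respectively. */
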
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ 0x0880U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL 0x000000000000003FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT 1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK 0xFFFFFFC1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA 0x0888U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL 0xFFFFFFF7FFFFFF81ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT 36U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT 11U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN 0x0000000000000800ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT 7U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE 0x08A0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS 0x08A8U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR 0x08B0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE */
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE 0x08B8U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_NMI_EVENT */
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT 0x08C0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_DEBUG_CONFIG */
+#define ROGUE_CR_MIPS_DEBUG_CONFIG 0x08C8U
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT 0U
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_EXCEPTION_STATUS */
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS 0x08D0U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_MASKFULL 0x000000000000003FULL
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT 5U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN 0x00000020U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT 4U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN 0x00000010U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT 3U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN 0x00000008U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT 2U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN 0x00000004U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT 1U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN 0x00000002U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT 0U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_STATUS */
+#define ROGUE_CR_MIPS_WRAPPER_STATUS 0x08E8U
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_XPU_BROADCAST */
+#define ROGUE_CR_XPU_BROADCAST 0x0890U
+#define ROGUE_CR_XPU_BROADCAST_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_XPU_BROADCAST_MASK_SHIFT 0U
+#define ROGUE_CR_XPU_BROADCAST_MASK_CLRMSK 0xFFFFFE00U
+
+/* Register ROGUE_CR_META_SP_MSLVDATAX */
+#define ROGUE_CR_META_SP_MSLVDATAX 0x0A00U
+#define ROGUE_CR_META_SP_MSLVDATAX_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_META_SP_MSLVDATAT */
+#define ROGUE_CR_META_SP_MSLVDATAT 0x0A08U
+#define ROGUE_CR_META_SP_MSLVDATAT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_META_SP_MSLVCTRL0 */
+#define ROGUE_CR_META_SP_MSLVCTRL0 0x0A10U
+#define ROGUE_CR_META_SP_MSLVCTRL0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK 0x00000003U
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT 1U
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_EN 0x00000002U
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_EN 0x00000001U
+
+/* Register ROGUE_CR_META_SP_MSLVCTRL1 */
+#define ROGUE_CR_META_SP_MSLVCTRL1 0x0A18U
+#define ROGUE_CR_META_SP_MSLVCTRL1_MASKFULL 0x00000000F7F4003FULL
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT 30U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT 29U
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN 0x20000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT 28U
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN 0x10000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT 26U
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN 0x04000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT 25U
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN 0x02000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_SHIFT 24U
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_EN 0x01000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT 21U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK 0xFF1FFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT 20U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_EN 0x00100000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT 18U
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN 0x00040000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_SHIFT 4U
+#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_META_SP_MSLVHANDSHKE */
+#define ROGUE_CR_META_SP_MSLVHANDSHKE 0x0A50U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_META_SP_MSLVT0KICK */
+#define ROGUE_CR_META_SP_MSLVT0KICK 0x0A80U
+#define ROGUE_CR_META_SP_MSLVT0KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT0KICKI */
+#define ROGUE_CR_META_SP_MSLVT0KICKI 0x0A88U
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT1KICK */
+#define ROGUE_CR_META_SP_MSLVT1KICK 0x0A90U
+#define ROGUE_CR_META_SP_MSLVT1KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT1KICKI */
+#define ROGUE_CR_META_SP_MSLVT1KICKI 0x0A98U
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT2KICK */
+#define ROGUE_CR_META_SP_MSLVT2KICK 0x0AA0U
+#define ROGUE_CR_META_SP_MSLVT2KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT2KICKI */
+#define ROGUE_CR_META_SP_MSLVT2KICKI 0x0AA8U
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT3KICK */
+#define ROGUE_CR_META_SP_MSLVT3KICK 0x0AB0U
+#define ROGUE_CR_META_SP_MSLVT3KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT3KICKI */
+#define ROGUE_CR_META_SP_MSLVT3KICKI 0x0AB8U
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVRST */
+#define ROGUE_CR_META_SP_MSLVRST 0x0AC0U
+#define ROGUE_CR_META_SP_MSLVRST_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_EN 0x00000001U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQSTATUS */
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS 0x0AC8U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_MASKFULL 0x000000000000000CULL
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT 3U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN 0x00000008U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN 0x00000004U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQENABLE */
+#define ROGUE_CR_META_SP_MSLVIRQENABLE 0x0AD0U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_MASKFULL 0x000000000000000CULL
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT 3U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_EN 0x00000008U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_EN 0x00000004U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQLEVEL */
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL 0x0AD8U
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_MTS_SCHEDULE */
+#define ROGUE_CR_MTS_SCHEDULE 0x0B00U
+#define ROGUE_CR_MTS_SCHEDULE_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM_ALL 0x0000000FU
+
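The MTS kick register above follows the header's usual field convention: each field gets a _SHIFT, an inverted-mask _CLRMSK and pre-shifted value constants. As a hedged, hypothetical sketch (not part of this patch), a host-issued counted kick for data master 0 could be assembled purely from the constants defined above; pvr_cr_write32() stands in for whichever MMIO write helper the driver actually uses:

	u32 kick = ROGUE_CR_MTS_SCHEDULE_HOST_HOST |     /* kick requested by the host     */
		   ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX | /* run on the background context  */
		   ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED |  /* counted task                   */
		   ROGUE_CR_MTS_SCHEDULE_DM_DM0;         /* target data master 0           */

	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, kick); /* placeholder write helper */
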
+/* Register ROGUE_CR_MTS_SCHEDULE1 */
+#define ROGUE_CR_MTS_SCHEDULE1 0x10B00U
+#define ROGUE_CR_MTS_SCHEDULE1_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE2 */
+#define ROGUE_CR_MTS_SCHEDULE2 0x20B00U
+#define ROGUE_CR_MTS_SCHEDULE2_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE3 */
+#define ROGUE_CR_MTS_SCHEDULE3 0x30B00U
+#define ROGUE_CR_MTS_SCHEDULE3_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE4 */
+#define ROGUE_CR_MTS_SCHEDULE4 0x40B00U
+#define ROGUE_CR_MTS_SCHEDULE4_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE5 */
+#define ROGUE_CR_MTS_SCHEDULE5 0x50B00U
+#define ROGUE_CR_MTS_SCHEDULE5_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE6 */
+#define ROGUE_CR_MTS_SCHEDULE6 0x60B00U
+#define ROGUE_CR_MTS_SCHEDULE6_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE7 */
+#define ROGUE_CR_MTS_SCHEDULE7 0x70B00U
+#define ROGUE_CR_MTS_SCHEDULE7_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC 0x0B30U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC 0x0B38U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC 0x0B40U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC 0x0B48U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG */
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG 0x0B50U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL 0x000FF0FFFFFFF701ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL 0x0000FFFFFFFFF001ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK 0xFFFF0FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK 0xFFF00FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT 12U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT 9U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK 0xFFFFFFFFFFFFF9FFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT 8U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN 0x0000000000000100ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT 0U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META 0x0000000000000000ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE 0x0B58U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE 0x0B60U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE 0x0B68U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE 0x0B70U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE 0x0B78U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE 0x0B80U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_INTCTX */
+#define ROGUE_CR_MTS_INTCTX 0x0B98U
+#define ROGUE_CR_MTS_INTCTX_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT 22U
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK 0xC03FFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_SHIFT 18U
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_CLRMSK 0xFFC3FFFFU
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT 16U
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK 0xFFFCFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT 8U
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX */
+#define ROGUE_CR_MTS_BGCTX 0x0BA0U
+#define ROGUE_CR_MTS_BGCTX_MASKFULL 0x0000000000003FFFULL
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_SHIFT 10U
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_CLRMSK 0xFFFFC3FFU
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE */
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE 0x0BA8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT 56U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK 0x00FFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT 48U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK 0xFF00FFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT 40U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT 32U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT 24U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT 16U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MTS_GPU_INT_STATUS */
+#define ROGUE_CR_MTS_GPU_INT_STATUS 0x0BB0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT 0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_SCHEDULE_ENABLE */
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE 0x0BC8U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS 0x0BD8U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR 0x0BE8U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS 0x10BD8U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR 0x10BE8U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS 0x20BD8U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR 0x20BE8U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS 0x30BD8U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR 0x30BE8U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS 0x40BD8U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR 0x40BE8U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS 0x50BD8U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR 0x50BE8U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS 0x60BD8U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR 0x60BE8U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS 0x70BD8U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR 0x70BE8U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_META_BOOT */
+#define ROGUE_CR_META_BOOT 0x0BF8U
+#define ROGUE_CR_META_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_BOOT_MODE_SHIFT 0U
+#define ROGUE_CR_META_BOOT_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_BOOT_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_GARTEN_SLC */
+#define ROGUE_CR_GARTEN_SLC 0x0BB8U
+#define ROGUE_CR_GARTEN_SLC_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT 0U
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_EN 0x00000001U
+
+/* Register ROGUE_CR_PPP */
+#define ROGUE_CR_PPP 0x0CD0U
+#define ROGUE_CR_PPP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CHECKSUM_SHIFT 0U
+#define ROGUE_CR_PPP_CHECKSUM_CLRMSK 0x00000000U
+
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_MASK 0x00000003U
+/* Top-left to bottom-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TL2BR 0x00000000U
+/* Top-right to bottom-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TR2BL 0x00000001U
+/* Bottom-left to top-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BL2TR 0x00000002U
+/* Bottom-right to top-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BR2TL 0x00000003U
+
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_MASK 0x00000003U
+/* Normal render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_NORM 0x00000000U
+/* Fast 2D render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_2D 0x00000002U
+/* Fast scale render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE 0x00000003U
+
+/* Register ROGUE_CR_ISP_RENDER */
+#define ROGUE_CR_ISP_RENDER 0x0F08U
+#define ROGUE_CR_ISP_RENDER_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT 8U
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN 0x00000100U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT 7U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN 0x00000080U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT 6U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN 0x00000040U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_SHIFT 5U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_EN 0x00000020U
+#define ROGUE_CR_ISP_RENDER_RESUME_SHIFT 4U
+#define ROGUE_CR_ISP_RENDER_RESUME_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ISP_RENDER_RESUME_EN 0x00000010U
+#define ROGUE_CR_ISP_RENDER_DIR_SHIFT 2U
+#define ROGUE_CR_ISP_RENDER_DIR_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_ISP_RENDER_DIR_TL2BR 0x00000000U
+#define ROGUE_CR_ISP_RENDER_DIR_TR2BL 0x00000004U
+#define ROGUE_CR_ISP_RENDER_DIR_BL2TR 0x00000008U
+#define ROGUE_CR_ISP_RENDER_DIR_BR2TL 0x0000000CU
+#define ROGUE_CR_ISP_RENDER_MODE_SHIFT 0U
+#define ROGUE_CR_ISP_RENDER_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_ISP_RENDER_MODE_NORM 0x00000000U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_2D 0x00000002U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_SCALE 0x00000003U
+
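The bare ROGUE_CR_ISP_RENDER_DIR_TYPE_* and ROGUE_CR_ISP_RENDER_MODE_TYPE_* values defined just before this register are simply the unshifted forms of the DIR and MODE field constants: 0x1 << 2 gives 0x4 for TR2BL, and 0x2 << 0 gives 0x2 for FAST_2D. A couple of illustrative compile-time checks (not in the patch, and assuming the kernel's single-argument static_assert) make the relationship explicit:

	static_assert((ROGUE_CR_ISP_RENDER_DIR_TYPE_TR2BL << ROGUE_CR_ISP_RENDER_DIR_SHIFT) ==
		      ROGUE_CR_ISP_RENDER_DIR_TR2BL);
	static_assert((ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_2D << ROGUE_CR_ISP_RENDER_MODE_SHIFT) ==
		      ROGUE_CR_ISP_RENDER_MODE_FAST_2D);
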
+/* Register ROGUE_CR_ISP_CTL */
+#define ROGUE_CR_ISP_CTL 0x0F38U
+#define ROGUE_CR_ISP_CTL_MASKFULL 0x00000000FFFFF3FFULL
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT 31U
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_EN 0x80000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_SHIFT 30U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_EN 0x40000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT 29U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_EN 0x20000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT 28U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_EN 0x10000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_SHIFT 27U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_EN 0x08000000U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_SHIFT 26U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_EN 0x04000000U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_SHIFT 25U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_EN 0x02000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT 23U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK 0xFE7FFFFFU
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 0x00000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 0x00800000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL 0x01000000U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT 21U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK 0xFF9FFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_SHIFT 20U
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_EN 0x00100000U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT 19U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN 0x00080000U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT 18U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN 0x00040000U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT 17U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN 0x00020000U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_SHIFT 16U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_EN 0x00010000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_SHIFT 12U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE 0x00000000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO 0x00001000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE 0x00002000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR 0x00003000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE 0x00004000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX 0x00005000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN 0x00006000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT 0x00007000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE 0x00008000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN 0x00009000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN 0x0000A000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE 0x0000B000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN 0x0000C000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN 0x0000D000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN 0x0000E000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN 0x0000F000U
+#define ROGUE_CR_ISP_CTL_VALID_ID_SHIFT 4U
+#define ROGUE_CR_ISP_CTL_VALID_ID_CLRMSK 0xFFFFFC0FU
+#define ROGUE_CR_ISP_CTL_UPASS_START_SHIFT 0U
+#define ROGUE_CR_ISP_CTL_UPASS_START_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_ISP_STATUS */
+#define ROGUE_CR_ISP_STATUS 0x1038U
+#define ROGUE_CR_ISP_STATUS_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_SHIFT 2U
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_EN 0x00000004U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_SHIFT 1U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ISP_STATUS_ACTIVE_EN 0x00000002U
+#define ROGUE_CR_ISP_STATUS_EOR_SHIFT 0U
+#define ROGUE_CR_ISP_STATUS_EOR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ISP_STATUS_EOR_EN 0x00000001U
+
+/* Register group: ROGUE_CR_ISP_XTP_RESUME, with 64 repeats */
+#define ROGUE_CR_ISP_XTP_RESUME_REPEATCOUNT 64U
+/* Register ROGUE_CR_ISP_XTP_RESUME0 */
+#define ROGUE_CR_ISP_XTP_RESUME0 0x3A00U
+#define ROGUE_CR_ISP_XTP_RESUME0_MASKFULL 0x00000000003FF3FFULL
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK 0xFFFFFC00U
+
+/* Register group: ROGUE_CR_ISP_XTP_STORE, with 32 repeats */
+#define ROGUE_CR_ISP_XTP_STORE_REPEATCOUNT 32U
+/* Register ROGUE_CR_ISP_XTP_STORE0 */
+#define ROGUE_CR_ISP_XTP_STORE0 0x3C00U
+#define ROGUE_CR_ISP_XTP_STORE0_MASKFULL 0x000000007F3FF3FFULL
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_SHIFT 30U
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_EN 0x40000000U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_SHIFT 29U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_EN 0x20000000U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT 28U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_EN 0x10000000U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_SHIFT 24U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_CLRMSK 0xF0FFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK 0xFFFFFC00U
+
+/* Register group: ROGUE_CR_BIF_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_BIF_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_CAT_BASE0 */
+#define ROGUE_CR_BIF_CAT_BASE0 0x1200U
+#define ROGUE_CR_BIF_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE 4096U
+
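The `_ALIGNSHIFT`/`_ALIGNSIZE` pair indicates that the ADDR field holds a 4 KiB-aligned address. A minimal sketch of programming the first page-catalogue base is shown below, under the assumption that `pc_addr` is already 4 KiB aligned; `rogue_write64()` is a hypothetical 64-bit MMIO helper, not a function from this patch.

```c
/* Illustrative only: write a page catalogue base into ROGUE_CR_BIF_CAT_BASE0.
 * Assumes pc_addr is aligned to ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4 KiB).
 */
static void rogue_set_cat_base0(void __iomem *regs, u64 pc_addr)
{
	u64 val;

	WARN_ON(pc_addr & (ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE - 1));

	/* Place the aligned address into the ADDR field and drop stray bits. */
	val = ((pc_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
	       << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT) &
	      ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK;

	rogue_write64(regs, ROGUE_CR_BIF_CAT_BASE0, val);
}
```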
+/* Register ROGUE_CR_BIF_CAT_BASE1 */
+#define ROGUE_CR_BIF_CAT_BASE1 0x1208U
+#define ROGUE_CR_BIF_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE2 */
+#define ROGUE_CR_BIF_CAT_BASE2 0x1210U
+#define ROGUE_CR_BIF_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE3 */
+#define ROGUE_CR_BIF_CAT_BASE3 0x1218U
+#define ROGUE_CR_BIF_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE4 */
+#define ROGUE_CR_BIF_CAT_BASE4 0x1220U
+#define ROGUE_CR_BIF_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE5 */
+#define ROGUE_CR_BIF_CAT_BASE5 0x1228U
+#define ROGUE_CR_BIF_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE6 */
+#define ROGUE_CR_BIF_CAT_BASE6 0x1230U
+#define ROGUE_CR_BIF_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE7 */
+#define ROGUE_CR_BIF_CAT_BASE7 0x1238U
+#define ROGUE_CR_BIF_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE_INDEX */
+#define ROGUE_CR_BIF_CAT_BASE_INDEX 0x1240U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_MASKFULL 0x00070707073F0707ULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT 48U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK 0xFFF8FFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT 40U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT 32U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT 24U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK 0xFFFFFFFFF8FFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT 19U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK 0xFFFFFFFFFFC7FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT 16U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK 0xFFFFFFFFFFF8FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT 8U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK 0xFFFFFFFFFFFFF8FFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_SHIFT 0U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK 0xFFFFFFFFFFFFFFF8ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0 0x1248U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0 0x1250U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 0x1260U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1 0x1268U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1 0x1270U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 0x1280U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_MMU_ENTRY_STATUS */
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS 0x1288U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_MASKFULL 0x000000FFFFFFF0F3ULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT 12U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT 4U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
+/* Register ROGUE_CR_BIF_MMU_ENTRY */
+#define ROGUE_CR_BIF_MMU_ENTRY 0x1290U
+#define ROGUE_CR_BIF_MMU_ENTRY_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_SHIFT 1U
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_EN 0x00000002U
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_CTRL_INVAL */
+#define ROGUE_CR_BIF_CTRL_INVAL 0x12A0U
+#define ROGUE_CR_BIF_CTRL_INVAL_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_SHIFT 3U
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_EN 0x00000008U
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_SHIFT 2U
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_EN 0x00000004U
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_SHIFT 1U
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_EN 0x00000002U
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_SHIFT 0U
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_CTRL */
+#define ROGUE_CR_BIF_CTRL 0x12A8U
+#define ROGUE_CR_BIF_CTRL__XE_MEM__MASKFULL 0x000000000000033FULL
+#define ROGUE_CR_BIF_CTRL_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT 9U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_EN 0x00000200U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT 8U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN 0x00000100U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT 7U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN 0x00000080U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT 6U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN 0x00000040U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT 5U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN 0x00000020U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT 4U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN 0x00000010U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_SHIFT 3U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_EN 0x00000008U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT 2U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_EN 0x00000004U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT 1U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN 0x00000002U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT 0U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS 0x12B0U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS 0x12B8U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL 0x001FFFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT 52U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN 0x0010000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT 46U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
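Going the other way, the same macros can be used to decode a fault status word. The sketch below extracts the faulting address (16-byte granular, per ADDRESS_ALIGNSIZE) and the RNW bit from a ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS value; reading RNW as "read-not-write" is an assumption, not something stated by this header.

```c
/* Illustrative only: decode a BIF bank0 fault request status value.
 * 'status' would come from a 64-bit read of ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS.
 */
static void rogue_decode_bank0_fault(u64 status)
{
	u64 addr = ((status & ~ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK)
		    >> ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT)
		   << ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
	bool read_fault = status & ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN;

	pr_debug("BIF bank0 fault at 0x%llx (%s)\n", addr,
		 read_fault ? "read" : "write");
}
```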
+/* Register ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS 0x12C0U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS 0x12C8U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_BIF_MMU_STATUS */
+#define ROGUE_CR_BIF_MMU_STATUS 0x12D0U
+#define ROGUE_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL 0x000000001FFFFFF7ULL
+#define ROGUE_CR_BIF_MMU_STATUS_MASKFULL 0x000000001FFFFFF7ULL
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT 28U
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_EN 0x10000000U
+#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register group: ROGUE_CR_BIF_TILING_CFG, with 8 repeats */
+#define ROGUE_CR_BIF_TILING_CFG_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_TILING_CFG0 */
+#define ROGUE_CR_BIF_TILING_CFG0 0x12D8U
+#define ROGUE_CR_BIF_TILING_CFG0_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG1 */
+#define ROGUE_CR_BIF_TILING_CFG1 0x12E0U
+#define ROGUE_CR_BIF_TILING_CFG1_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG2 */
+#define ROGUE_CR_BIF_TILING_CFG2 0x12E8U
+#define ROGUE_CR_BIF_TILING_CFG2_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG3 */
+#define ROGUE_CR_BIF_TILING_CFG3 0x12F0U
+#define ROGUE_CR_BIF_TILING_CFG3_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG4 */
+#define ROGUE_CR_BIF_TILING_CFG4 0x12F8U
+#define ROGUE_CR_BIF_TILING_CFG4_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG5 */
+#define ROGUE_CR_BIF_TILING_CFG5 0x1300U
+#define ROGUE_CR_BIF_TILING_CFG5_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG6 */
+#define ROGUE_CR_BIF_TILING_CFG6 0x1308U
+#define ROGUE_CR_BIF_TILING_CFG6_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG7 */
+#define ROGUE_CR_BIF_TILING_CFG7 0x1310U
+#define ROGUE_CR_BIF_TILING_CFG7_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_READS_EXT_STATUS */
+#define ROGUE_CR_BIF_READS_EXT_STATUS 0x1320U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_SHIFT 16U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK 0xF000FFFFU
+#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT 0U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_READS_INT_STATUS */
+#define ROGUE_CR_BIF_READS_INT_STATUS 0x1328U
+#define ROGUE_CR_BIF_READS_INT_STATUS_MASKFULL 0x0000000007FFFFFFULL
+#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_SHIFT 16U
+#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_CLRMSK 0xF800FFFFU
+#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_SHIFT 0U
+#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_READS_INT_STATUS */
+#define ROGUE_CR_BIFPM_READS_INT_STATUS 0x1330U
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT 0U
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_READS_EXT_STATUS */
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS 0x1338U
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT 0U
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_STATUS_MMU */
+#define ROGUE_CR_BIFPM_STATUS_MMU 0x1350U
+#define ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT 0U
+#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_STATUS_MMU */
+#define ROGUE_CR_BIF_STATUS_MMU 0x1358U
+#define ROGUE_CR_BIF_STATUS_MMU_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_SHIFT 0U
+#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_FAULT_READ */
+#define ROGUE_CR_BIF_FAULT_READ 0x13E0U
+#define ROGUE_CR_BIF_FAULT_READ_MASKFULL 0x000000FFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS */
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS 0x1430U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS */
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS 0x1438U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_MCU_FENCE */
+#define ROGUE_CR_MCU_FENCE 0x1740U
+#define ROGUE_CR_MCU_FENCE_MASKFULL 0x000007FFFFFFFFE0ULL
+#define ROGUE_CR_MCU_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MCU_FENCE_DM_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_MCU_FENCE_DM_VERTEX 0x0000000000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_PIXEL 0x0000010000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_COMPUTE 0x0000020000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_RAY_VERTEX 0x0000030000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_RAY 0x0000040000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_FASTRENDER 0x0000050000000000ULL
+#define ROGUE_CR_MCU_FENCE_ADDR_SHIFT 5U
+#define ROGUE_CR_MCU_FENCE_ADDR_CLRMSK 0xFFFFFF000000001FULL
+#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSHIFT 5U
+#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSIZE 32U
+
+/* Register group: ROGUE_CR_SCRATCH, with 16 repeats */
+#define ROGUE_CR_SCRATCH_REPEATCOUNT 16U
+/* Register ROGUE_CR_SCRATCH0 */
+#define ROGUE_CR_SCRATCH0 0x1A00U
+#define ROGUE_CR_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH1 */
+#define ROGUE_CR_SCRATCH1 0x1A08U
+#define ROGUE_CR_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH2 */
+#define ROGUE_CR_SCRATCH2 0x1A10U
+#define ROGUE_CR_SCRATCH2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH2_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH3 */
+#define ROGUE_CR_SCRATCH3 0x1A18U
+#define ROGUE_CR_SCRATCH3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH3_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH4 */
+#define ROGUE_CR_SCRATCH4 0x1A20U
+#define ROGUE_CR_SCRATCH4_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH4_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH4_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH5 */
+#define ROGUE_CR_SCRATCH5 0x1A28U
+#define ROGUE_CR_SCRATCH5_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH5_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH5_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH6 */
+#define ROGUE_CR_SCRATCH6 0x1A30U
+#define ROGUE_CR_SCRATCH6_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH6_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH6_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH7 */
+#define ROGUE_CR_SCRATCH7 0x1A38U
+#define ROGUE_CR_SCRATCH7_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH7_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH7_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH8 */
+#define ROGUE_CR_SCRATCH8 0x1A40U
+#define ROGUE_CR_SCRATCH8_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH8_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH8_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH9 */
+#define ROGUE_CR_SCRATCH9 0x1A48U
+#define ROGUE_CR_SCRATCH9_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH9_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH9_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH10 */
+#define ROGUE_CR_SCRATCH10 0x1A50U
+#define ROGUE_CR_SCRATCH10_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH10_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH10_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH11 */
+#define ROGUE_CR_SCRATCH11 0x1A58U
+#define ROGUE_CR_SCRATCH11_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH11_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH11_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH12 */
+#define ROGUE_CR_SCRATCH12 0x1A60U
+#define ROGUE_CR_SCRATCH12_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH12_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH12_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH13 */
+#define ROGUE_CR_SCRATCH13 0x1A68U
+#define ROGUE_CR_SCRATCH13_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH13_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH13_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH14 */
+#define ROGUE_CR_SCRATCH14 0x1A70U
+#define ROGUE_CR_SCRATCH14_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH14_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH14_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH15 */
+#define ROGUE_CR_SCRATCH15 0x1A78U
+#define ROGUE_CR_SCRATCH15_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH15_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH15_DATA_CLRMSK 0x00000000U
+
+/* Register group: ROGUE_CR_OS0_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS0_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS0_SCRATCH0 */
+#define ROGUE_CR_OS0_SCRATCH0 0x1A80U
+#define ROGUE_CR_OS0_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS0_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS0_SCRATCH1 */
+#define ROGUE_CR_OS0_SCRATCH1 0x1A88U
+#define ROGUE_CR_OS0_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS0_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS0_SCRATCH2 */
+#define ROGUE_CR_OS0_SCRATCH2 0x1A90U
+#define ROGUE_CR_OS0_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS0_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS0_SCRATCH3 */
+#define ROGUE_CR_OS0_SCRATCH3 0x1A98U
+#define ROGUE_CR_OS0_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS0_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS1_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS1_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS1_SCRATCH0 */
+#define ROGUE_CR_OS1_SCRATCH0 0x11A80U
+#define ROGUE_CR_OS1_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS1_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS1_SCRATCH1 */
+#define ROGUE_CR_OS1_SCRATCH1 0x11A88U
+#define ROGUE_CR_OS1_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS1_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS1_SCRATCH2 */
+#define ROGUE_CR_OS1_SCRATCH2 0x11A90U
+#define ROGUE_CR_OS1_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS1_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS1_SCRATCH3 */
+#define ROGUE_CR_OS1_SCRATCH3 0x11A98U
+#define ROGUE_CR_OS1_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS1_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS2_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS2_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS2_SCRATCH0 */
+#define ROGUE_CR_OS2_SCRATCH0 0x21A80U
+#define ROGUE_CR_OS2_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS2_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS2_SCRATCH1 */
+#define ROGUE_CR_OS2_SCRATCH1 0x21A88U
+#define ROGUE_CR_OS2_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS2_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS2_SCRATCH2 */
+#define ROGUE_CR_OS2_SCRATCH2 0x21A90U
+#define ROGUE_CR_OS2_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS2_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS2_SCRATCH3 */
+#define ROGUE_CR_OS2_SCRATCH3 0x21A98U
+#define ROGUE_CR_OS2_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS2_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS3_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS3_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS3_SCRATCH0 */
+#define ROGUE_CR_OS3_SCRATCH0 0x31A80U
+#define ROGUE_CR_OS3_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS3_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS3_SCRATCH1 */
+#define ROGUE_CR_OS3_SCRATCH1 0x31A88U
+#define ROGUE_CR_OS3_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS3_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS3_SCRATCH2 */
+#define ROGUE_CR_OS3_SCRATCH2 0x31A90U
+#define ROGUE_CR_OS3_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS3_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS3_SCRATCH3 */
+#define ROGUE_CR_OS3_SCRATCH3 0x31A98U
+#define ROGUE_CR_OS3_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS3_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS4_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS4_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS4_SCRATCH0 */
+#define ROGUE_CR_OS4_SCRATCH0 0x41A80U
+#define ROGUE_CR_OS4_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS4_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS4_SCRATCH1 */
+#define ROGUE_CR_OS4_SCRATCH1 0x41A88U
+#define ROGUE_CR_OS4_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS4_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS4_SCRATCH2 */
+#define ROGUE_CR_OS4_SCRATCH2 0x41A90U
+#define ROGUE_CR_OS4_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS4_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS4_SCRATCH3 */
+#define ROGUE_CR_OS4_SCRATCH3 0x41A98U
+#define ROGUE_CR_OS4_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS4_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS5_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS5_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS5_SCRATCH0 */
+#define ROGUE_CR_OS5_SCRATCH0 0x51A80U
+#define ROGUE_CR_OS5_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS5_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS5_SCRATCH1 */
+#define ROGUE_CR_OS5_SCRATCH1 0x51A88U
+#define ROGUE_CR_OS5_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS5_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS5_SCRATCH2 */
+#define ROGUE_CR_OS5_SCRATCH2 0x51A90U
+#define ROGUE_CR_OS5_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS5_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS5_SCRATCH3 */
+#define ROGUE_CR_OS5_SCRATCH3 0x51A98U
+#define ROGUE_CR_OS5_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS5_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS6_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS6_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS6_SCRATCH0 */
+#define ROGUE_CR_OS6_SCRATCH0 0x61A80U
+#define ROGUE_CR_OS6_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS6_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS6_SCRATCH1 */
+#define ROGUE_CR_OS6_SCRATCH1 0x61A88U
+#define ROGUE_CR_OS6_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS6_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS6_SCRATCH2 */
+#define ROGUE_CR_OS6_SCRATCH2 0x61A90U
+#define ROGUE_CR_OS6_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS6_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS6_SCRATCH3 */
+#define ROGUE_CR_OS6_SCRATCH3 0x61A98U
+#define ROGUE_CR_OS6_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS6_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS7_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS7_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS7_SCRATCH0 */
+#define ROGUE_CR_OS7_SCRATCH0 0x71A80U
+#define ROGUE_CR_OS7_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS7_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS7_SCRATCH1 */
+#define ROGUE_CR_OS7_SCRATCH1 0x71A88U
+#define ROGUE_CR_OS7_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS7_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS7_SCRATCH2 */
+#define ROGUE_CR_OS7_SCRATCH2 0x71A90U
+#define ROGUE_CR_OS7_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS7_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS7_SCRATCH3 */
+#define ROGUE_CR_OS7_SCRATCH3 0x71A98U
+#define ROGUE_CR_OS7_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS7_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR */
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR 0x2700U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT 0U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK 0xFFFF0000U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN */
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN 0x2708U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL 0x000000FFFFFFFFF0ULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE 16U
+
+/* Register group: ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT 16U
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 0x3000U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 0x3008U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 0x3010U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 0x3018U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 0x3020U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 0x3028U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 0x3030U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 0x3038U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 0x3040U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 0x3048U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 0x3050U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 0x3058U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 0x3060U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 0x3068U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 0x3070U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 0x3078U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_BOOT */
+#define ROGUE_CR_FWCORE_BOOT 0x3090U
+#define ROGUE_CR_FWCORE_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_SHIFT 0U
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_RESET_ADDR */
+#define ROGUE_CR_FWCORE_RESET_ADDR 0x3098U
+#define ROGUE_CR_FWCORE_RESET_ADDR_MASKFULL 0x00000000FFFFFFFEULL
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_SHIFT 1U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK 0x00000001U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT 1U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE 2U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR */
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR 0x30A0U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL 0x00000000FFFFFFFEULL
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT 1U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK 0x00000001U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT 1U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE 2U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT */
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT 0x30A8U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT 0U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS */
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS 0x30B0U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL 0x000000000000F771ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS */
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS 0x30B8U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL 0x001FFFFFFFFFFFF0ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT 52U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN 0x0010000000000000ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT 46U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_FWCORE_MEM_CTRL_INVAL */
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL 0x30C0U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT 3U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN 0x00000008U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT 2U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_EN 0x00000004U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT 1U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_EN 0x00000002U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_MMU_STATUS */
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS 0x30C8U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT 20U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT 2U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN 0x00000004U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT 1U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN 0x00000002U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS */
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS 0x30D8U
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK 0xFFFFF000U
+
+/* Register ROGUE_CR_FWCORE_MEM_READS_INT_STATUS */
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS 0x30E0U
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK 0xFFFFF800U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_FENCE */
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE 0x30E8U
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT 0U
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_EN 0x00000001U
+
+/* Register group: ROGUE_CR_FWCORE_MEM_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE0 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0 0x30F0U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE1 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1 0x30F8U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE2 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2 0x3100U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE3 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3 0x3108U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE4 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4 0x3110U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE5 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5 0x3118U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE6 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6 0x3120U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE7 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7 0x3128U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_WDT_RESET */
+#define ROGUE_CR_FWCORE_WDT_RESET 0x3130U
+#define ROGUE_CR_FWCORE_WDT_RESET_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_WDT_CTRL */
+#define ROGUE_CR_FWCORE_WDT_CTRL 0x3138U
+#define ROGUE_CR_FWCORE_WDT_CTRL_MASKFULL 0x00000000FFFF1F01ULL
+#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_SHIFT 16U
+#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT 8U
+#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK 0xFFFFE0FFU
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_WDT_COUNT */
+#define ROGUE_CR_FWCORE_WDT_COUNT 0x3140U
+#define ROGUE_CR_FWCORE_WDT_COUNT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK 0x00000000U
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED0, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED00 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED00 0x3400U
+#define ROGUE_CR_FWCORE_DMI_RESERVED00_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED01 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED01 0x3408U
+#define ROGUE_CR_FWCORE_DMI_RESERVED01_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED02 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED02 0x3410U
+#define ROGUE_CR_FWCORE_DMI_RESERVED02_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED03 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED03 0x3418U
+#define ROGUE_CR_FWCORE_DMI_RESERVED03_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DATA0 */
+#define ROGUE_CR_FWCORE_DMI_DATA0 0x3420U
+#define ROGUE_CR_FWCORE_DMI_DATA0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DATA1 */
+#define ROGUE_CR_FWCORE_DMI_DATA1 0x3428U
+#define ROGUE_CR_FWCORE_DMI_DATA1_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED1, with 5 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT 5U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED10 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED10 0x3430U
+#define ROGUE_CR_FWCORE_DMI_RESERVED10_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED11 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED11 0x3438U
+#define ROGUE_CR_FWCORE_DMI_RESERVED11_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED12 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED12 0x3440U
+#define ROGUE_CR_FWCORE_DMI_RESERVED12_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED13 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED13 0x3448U
+#define ROGUE_CR_FWCORE_DMI_RESERVED13_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED14 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED14 0x3450U
+#define ROGUE_CR_FWCORE_DMI_RESERVED14_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DMCONTROL */
+#define ROGUE_CR_FWCORE_DMI_DMCONTROL 0x3480U
+#define ROGUE_CR_FWCORE_DMI_DMCONTROL_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DMSTATUS */
+#define ROGUE_CR_FWCORE_DMI_DMSTATUS 0x3488U
+#define ROGUE_CR_FWCORE_DMI_DMSTATUS_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED2, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED20 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED20 0x3490U
+#define ROGUE_CR_FWCORE_DMI_RESERVED20_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED21 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED21 0x3498U
+#define ROGUE_CR_FWCORE_DMI_RESERVED21_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED22 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED22 0x34A0U
+#define ROGUE_CR_FWCORE_DMI_RESERVED22_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED23 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED23 0x34A8U
+#define ROGUE_CR_FWCORE_DMI_RESERVED23_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_ABSTRACTCS */
+#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS 0x34B0U
+#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_COMMAND */
+#define ROGUE_CR_FWCORE_DMI_COMMAND 0x34B8U
+#define ROGUE_CR_FWCORE_DMI_COMMAND_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBCS */
+#define ROGUE_CR_FWCORE_DMI_SBCS 0x35C0U
+#define ROGUE_CR_FWCORE_DMI_SBCS_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBADDRESS0 */
+#define ROGUE_CR_FWCORE_DMI_SBADDRESS0 0x35C8U
+#define ROGUE_CR_FWCORE_DMI_SBADDRESS0_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED3, with 2 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT 2U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED30 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED30 0x34D0U
+#define ROGUE_CR_FWCORE_DMI_RESERVED30_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED31 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED31 0x34D8U
+#define ROGUE_CR_FWCORE_DMI_RESERVED31_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_SBDATA, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_SBDATA_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA0 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA0 0x35E0U
+#define ROGUE_CR_FWCORE_DMI_SBDATA0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA1 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA1 0x35E8U
+#define ROGUE_CR_FWCORE_DMI_SBDATA1_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA2 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA2 0x35F0U
+#define ROGUE_CR_FWCORE_DMI_SBDATA2_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA3 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA3 0x35F8U
+#define ROGUE_CR_FWCORE_DMI_SBDATA3_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_HALTSUM0 */
+#define ROGUE_CR_FWCORE_DMI_HALTSUM0 0x3600U
+#define ROGUE_CR_FWCORE_DMI_HALTSUM0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC_CTRL_MISC */
+#define ROGUE_CR_SLC_CTRL_MISC 0x3800U
+#define ROGUE_CR_SLC_CTRL_MISC_MASKFULL 0xFFFFFFFF01FF010FULL
+#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT 32U
+#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT 24U
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 16U
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE 0x0000000000000000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE 0x0000000000010000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 0x0000000000100000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 0x0000000000110000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 0x0000000000200000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE 0x0000000000210000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_EN 0x0000000000000100ULL
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN 0x0000000000000008ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN 0x0000000000000004ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN 0x0000000000000002ULL
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC_CTRL_FLUSH_INVAL */
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL 0x3818U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL 0x0000000080000FFFULL
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT 31U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN 0x80000000U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT 11U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN 0x00000800U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT 10U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN 0x00000400U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT 9U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN 0x00000200U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN 0x00000100U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT 7U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN 0x00000080U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT 6U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN 0x00000040U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT 5U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN 0x00000020U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT 4U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN 0x00000010U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN 0x00000008U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN 0x00000002U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_STATUS0 */
+#define ROGUE_CR_SLC_STATUS0 0x3820U
+#define ROGUE_CR_SLC_STATUS0_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT 2U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN 0x00000004U
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_SHIFT 1U
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_EN 0x00000002U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_CTRL_BYPASS */
+#define ROGUE_CR_SLC_CTRL_BYPASS 0x3828U
+#define ROGUE_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL 0x0FFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT 59U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN 0x0800000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT 58U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN 0x0400000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT 57U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN 0x0200000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT 56U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN 0x0100000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT 55U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN 0x0080000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT 54U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT 53U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN 0x0020000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT 52U
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN 0x0010000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT 51U
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN 0x0008000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT 50U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_EN 0x0004000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT 49U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN 0x0002000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT 48U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN 0x0001000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT 47U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN 0x0000800000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT 46U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN 0x0000400000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT 45U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_EN 0x0000200000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT 44U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT 43U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT 42U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_EN 0x0000040000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT 41U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_EN 0x0000020000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT 40U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_EN 0x0000010000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT 39U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT 38U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN 0x0000004000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT 37U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN 0x0000002000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT 36U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_EN 0x0000001000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT 35U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN 0x0000000800000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT 34U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN 0x0000000400000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT 33U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN 0x0000000200000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT 32U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN 0x0000000100000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT 31U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN 0x0000000080000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT 30U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN 0x0000000040000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT 29U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT 28U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT 27U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN 0x0000000008000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT 26U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_EN 0x0000000004000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT 25U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN 0x0000000002000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT 24U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_EN 0x0000000001000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT 23U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN 0x0000000000800000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT 22U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_EN 0x0000000000400000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT 21U
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN 0x0000000000200000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT 20U
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_EN 0x0000000000100000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT 19U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_EN 0x0000000000080000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT 18U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_EN 0x0000000000040000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT 17U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_EN 0x0000000000020000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT 16U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN 0x0000000000010000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT 15U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN 0x0000000000008000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT 14U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_EN 0x0000000000004000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT 13U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_EN 0x0000000000002000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT 12U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_EN 0x0000000000001000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT 11U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN 0x0000000000000800ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT 10U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN 0x0000000000000400ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT 9U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN 0x0000000000000200ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_EN 0x0000000000000100ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT 7U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_EN 0x0000000000000080ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT 6U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_EN 0x0000000000000040ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT 5U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN 0x0000000000000020ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT 4U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_EN 0x0000000000000010ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN 0x0000000000000008ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN 0x0000000000000004ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_EN 0x0000000000000002ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC_STATUS1 */
+#define ROGUE_CR_SLC_STATUS1 0x3870U
+#define ROGUE_CR_SLC_STATUS1_MASKFULL 0x800003FF03FFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_PAUSED_SHIFT 63U
+#define ROGUE_CR_SLC_STATUS1_PAUSED_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_PAUSED_EN 0x8000000000000000ULL
+#define ROGUE_CR_SLC_STATUS1_READS1_SHIFT 32U
+#define ROGUE_CR_SLC_STATUS1_READS1_CLRMSK 0xFFFFFC00FFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_READS0_SHIFT 16U
+#define ROGUE_CR_SLC_STATUS1_READS0_CLRMSK 0xFFFFFFFFFC00FFFFULL
+#define ROGUE_CR_SLC_STATUS1_READS1_EXT_SHIFT 8U
+#define ROGUE_CR_SLC_STATUS1_READS1_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_SLC_STATUS1_READS0_EXT_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS1_READS0_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_SLC_IDLE */
+#define ROGUE_CR_SLC_IDLE 0x3898U
+#define ROGUE_CR_SLC_IDLE__XE_MEM__MASKFULL 0x00000000000003FFULL
+#define ROGUE_CR_SLC_IDLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_SHIFT 9U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_EN 0x00000200U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_SHIFT 8U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_EN 0x00000100U
+#define ROGUE_CR_SLC_IDLE_IMGBV4_SHIFT 7U
+#define ROGUE_CR_SLC_IDLE_IMGBV4_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SLC_IDLE_IMGBV4_EN 0x00000080U
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_SHIFT 6U
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_EN 0x00000040U
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_SHIFT 5U
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_EN 0x00000020U
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_SHIFT 4U
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_EN 0x00000010U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_SHIFT 3U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_EN 0x00000008U
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_SHIFT 2U
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_EN 0x00000004U
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_SHIFT 1U
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_EN 0x00000002U
+#define ROGUE_CR_SLC_IDLE_CBAR_SHIFT 0U
+#define ROGUE_CR_SLC_IDLE_CBAR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_IDLE_CBAR_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_STATUS2 */
+#define ROGUE_CR_SLC_STATUS2 0x3908U
+#define ROGUE_CR_SLC_STATUS2_MASKFULL 0x000003FF03FFFFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS3_SHIFT 32U
+#define ROGUE_CR_SLC_STATUS2_READS3_CLRMSK 0xFFFFFC00FFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS2_SHIFT 16U
+#define ROGUE_CR_SLC_STATUS2_READS2_CLRMSK 0xFFFFFFFFFC00FFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS3_EXT_SHIFT 8U
+#define ROGUE_CR_SLC_STATUS2_READS3_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_SLC_STATUS2_READS2_EXT_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS2_READS2_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_SLC_CTRL_MISC2 */
+#define ROGUE_CR_SLC_CTRL_MISC2 0x3930U
+#define ROGUE_CR_SLC_CTRL_MISC2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE */
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE 0x3938U
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT 0U
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_UVS0_CHECKSUM */
+#define ROGUE_CR_USC_UVS0_CHECKSUM 0x5000U
+#define ROGUE_CR_USC_UVS0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS1_CHECKSUM */
+#define ROGUE_CR_USC_UVS1_CHECKSUM 0x5008U
+#define ROGUE_CR_USC_UVS1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS2_CHECKSUM */
+#define ROGUE_CR_USC_UVS2_CHECKSUM 0x5010U
+#define ROGUE_CR_USC_UVS2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS3_CHECKSUM */
+#define ROGUE_CR_USC_UVS3_CHECKSUM 0x5018U
+#define ROGUE_CR_USC_UVS3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PPP_SIGNATURE */
+#define ROGUE_CR_PPP_SIGNATURE 0x5020U
+#define ROGUE_CR_PPP_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_PPP_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TE_SIGNATURE */
+#define ROGUE_CR_TE_SIGNATURE 0x5028U
+#define ROGUE_CR_TE_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TE_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_TE_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TE_CHECKSUM */
+#define ROGUE_CR_TE_CHECKSUM 0x5110U
+#define ROGUE_CR_TE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVB_CHECKSUM */
+#define ROGUE_CR_USC_UVB_CHECKSUM 0x5118U
+#define ROGUE_CR_USC_UVB_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VCE_CHECKSUM */
+#define ROGUE_CR_VCE_CHECKSUM 0x5030U
+#define ROGUE_CR_VCE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VCE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_VCE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_ISP_PDS_CHECKSUM */
+#define ROGUE_CR_ISP_PDS_CHECKSUM 0x5038U
+#define ROGUE_CR_ISP_PDS_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_ISP_TPF_CHECKSUM */
+#define ROGUE_CR_ISP_TPF_CHECKSUM 0x5040U
+#define ROGUE_CR_ISP_TPF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TFPU_PLANE0_CHECKSUM */
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM 0x5048U
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TFPU_PLANE1_CHECKSUM */
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM 0x5050U
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_CHECKSUM */
+#define ROGUE_CR_PBE_CHECKSUM 0x5058U
+#define ROGUE_CR_PBE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_PBE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PDS_DOUTM_STM_SIGNATURE */
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE 0x5060U
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_IFPU_ISP_CHECKSUM */
+#define ROGUE_CR_IFPU_ISP_CHECKSUM 0x5068U
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS4_CHECKSUM */
+#define ROGUE_CR_USC_UVS4_CHECKSUM 0x5100U
+#define ROGUE_CR_USC_UVS4_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS5_CHECKSUM */
+#define ROGUE_CR_USC_UVS5_CHECKSUM 0x5108U
+#define ROGUE_CR_USC_UVS5_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PPP_CLIP_CHECKSUM */
+#define ROGUE_CR_PPP_CLIP_CHECKSUM 0x5120U
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_PHASE */
+#define ROGUE_CR_PERF_TA_PHASE 0x6008U
+#define ROGUE_CR_PERF_TA_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_PHASE */
+#define ROGUE_CR_PERF_3D_PHASE 0x6010U
+#define ROGUE_CR_PERF_3D_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_3D_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_COMPUTE_PHASE */
+#define ROGUE_CR_PERF_COMPUTE_PHASE 0x6018U
+#define ROGUE_CR_PERF_COMPUTE_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_CYCLE */
+#define ROGUE_CR_PERF_TA_CYCLE 0x6020U
+#define ROGUE_CR_PERF_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_CYCLE */
+#define ROGUE_CR_PERF_3D_CYCLE 0x6028U
+#define ROGUE_CR_PERF_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_3D_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_COMPUTE_CYCLE */
+#define ROGUE_CR_PERF_COMPUTE_CYCLE 0x6030U
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_OR_3D_CYCLE */
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE 0x6038U
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_INITIAL_TA_CYCLE */
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE 0x6040U
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_READ_STALL */
+#define ROGUE_CR_PERF_SLC0_READ_STALL 0x60B8U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL 0x60C0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_READ_STALL */
+#define ROGUE_CR_PERF_SLC1_READ_STALL 0x60E0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL 0x60E8U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_READ_STALL */
+#define ROGUE_CR_PERF_SLC2_READ_STALL 0x6158U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL 0x6160U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_READ_STALL */
+#define ROGUE_CR_PERF_SLC3_READ_STALL 0x6180U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL 0x6188U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_SPINUP */
+#define ROGUE_CR_PERF_3D_SPINUP 0x6220U
+#define ROGUE_CR_PERF_3D_SPINUP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_SHIFT 0U
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_AXI_ACE_LITE_CONFIGURATION */
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION 0x38C0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL 0x00003FFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT 45U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN 0x0000200000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT 37U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK 0xFFFFE01FFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT 36U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK \
+ 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN \
+ 0x0000001000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT 35U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN 0x0000000800000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT 34U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN 0x0000000400000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT 30U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFC3FFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT 26U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK 0xFFFFFFFFC3FFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT 22U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK 0xFFFFFFFFFC3FFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT 20U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT 18U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT 16U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT 14U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT 12U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT 10U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT 8U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT 4U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT 0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFFF0ULL
+
+/* Register ROGUE_CR_POWER_ESTIMATE_RESULT */
+#define ROGUE_CR_POWER_ESTIMATE_RESULT 0x6328U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT 0U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF */
+#define ROGUE_CR_TA_PERF 0x7600U
+#define ROGUE_CR_TA_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TA_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TA_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TA_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TA_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TA_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TA_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TA_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TA_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TA_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TA_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TA_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TA_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TA_PERF_SELECT0 */
+#define ROGUE_CR_TA_PERF_SELECT0 0x7608U
+#define ROGUE_CR_TA_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT1 */
+#define ROGUE_CR_TA_PERF_SELECT1 0x7610U
+#define ROGUE_CR_TA_PERF_SELECT1_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT2 */
+#define ROGUE_CR_TA_PERF_SELECT2 0x7618U
+#define ROGUE_CR_TA_PERF_SELECT2_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT3 */
+#define ROGUE_CR_TA_PERF_SELECT3 0x7620U
+#define ROGUE_CR_TA_PERF_SELECT3_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECTED_BITS */
+#define ROGUE_CR_TA_PERF_SELECTED_BITS 0x7648U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_0 */
+#define ROGUE_CR_TA_PERF_COUNTER_0 0x7650U
+#define ROGUE_CR_TA_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_1 */
+#define ROGUE_CR_TA_PERF_COUNTER_1 0x7658U
+#define ROGUE_CR_TA_PERF_COUNTER_1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_2 */
+#define ROGUE_CR_TA_PERF_COUNTER_2 0x7660U
+#define ROGUE_CR_TA_PERF_COUNTER_2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_3 */
+#define ROGUE_CR_TA_PERF_COUNTER_3 0x7668U
+#define ROGUE_CR_TA_PERF_COUNTER_3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_CLRMSK 0x00000000U
+
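+/*
+ * Editorial note (illustrative sketch, not part of the original header): the
+ * per-block performance counters defined here share a common layout: a *_PERF
+ * control register (CTRL_ENABLE plus per-counter CLR bits), *_PERF_SELECTn
+ * event-select registers (BIT_SELECT, GROUP_SELECT, MODE, BATCH_MIN/MAX
+ * fields) and *_PERF_COUNTER_n result registers. A hypothetical setup of TA
+ * counter 0, with group/bits chosen by the caller and assumed pvr_cr_*
+ * accessors, might look like:
+ *
+ *   u64 sel = ((u64)group << ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) |
+ *             ((u64)bits << ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT);
+ *   pvr_cr_write64(pvr_dev, ROGUE_CR_TA_PERF_SELECT0, sel);
+ *   pvr_cr_write32(pvr_dev, ROGUE_CR_TA_PERF, ROGUE_CR_TA_PERF_CTRL_ENABLE_EN);
+ *   // ... run the workload, then read the result
+ *   count = pvr_cr_read32(pvr_dev, ROGUE_CR_TA_PERF_COUNTER_0);
+ */
+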
+/* Register ROGUE_CR_RASTERISATION_PERF */
+#define ROGUE_CR_RASTERISATION_PERF 0x7700U
+#define ROGUE_CR_RASTERISATION_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_SELECT0 */
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0 0x7708U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_RASTERISATION_PERF_COUNTER_0 */
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0 0x7750U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF 0x7800U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 0x7808U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 0x7850U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF */
+#define ROGUE_CR_TPU_MCU_L0_PERF 0x7900U
+#define ROGUE_CR_TPU_MCU_L0_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 0x7908U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 0x7950U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_PERF */
+#define ROGUE_CR_USC_PERF 0x8100U
+#define ROGUE_CR_USC_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_USC_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_USC_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_USC_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_USC_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_USC_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_USC_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_USC_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_USC_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_USC_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_USC_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_USC_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_USC_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_PERF_SELECT0 */
+#define ROGUE_CR_USC_PERF_SELECT0 0x8108U
+#define ROGUE_CR_USC_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_USC_PERF_COUNTER_0 */
+#define ROGUE_CR_USC_PERF_COUNTER_0 0x8150U
+#define ROGUE_CR_USC_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_IDLE */
+#define ROGUE_CR_JONES_IDLE 0x8328U
+#define ROGUE_CR_JONES_IDLE_MASKFULL 0x0000000000007FFFULL
+#define ROGUE_CR_JONES_IDLE_TDM_SHIFT 14U
+#define ROGUE_CR_JONES_IDLE_TDM_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_JONES_IDLE_TDM_EN 0x00004000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_SHIFT 13U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_EN 0x00002000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_SHIFT 12U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_EN 0x00001000U
+#define ROGUE_CR_JONES_IDLE_MMU_SHIFT 11U
+#define ROGUE_CR_JONES_IDLE_MMU_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_JONES_IDLE_MMU_EN 0x00000800U
+#define ROGUE_CR_JONES_IDLE_TLA_SHIFT 10U
+#define ROGUE_CR_JONES_IDLE_TLA_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_JONES_IDLE_TLA_EN 0x00000400U
+#define ROGUE_CR_JONES_IDLE_GARTEN_SHIFT 9U
+#define ROGUE_CR_JONES_IDLE_GARTEN_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_JONES_IDLE_GARTEN_EN 0x00000200U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_SHIFT 8U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_JONES_IDLE_HOSTIF_EN 0x00000100U
+#define ROGUE_CR_JONES_IDLE_SOCIF_SHIFT 7U
+#define ROGUE_CR_JONES_IDLE_SOCIF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_JONES_IDLE_SOCIF_EN 0x00000080U
+#define ROGUE_CR_JONES_IDLE_TILING_SHIFT 6U
+#define ROGUE_CR_JONES_IDLE_TILING_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_JONES_IDLE_TILING_EN 0x00000040U
+#define ROGUE_CR_JONES_IDLE_IPP_SHIFT 5U
+#define ROGUE_CR_JONES_IDLE_IPP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_JONES_IDLE_IPP_EN 0x00000020U
+#define ROGUE_CR_JONES_IDLE_USCS_SHIFT 4U
+#define ROGUE_CR_JONES_IDLE_USCS_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_IDLE_USCS_EN 0x00000010U
+#define ROGUE_CR_JONES_IDLE_PM_SHIFT 3U
+#define ROGUE_CR_JONES_IDLE_PM_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_IDLE_PM_EN 0x00000008U
+#define ROGUE_CR_JONES_IDLE_CDM_SHIFT 2U
+#define ROGUE_CR_JONES_IDLE_CDM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_IDLE_CDM_EN 0x00000004U
+#define ROGUE_CR_JONES_IDLE_VDM_SHIFT 1U
+#define ROGUE_CR_JONES_IDLE_VDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_IDLE_VDM_EN 0x00000002U
+#define ROGUE_CR_JONES_IDLE_BIF_SHIFT 0U
+#define ROGUE_CR_JONES_IDLE_BIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_IDLE_BIF_EN 0x00000001U
+
+/* Register ROGUE_CR_TORNADO_PERF */
+#define ROGUE_CR_TORNADO_PERF 0x8228U
+#define ROGUE_CR_TORNADO_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TORNADO_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TORNADO_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TORNADO_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TORNADO_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TORNADO_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TORNADO_PERF_SELECT0 */
+#define ROGUE_CR_TORNADO_PERF_SELECT0 0x8230U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TORNADO_PERF_COUNTER_0 */
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0 0x8268U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TEXAS_PERF */
+#define ROGUE_CR_TEXAS_PERF 0x8290U
+#define ROGUE_CR_TEXAS_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_TEXAS_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_TEXAS_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_TEXAS_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_TEXAS_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TEXAS_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_PERF_SELECT0 */
+#define ROGUE_CR_TEXAS_PERF_SELECT0 0x8298U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TEXAS_PERF_COUNTER_0 */
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0 0x82D8U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_PERF */
+#define ROGUE_CR_JONES_PERF 0x8330U
+#define ROGUE_CR_JONES_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_JONES_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_JONES_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_JONES_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_JONES_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_JONES_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_JONES_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_JONES_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_JONES_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_JONES_PERF_SELECT0 */
+#define ROGUE_CR_JONES_PERF_SELECT0 0x8338U
+#define ROGUE_CR_JONES_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_JONES_PERF_COUNTER_0 */
+#define ROGUE_CR_JONES_PERF_COUNTER_0 0x8368U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF */
+#define ROGUE_CR_BLACKPEARL_PERF 0x8400U
+#define ROGUE_CR_BLACKPEARL_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_SELECT0 */
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0 0x8408U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 */
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 0x8448U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_PERF */
+#define ROGUE_CR_PBE_PERF 0x8478U
+#define ROGUE_CR_PBE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_PBE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_PBE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_PBE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_PBE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_PBE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_PBE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_PBE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_PBE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_PBE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_PBE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_PBE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_PBE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_PBE_PERF_SELECT0 */
+#define ROGUE_CR_PBE_PERF_SELECT0 0x8480U
+#define ROGUE_CR_PBE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_PBE_PERF_COUNTER_0 */
+#define ROGUE_CR_PBE_PERF_COUNTER_0 0x84B0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OCP_REVINFO */
+#define ROGUE_CR_OCP_REVINFO 0x9000U
+#define ROGUE_CR_OCP_REVINFO_MASKFULL 0x00000007FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT 33U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK 0xFFFFFFF9FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT 32U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_REVINFO_REVISION_SHIFT 0U
+#define ROGUE_CR_OCP_REVINFO_REVISION_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_OCP_SYSCONFIG */
+#define ROGUE_CR_OCP_SYSCONFIG 0x9010U
+#define ROGUE_CR_OCP_SYSCONFIG_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT 10U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK 0xFFFFF3FFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT 8U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT 6U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT 4U
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 2U
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 0U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0 0x9020U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1 0x9028U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2 0x9030U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_0 0x9038U
+#define ROGUE_CR_OCP_IRQSTATUS_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_1 0x9040U
+#define ROGUE_CR_OCP_IRQSTATUS_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_2 0x9048U
+#define ROGUE_CR_OCP_IRQSTATUS_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_0 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_0 0x9050U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_1 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_1 0x9058U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_2 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_2 0x9060U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_0 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0 0x9068U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_1 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1 0x9070U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_2 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2 0x9078U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN 0x00000001U
+
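+/*
+ * Editorial note (illustrative sketch, not part of the original header): the
+ * OCP interrupt registers above come as a RAW / STATUS / ENABLE_SET /
+ * ENABLE_CLR set per interrupt line (0..2). A hypothetical handler for line 2
+ * (the RGX IRQ) might do:
+ *
+ *   if (pvr_cr_read32(pvr_dev, ROGUE_CR_OCP_IRQSTATUS_2) &
+ *       ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN) {
+ *           // service the GPU interrupt, then acknowledge it
+ *           pvr_cr_write32(pvr_dev, ROGUE_CR_OCP_IRQSTATUS_2,
+ *                          ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+ *   }
+ *
+ * Treating the status bit as write-one-to-clear is an assumption here, as are
+ * the pvr_cr_read32()/pvr_cr_write32() accessor names.
+ */
+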
+/* Register ROGUE_CR_OCP_IRQ_EVENT */
+#define ROGUE_CR_OCP_IRQ_EVENT 0x9080U
+#define ROGUE_CR_OCP_IRQ_EVENT_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT 19U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN 0x0000000000080000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT 18U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT 17U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN 0x0000000000020000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT 16U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000010000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT 15U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT 14U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT 13U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT 12U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN 0x0000000000001000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT 11U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000800ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT 10U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT 9U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN 0x0000000000000200ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT 8U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN 0x0000000000000100ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT 7U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT 6U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT 5U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT 4U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN 0x0000000000000010ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT 3U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000008ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT 2U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT 1U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN 0x0000000000000002ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT 0U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_OCP_DEBUG_CONFIG */
+#define ROGUE_CR_OCP_DEBUG_CONFIG 0x9088U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_DEBUG_STATUS */
+#define ROGUE_CR_OCP_DEBUG_STATUS 0x9090U
+#define ROGUE_CR_OCP_DEBUG_STATUS_MASKFULL 0x001F1F77FFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT 51U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK 0xFFE7FFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT 50U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN 0x0004000000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT 48U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT 43U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK 0xFFFFE7FFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT 42U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN 0x0000040000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT 40U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT 38U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN 0x0000004000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT 37U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN 0x0000002000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT 36U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN 0x0000001000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT 34U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN 0x0000000400000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT 33U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN 0x0000000200000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT 32U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT 31U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN 0x0000000080000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT 30U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN 0x0000000040000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT 29U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN 0x0000000020000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT 27U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK 0xFFFFFFFFE7FFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT 26U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN 0x0000000004000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT 24U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT 23U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN 0x0000000000800000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT 22U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN 0x0000000000400000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT 21U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN 0x0000000000200000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT 19U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK 0xFFFFFFFFFFE7FFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT 18U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT 16U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT 15U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT 14U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT 13U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT 11U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK 0xFFFFFFFFFFFFE7FFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT 10U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT 8U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT 7U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT 6U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT 5U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT 3U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK 0xFFFFFFFFFFFFFFE7ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT 2U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_EN 0x00000001U
+
+#define ROGUE_CR_BIF_TRUST_DM_MASK 0x0000007FU
+
+/* Register ROGUE_CR_BIF_TRUST */
+#define ROGUE_CR_BIF_TRUST 0xA000U
+#define ROGUE_CR_BIF_TRUST_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT 20U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN 0x00100000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT 19U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN 0x00080000U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT 18U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN 0x00040000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT 17U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN 0x00020000U
+#define ROGUE_CR_BIF_TRUST_ENABLE_SHIFT 16U
+#define ROGUE_CR_BIF_TRUST_ENABLE_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_BIF_TRUST_ENABLE_EN 0x00010000U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_SHIFT 9U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_CLRMSK 0xFFFF01FFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT 8U
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN 0x00000100U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT 7U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN 0x00000080U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN 0x00000001U
+
+/* Register ROGUE_CR_SYS_BUS_SECURE */
+#define ROGUE_CR_SYS_BUS_SECURE 0xA100U
+#define ROGUE_CR_SYS_BUS_SECURE__SECR__MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_SHIFT 0U
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FBA_FC0_CHECKSUM */
+#define ROGUE_CR_FBA_FC0_CHECKSUM 0xD170U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC1_CHECKSUM */
+#define ROGUE_CR_FBA_FC1_CHECKSUM 0xD178U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC2_CHECKSUM */
+#define ROGUE_CR_FBA_FC2_CHECKSUM 0xD180U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC3_CHECKSUM */
+#define ROGUE_CR_FBA_FC3_CHECKSUM 0xD188U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CLK_CTRL2 */
+#define ROGUE_CR_CLK_CTRL2 0xD200U
+#define ROGUE_CR_CLK_CTRL2_MASKFULL 0x0000000000000F33ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL2_VRDM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL2_SH_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL2_SH_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL2_SH_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_SH_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL2_SH_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL2_FBA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS2 */
+#define ROGUE_CR_CLK_STATUS2 0xD208U
+#define ROGUE_CR_CLK_STATUS2_MASKFULL 0x0000000000000015ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS2_VRDM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS2_SH_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS2_SH_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS2_SH_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_SH_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS2_FBA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_RPM_SHF_FPL */
+#define ROGUE_CR_RPM_SHF_FPL 0xD520U
+#define ROGUE_CR_RPM_SHF_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_READ */
+#define ROGUE_CR_RPM_SHF_FPL_READ 0xD528U
+#define ROGUE_CR_RPM_SHF_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_WRITE */
+#define ROGUE_CR_RPM_SHF_FPL_WRITE 0xD530U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL */
+#define ROGUE_CR_RPM_SHG_FPL 0xD538U
+#define ROGUE_CR_RPM_SHG_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_READ */
+#define ROGUE_CR_RPM_SHG_FPL_READ 0xD540U
+#define ROGUE_CR_RPM_SHG_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_WRITE */
+#define ROGUE_CR_RPM_SHG_FPL_WRITE 0xD548U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_SH_PERF */
+#define ROGUE_CR_SH_PERF 0xD5F8U
+#define ROGUE_CR_SH_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_SH_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_SH_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SH_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_SH_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_SH_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SH_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_SH_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_SH_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SH_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_SH_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_SH_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SH_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_SH_PERF_SELECT0 */
+#define ROGUE_CR_SH_PERF_SELECT0 0xD600U
+#define ROGUE_CR_SH_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SH_PERF_COUNTER_0 */
+#define ROGUE_CR_SH_PERF_COUNTER_0 0xD628U
+#define ROGUE_CR_SH_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_SHG_CHECKSUM */
+#define ROGUE_CR_SHF_SHG_CHECKSUM 0xD1C0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM 0xD1C8U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VARY_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM 0xD1D0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RPM_BIF_CHECKSUM */
+#define ROGUE_CR_RPM_BIF_CHECKSUM 0xD1D8U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_BIF_CHECKSUM */
+#define ROGUE_CR_SHG_BIF_CHECKSUM 0xD1E0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_FE_BE_CHECKSUM */
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM 0xD1E8U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BF_PERF */
+#define DPX_CR_BF_PERF 0xC458U
+#define DPX_CR_BF_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BF_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BF_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BF_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BF_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BF_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BF_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BF_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BF_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BF_PERF_SELECT0 */
+#define DPX_CR_BF_PERF_SELECT0 0xC460U
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BF_PERF_COUNTER_0 */
+#define DPX_CR_BF_PERF_COUNTER_0 0xC488U
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BT_PERF */
+#define DPX_CR_BT_PERF 0xC3D0U
+#define DPX_CR_BT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BT_PERF_SELECT0 */
+#define DPX_CR_BT_PERF_SELECT0 0xC3D8U
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BT_PERF_COUNTER_0 */
+#define DPX_CR_BT_PERF_COUNTER_0 0xC420U
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RQ_USC_DEBUG */
+#define DPX_CR_RQ_USC_DEBUG 0xC110U
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT 0U
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS 0xC5C8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT 0U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS 0xC5D0U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL 0x03FFFFFFFFFFFFF0ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT 57U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN 0x0200000000000000ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT 44U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK 0xFE000FFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT 40U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register DPX_CR_BIF_MMU_STATUS */
+#define DPX_CR_BIF_MMU_STATUS 0xC5D8U
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF */
+#define DPX_CR_RT_PERF 0xC700U
+#define DPX_CR_RT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_RT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_RT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_RT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_RT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_RT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_RT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_RT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_RT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF_SELECT0 */
+#define DPX_CR_RT_PERF_SELECT0 0xC708U
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_RT_PERF_COUNTER_0 */
+#define DPX_CR_RT_PERF_COUNTER_0 0xC730U
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BX_TU_PERF */
+#define DPX_CR_BX_TU_PERF 0xC908U
+#define DPX_CR_BX_TU_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BX_TU_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BX_TU_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BX_TU_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BX_TU_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BX_TU_PERF_SELECT0 */
+#define DPX_CR_BX_TU_PERF_SELECT0 0xC910U
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BX_TU_PERF_COUNTER_0 */
+#define DPX_CR_BX_TU_PERF_COUNTER_0 0xC938U
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RS_PDS_RR_CHECKSUM */
+#define DPX_CR_RS_PDS_RR_CHECKSUM 0xC0F0U
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT 0U
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT */
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT 0xE140U
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT 0U
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MMU_CBASE_MAPPING */
+#define ROGUE_CR_MMU_CBASE_MAPPING 0xE148U
+#define ROGUE_CR_MMU_CBASE_MAPPING_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK 0xF0000000U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_MMU_FAULT_STATUS */
+#define ROGUE_CR_MMU_FAULT_STATUS 0xE150U
+#define ROGUE_CR_MMU_FAULT_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT 28U
+#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT 20U
+#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT 12U
+#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT 6U
+#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL
+#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_SHIFT 4U
+#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_SHIFT 3U
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_EN 0x0000000000000008ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_SHIFT 1U
+#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MMU_FAULT_STATUS_META */
+#define ROGUE_CR_MMU_FAULT_STATUS_META 0xE158U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT 28U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT 20U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT 12U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT 6U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT 4U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_SHIFT 3U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_EN 0x0000000000000008ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT 1U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT 0U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC3_CTRL_MISC */
+#define ROGUE_CR_SLC3_CTRL_MISC 0xE200U
+#define ROGUE_CR_SLC3_CTRL_MISC_MASKFULL 0x0000000000000107ULL
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT 8U
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN 0x00000100U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 0U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFF8U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR 0x00000000U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH 0x00000001U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH 0x00000002U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH 0x00000003U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH 0x00000004U
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE */
+#define ROGUE_CR_SLC3_SCRAMBLE 0xE208U
+#define ROGUE_CR_SLC3_SCRAMBLE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE2 */
+#define ROGUE_CR_SLC3_SCRAMBLE2 0xE210U
+#define ROGUE_CR_SLC3_SCRAMBLE2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE3 */
+#define ROGUE_CR_SLC3_SCRAMBLE3 0xE218U
+#define ROGUE_CR_SLC3_SCRAMBLE3_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE4 */
+#define ROGUE_CR_SLC3_SCRAMBLE4 0xE260U
+#define ROGUE_CR_SLC3_SCRAMBLE4_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_STATUS */
+#define ROGUE_CR_SLC3_STATUS 0xE220U
+#define ROGUE_CR_SLC3_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_WRITES1_SHIFT 48U
+#define ROGUE_CR_SLC3_STATUS_WRITES1_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_WRITES0_SHIFT 32U
+#define ROGUE_CR_SLC3_STATUS_WRITES0_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_READS1_SHIFT 16U
+#define ROGUE_CR_SLC3_STATUS_READS1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_SLC3_STATUS_READS0_SHIFT 0U
+#define ROGUE_CR_SLC3_STATUS_READS0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SLC3_IDLE */
+#define ROGUE_CR_SLC3_IDLE 0xE228U
+#define ROGUE_CR_SLC3_IDLE_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT 18U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK 0xFFF3FFFFU
+#define ROGUE_CR_SLC3_IDLE_MMU_SHIFT 17U
+#define ROGUE_CR_SLC3_IDLE_MMU_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_SLC3_IDLE_MMU_EN 0x00020000U
+#define ROGUE_CR_SLC3_IDLE_RDI_SHIFT 16U
+#define ROGUE_CR_SLC3_IDLE_RDI_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_SLC3_IDLE_RDI_EN 0x00010000U
+#define ROGUE_CR_SLC3_IDLE_IMGBV4_SHIFT 12U
+#define ROGUE_CR_SLC3_IDLE_IMGBV4_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_SHIFT 4U
+#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT 2U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT 1U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_EN 0x00000002U
+#define ROGUE_CR_SLC3_IDLE_XBAR_SHIFT 0U
+#define ROGUE_CR_SLC3_IDLE_XBAR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC3_IDLE_XBAR_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC3_FAULT_STOP_STATUS */
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS 0xE248U
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_MASKFULL 0x0000000000001FFFULL
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT 0U
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK 0xFFFFE000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_MODE */
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE 0xF048U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX 0x00000000U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE 0x00000001U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST 0x00000002U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING0 */
+#define ROGUE_CR_CONTEXT_MAPPING0 0xF078U
+#define ROGUE_CR_CONTEXT_MAPPING0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING0_2D_SHIFT 24U
+#define ROGUE_CR_CONTEXT_MAPPING0_2D_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CONTEXT_MAPPING0_CDM_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING0_CDM_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING0_3D_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING0_3D_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING0_TA_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING0_TA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING1 */
+#define ROGUE_CR_CONTEXT_MAPPING1 0xF080U
+#define ROGUE_CR_CONTEXT_MAPPING1_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING1_HOST_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING1_HOST_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING1_TLA_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING1_TLA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING2 */
+#define ROGUE_CR_CONTEXT_MAPPING2 0xF088U
+#define ROGUE_CR_CONTEXT_MAPPING2_MASKFULL 0x0000000000FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING2_TE0_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING2_TE0_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING3 */
+#define ROGUE_CR_CONTEXT_MAPPING3 0xF090U
+#define ROGUE_CR_CONTEXT_MAPPING3_MASKFULL 0x0000000000FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING3_TE1_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING3_TE1_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_JONES_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ 0xF098U
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ 0xF0A0U
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_DUST_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ 0xF0A8U
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING4 */
+#define ROGUE_CR_CONTEXT_MAPPING4 0xF210U
+#define ROGUE_CR_CONTEXT_MAPPING4_MASKFULL 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT 40U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT 32U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT 24U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MULTICORE_GPU */
+#define ROGUE_CR_MULTICORE_GPU 0xF300U
+#define ROGUE_CR_MULTICORE_GPU_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT 6U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN 0x00000040U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT 5U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN 0x00000020U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT 4U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN 0x00000010U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT 3U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN 0x00000008U
+#define ROGUE_CR_MULTICORE_GPU_ID_SHIFT 0U
+#define ROGUE_CR_MULTICORE_GPU_ID_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_MULTICORE_SYSTEM */
+#define ROGUE_CR_MULTICORE_SYSTEM 0xF308U
+#define ROGUE_CR_MULTICORE_SYSTEM_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT 0U
+#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON 0xF310U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON 0xF320U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON 0xF330U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
+#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
+#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_ECC_RAM_INIT_DONE */
+#define ROGUE_CR_ECC_RAM_INIT_DONE 0xF350U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_ENABLE */
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE 0xF390U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_STATUS */
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE 0xF398U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_CLEAR */
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE 0xF3A0U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* clang-format on */
+
+#endif /* PVR_ROGUE_CR_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
new file mode 100644
index 000000000000..46186b56effc
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_CR_DEFS_CLIENT_H
+#define PVR_ROGUE_CR_DEFS_CLIENT_H
+
+/* clang-format off */
+
+/*
+ * This register controls the anti-aliasing mode of the Tiling Co-Processor; independent control
+ * is provided in both the X and Y axes.
+ * This register needs to be set based on the ISP Samples Per Pixel a core supports.
+ *
+ * When ISP Samples Per Pixel = 1:
+ * 2xmsaa is achieved by enabling Y - TE does AA on Y plane only
+ * 4xmsaa is achieved by enabling Y and X - TE does AA on X and Y plane
+ * 8xmsaa not supported by XE cores
+ *
+ * When ISP Samples Per Pixel = 2:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y and X2 - TE does AA on Y plane only
+ * 8xmsaa is achieved by enabling Y, X and X2 - TE does AA on X and Y plane
+ * 8xmsaa not supported by XE cores
+ *
+ * When ISP Samples Per Pixel = 4:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y2 and X2 - TE does AA on Y plane only
+ * 8xmsaa not supported by XE cores
+ */
+/* Register ROGUE_CR_TE_AA */
+#define ROGUE_CR_TE_AA 0x0C00U
+#define ROGUE_CR_TE_AA_MASKFULL 0x000000000000000Full
+/* Y2
+ * Indicates 4xmsaa when X2 and Y2 are set to 1. This does not affect TE and is only used within
+ * TPW.
+ */
+#define ROGUE_CR_TE_AA_Y2_SHIFT 3
+#define ROGUE_CR_TE_AA_Y2_CLRMSK 0xFFFFFFF7
+#define ROGUE_CR_TE_AA_Y2_EN 0x00000008
+/* Y
+ * Anti-Aliasing in Y Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_Y_SHIFT 2
+#define ROGUE_CR_TE_AA_Y_CLRMSK 0xFFFFFFFB
+#define ROGUE_CR_TE_AA_Y_EN 0x00000004
+/* X
+ * Anti-Aliasing in X Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_X_SHIFT 1
+#define ROGUE_CR_TE_AA_X_CLRMSK 0xFFFFFFFD
+#define ROGUE_CR_TE_AA_X_EN 0x00000002
+/* X2
+ * 2x Anti-Aliasing Enabled, affects PPP only
+ */
+#define ROGUE_CR_TE_AA_X2_SHIFT (0U)
+#define ROGUE_CR_TE_AA_X2_CLRMSK (0xFFFFFFFEU)
+#define ROGUE_CR_TE_AA_X2_EN (0x00000001U)
+
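+/*
+ * Illustrative sketch, not part of the driver: one way to derive a
+ * ROGUE_CR_TE_AA value from the mapping described above. The helper name,
+ * the u32 type (from <linux/types.h>) and the cumulative bit-OR structure
+ * are assumptions made purely for this example.
+ */
+static inline u32 rogue_te_aa_value(u32 isp_samples, u32 msaa)
+{
+	u32 val = 0;
+
+	if (isp_samples == 1) {
+		if (msaa >= 2)
+			val |= ROGUE_CR_TE_AA_Y_EN;	/* 2x: Y only */
+		if (msaa >= 4)
+			val |= ROGUE_CR_TE_AA_X_EN;	/* 4x: X and Y */
+	} else if (isp_samples == 2) {
+		if (msaa >= 2)
+			val |= ROGUE_CR_TE_AA_X2_EN;	/* 2x: X2 only */
+		if (msaa >= 4)
+			val |= ROGUE_CR_TE_AA_Y_EN;	/* 4x: Y + X2 */
+		if (msaa >= 8)
+			val |= ROGUE_CR_TE_AA_X_EN;	/* 8x: X + Y + X2 (not on XE cores) */
+	} else if (isp_samples == 4) {
+		if (msaa >= 2)
+			val |= ROGUE_CR_TE_AA_X2_EN;	/* 2x: X2 only */
+		if (msaa >= 4)
+			val |= ROGUE_CR_TE_AA_Y2_EN;	/* 4x: Y2 + X2 */
+	}
+
+	return val;
+}
+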
+/* MacroTile Boundaries X Plane */
+/* Register ROGUE_CR_TE_MTILE1 */
+#define ROGUE_CR_TE_MTILE1 0x0C08
+#define ROGUE_CR_TE_MTILE1_MASKFULL 0x0000000007FFFFFFull
+/* X1 default: 0x00000004
+ * X1 MacroTile boundary, left tile X for second column of macrotiles (16MT mode) - 32 pixels across
+ * tile
+ */
+#define ROGUE_CR_TE_MTILE1_X1_SHIFT 18
+#define ROGUE_CR_TE_MTILE1_X1_CLRMSK 0xF803FFFF
+/* X2 default: 0x00000008
+ * X2 MacroTile boundary, left tile X for third (16MT) column of macrotiles - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X2_SHIFT 9U
+#define ROGUE_CR_TE_MTILE1_X2_CLRMSK 0xFFFC01FF
+/* X3 default: 0x0000000c
+ * X3 MacroTile boundary, left tile X for fourth column of macrotiles (16MT) - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X3_SHIFT 0
+#define ROGUE_CR_TE_MTILE1_X3_CLRMSK 0xFFFFFE00
+
+/* MacroTile Boundaries Y Plane. */
+/* Register ROGUE_CR_TE_MTILE2 */
+#define ROGUE_CR_TE_MTILE2 0x0C10
+#define ROGUE_CR_TE_MTILE2_MASKFULL 0x0000000007FFFFFFull
+/* Y1 default: 0x00000004
+ * X1 MacroTile boundary, top tile Y for second column of macrotiles (16MT mode) - 32 pixels tile
+ * height
+ */
+#define ROGUE_CR_TE_MTILE2_Y1_SHIFT 18
+#define ROGUE_CR_TE_MTILE2_Y1_CLRMSK 0xF803FFFF
+/* Y2 default: 0x00000008
+ * X2 MacroTile boundary, top tile Y for third (16MT) column of macrotiles - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y2_SHIFT 9
+#define ROGUE_CR_TE_MTILE2_Y2_CLRMSK 0xFFFC01FF
+/* Y3 default: 0x0000000c
+ * X3 MacroTile boundary, top tile Y for fourth column of macrotiles (16MT) - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y3_SHIFT 0
+#define ROGUE_CR_TE_MTILE2_Y3_CLRMSK 0xFFFFFE00
+
+/*
+ * In order to perform the tiling operation and generate the display list, the maximum screen size
+ * must be configured in terms of the number of tiles in the X and Y axes.
+ */
+
+/* Register ROGUE_CR_TE_SCREEN */
+#define ROGUE_CR_TE_SCREEN 0x0C18U
+#define ROGUE_CR_TE_SCREEN_MASKFULL 0x00000000001FF1FFull
+/* YMAX default: 0x00000010
+ * Maximum Y tile address visible on screen, 32 pixel tile height, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_YMAX_SHIFT 12
+#define ROGUE_CR_TE_SCREEN_YMAX_CLRMSK 0xFFE00FFF
+/* XMAX default: 0x00000010
+ * Maximum X tile address visible on screen, 32 pixel tile width, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_XMAX_SHIFT 0
+#define ROGUE_CR_TE_SCREEN_XMAX_CLRMSK 0xFFFFFE00
+
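+/*
+ * Illustrative sketch, not part of the driver: packing a TE_SCREEN value
+ * from a screen size given in pixels. It assumes the XMAX/YMAX fields hold
+ * the index of the last visible 32x32-pixel tile; that interpretation, the
+ * helper name and the use of u32/DIV_ROUND_UP from the usual kernel headers
+ * are assumptions made for this example.
+ */
+static inline u32 rogue_te_screen_value(u32 width_px, u32 height_px)
+{
+	u32 xmax = DIV_ROUND_UP(width_px, 32) - 1;	/* last tile column */
+	u32 ymax = DIV_ROUND_UP(height_px, 32) - 1;	/* last tile row */
+
+	return (ymax << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) |
+	       (xmax << ROGUE_CR_TE_SCREEN_XMAX_SHIFT);
+}
+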
+/*
+ * In order to perform the tiling operation and generate the display list, the maximum screen size
+ * must be configured in terms of the number of pixels in the X and Y axes, since this may not be
+ * the same as the number of tiles defined in the ROGUE_CR_TE_SCREEN register.
+ */
+/* Register ROGUE_CR_PPP_SCREEN */
+#define ROGUE_CR_PPP_SCREEN 0x0C98
+#define ROGUE_CR_PPP_SCREEN_MASKFULL 0x000000007FFF7FFFull
+/* PIXYMAX
+ * Screen height in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT 16
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK 0x8000FFFF
+/* PIXXMAX
+ * Screen width in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT 0
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK 0xFFFF8000
+
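+/*
+ * Illustrative sketch, not part of the driver: packing PPP_SCREEN from a
+ * pixel size, following the field comments above. The helper name and u32
+ * type are assumptions; whether the hardware expects the raw size or the
+ * largest visible pixel coordinate (size - 1) should be checked against the
+ * TRM.
+ */
+static inline u32 rogue_ppp_screen_value(u32 width_px, u32 height_px)
+{
+	return (height_px << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT) |
+	       (width_px << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT);
+}
+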
+/* Register ROGUE_CR_ISP_MTILE_SIZE */
+#define ROGUE_CR_ISP_MTILE_SIZE 0x0F18
+#define ROGUE_CR_ISP_MTILE_SIZE_MASKFULL 0x0000000003FF03FFull
+/* X
+ * Macrotile width, in tiles. A value of zero corresponds to the maximum size
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT 16
+#define ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK 0xFC00FFFF
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSIZE 1
+/* Y
+ * Macrotile height, in tiles. A value of zero corresponds to the maximum size
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK 0xFFFFFC00
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSIZE 1
+
+/* clang-format on */
+
+#endif /* PVR_ROGUE_CR_DEFS_CLIENT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
new file mode 100644
index 000000000000..932b01686008
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_DEFS_H
+#define PVR_ROGUE_DEFS_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bits.h>
+
+/*
+ ******************************************************************************
+ * ROGUE Defines
+ ******************************************************************************
+ */
+
+#define ROGUE_FW_MAX_NUM_OS (8U)
+#define ROGUE_FW_HOST_OS (0U)
+#define ROGUE_FW_GUEST_OSID_START (1U)
+
+#define ROGUE_FW_THREAD_0 (0U)
+#define ROGUE_FW_THREAD_1 (1U)
+
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((s32)(x)) > 0) ? ((x) / 8) : (0))
+
+#define MAX_HW_GEOM_FRAG_CONTEXTS 2U
+
+#define ROGUE_CR_CLK_CTRL_ALL_ON \
+ (0x5555555555555555ull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL_ALL_AUTO \
+ (0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_ON \
+ (0x5555555555555555ull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_AUTO \
+ (0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+
+#define ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN \
+ (ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN \
+ (ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+/* SOFT_RESET steps as defined in the TRM */
+#define ROGUE_S7_SOFT_RESET_DUSTS (ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES \
+ (ROGUE_CR_SOFT_RESET_PM_EN | ROGUE_CR_SOFT_RESET_VDM_EN | \
+ ROGUE_CR_SOFT_RESET_ISP_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES_ALL \
+ (ROGUE_S7_SOFT_RESET_JONES | ROGUE_CR_SOFT_RESET_BIF_EN | \
+ ROGUE_CR_SOFT_RESET_SLC_EN | ROGUE_CR_SOFT_RESET_GARTEN_EN)
+
+#define ROGUE_S7_SOFT_RESET2 \
+ (ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN | ROGUE_CR_SOFT_RESET2_PIXEL_EN | \
+ ROGUE_CR_SOFT_RESET2_CDM_EN | ROGUE_CR_SOFT_RESET2_VERTEX_EN)
+
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U)
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE \
+ BIT(ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U)
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_SIZE BIT(ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U)
+
+/*
+ * To get the number of required Dusts, divide the number of
+ * clusters by 2 and round up
+ */
+#define ROGUE_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U)
+
+/*
+ * To get the number of required Bernado/Phantom(s), divide
+ * the number of clusters by 4 and round up
+ */
+#define ROGUE_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
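+
+/*
+ * For example, a core with 6 clusters needs
+ * ROGUE_REQ_NUM_DUSTS(6) = (6 + 1) / 2 = 3 Dusts and
+ * ROGUE_REQ_NUM_PHANTOMS(6) = (6 + 3) / 4 = 2 Phantoms.
+ */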
+
+/*
+ * FW MMU contexts
+ */
+#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) /* FW code/private data */
+#define MMU_CONTEXT_MAPPING_FWIF (0x0) /* Host/FW data */
+
+/*
+ * Utility macros to calculate CAT_BASE register addresses
+ */
+#define BIF_CAT_BASEX(n) \
+ (ROGUE_CR_BIF_CAT_BASE0 + \
+ (n) * (ROGUE_CR_BIF_CAT_BASE1 - ROGUE_CR_BIF_CAT_BASE0))
+
+#define FWCORE_MEM_CAT_BASEX(n) \
+ (ROGUE_CR_FWCORE_MEM_CAT_BASE0 + \
+ (n) * (ROGUE_CR_FWCORE_MEM_CAT_BASE1 - \
+ ROGUE_CR_FWCORE_MEM_CAT_BASE0))
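+
+/*
+ * Example: BIF_CAT_BASEX(0) is simply ROGUE_CR_BIF_CAT_BASE0, and each
+ * increment of n advances by the register stride
+ * (ROGUE_CR_BIF_CAT_BASE1 - ROGUE_CR_BIF_CAT_BASE0). This assumes the
+ * CAT_BASE registers are laid out at a constant stride, which is what the
+ * macros rely on.
+ */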
+
+/*
+ * FWCORE wrapper register defines
+ */
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT \
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK \
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK
+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U)
+
+#define ROGUE_MAX_COMPUTE_SHARED_REGISTERS (2 * 1024)
+#define ROGUE_MAX_VERTEX_SHARED_REGISTERS 1024
+#define ROGUE_MAX_PIXEL_SHARED_REGISTERS 1024
+#define ROGUE_CSRM_LINE_SIZE_IN_DWORDS (64 * 4 * 4)
+
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE 64
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER 256
+
+/*
+ * The maximum amount of local memory which can be allocated by a single kernel
+ * (in dwords/32-bit registers).
+ *
+ * ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE is in bytes so we divide by four.
+ */
+#define ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS ((ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE * \
+ ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER) >> 2)
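+
+/*
+ * Worked example: 64 * 256 = 16384 bytes, so the limit evaluates to
+ * 16384 >> 2 = 4096 dwords of local memory per kernel.
+ */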
+
+/*
+ ******************************************************************************
+ * WA HWBRNs
+ ******************************************************************************
+ */
+
+/* GPU CR timer tick in GPU cycles */
+#define ROGUE_CRTIME_TICK_IN_CYCLES (256U)
+
+/* for nohw multicore return max cores possible to client */
+#define ROGUE_MULTICORE_MAX_NOHW_CORES (4U)
+
+/*
+ * If the size of the SLC is less than this value then the TPU bypasses the SLC.
+ */
+#define ROGUE_TPU_CACHED_SLC_SIZE_THRESHOLD (128U * 1024U)
+
+/*
+ * If the size of the SLC is bigger than this value then the TCU must not be
+ * bypassed in the SLC.
+ * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default.
+ */
+#define ROGUE_TCU_CACHED_SLC_SIZE_THRESHOLD (32U * 1024U)
+
+/*
+ * Register used by the FW to track the current boot stage (not used in MIPS)
+ */
+#define ROGUE_FW_BOOT_STAGE_REGISTER (ROGUE_CR_POWER_ESTIMATE_RESULT)
+
+/*
+ * Virtualisation definitions
+ */
+#define ROGUE_VIRTUALISATION_REG_SIZE_PER_OS \
+ (ROGUE_CR_MTS_SCHEDULE1 - ROGUE_CR_MTS_SCHEDULE)
+
+/*
+ * Macro used to indicate which version of HWPerf is active
+ */
+#define ROGUE_FEATURE_HWPERF_ROGUE
+
+/*
+ * Maximum number of cores supported by TRP
+ */
+#define ROGUE_TRP_MAX_NUM_CORES (4U)
+
+#endif /* PVR_ROGUE_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
new file mode 100644
index 000000000000..172886be4c82
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
@@ -0,0 +1,2188 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_H
+#define PVR_ROGUE_FWIF_H
+
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ ****************************************************************************
+ * Logging type
+ ****************************************************************************
+ */
+#define ROGUE_FWIF_LOG_TYPE_NONE 0x00000000U
+#define ROGUE_FWIF_LOG_TYPE_TRACE 0x00000001U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MAIN 0x00000002U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MTS 0x00000004U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CSW 0x00000010U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_BIF 0x00000020U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_PM 0x00000040U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RTD 0x00000080U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_SPM 0x00000100U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_POW 0x00000200U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWR 0x00000400U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWP 0x00000800U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RPM 0x00001000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DMA 0x00002000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MISC 0x00004000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU
+#define ROGUE_FWIF_LOG_TYPE_MASK 0x80007FFFU
+
+/* String used in pvrdebug -h output */
+#define ROGUE_FWIF_LOG_GROUPS_STRING_LIST \
+ "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+struct rogue_fwif_log_group_map_entry {
+ const char *log_group_name;
+ u32 log_group_type;
+};
+
+/*
+ ****************************************************************************
+ * ROGUE FW signature checks
+ ****************************************************************************
+ */
+#define ROGUE_FW_SIG_BUFFER_SIZE_MIN (8192)
+
+#define ROGUE_FWIF_TIMEDIFF_ID ((0x1UL << 28) | ROGUE_CR_TIMER)
+
+/*
+ ****************************************************************************
+ * Trace Buffer
+ ****************************************************************************
+ */
+
+/* Default size of ROGUE_FWIF_TRACEBUF_SPACE in DWords */
+#define ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+#define ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE 200U
+#define ROGUE_FW_THREAD_NUM 1U
+#define ROGUE_FW_THREAD_MAX 2U
+
+#define ROGUE_FW_POLL_TYPE_SET 0x80000000U
+
+struct rogue_fwif_file_info_buf {
+ char path[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+ char info[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+ u32 line_num;
+ u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_tracebuf_space {
+ u32 trace_pointer;
+
+ u32 trace_buffer_fw_addr;
+
+ /* To be used by host when reading from trace buffer */
+ u32 *trace_buffer;
+
+ struct rogue_fwif_file_info_buf assert_buf;
+} __aligned(8);
+
+/* Total number of FW fault logs stored */
+#define ROGUE_FWIF_FWFAULTINFO_MAX (8U)
+
+struct rogue_fw_fault_info {
+ aligned_u64 cr_timer;
+ aligned_u64 os_timer;
+
+ u32 data __aligned(8);
+ u32 reserved;
+ struct rogue_fwif_file_info_buf fault_buf;
+} __aligned(8);
+
+enum rogue_fwif_pow_state {
+	ROGUE_FWIF_POW_OFF, /* idle and ready for full power down */
+ ROGUE_FWIF_POW_ON, /* running HW commands */
+ ROGUE_FWIF_POW_FORCED_IDLE, /* forced idle */
+ ROGUE_FWIF_POW_IDLE, /* idle waiting for host handshake */
+};
+
+/* Firmware HWR states */
+/* The HW state is ok or locked up */
+#define ROGUE_FWIF_HWR_HARDWARE_OK BIT(0)
+/* Tells if a HWR reset is in progress */
+#define ROGUE_FWIF_HWR_RESET_IN_PROGRESS BIT(1)
+/* A DM unrelated lockup has been detected */
+#define ROGUE_FWIF_HWR_GENERAL_LOCKUP BIT(3)
+/* At least one DM is running without being close to a lockup */
+#define ROGUE_FWIF_HWR_DM_RUNNING_OK BIT(4)
+/* At least one DM is close to lockup */
+#define ROGUE_FWIF_HWR_DM_STALLING BIT(5)
+/* The FW has faulted and needs to restart */
+#define ROGUE_FWIF_HWR_FW_FAULT BIT(6)
+/* The FW has requested the host to restart it */
+#define ROGUE_FWIF_HWR_RESTART_REQUESTED BIT(7)
+
+#define ROGUE_FWIF_PHR_STATE_SHIFT (8U)
+/* The FW has requested the host to restart it, per PHR configuration */
+#define ROGUE_FWIF_PHR_RESTART_REQUESTED ((1) << ROGUE_FWIF_PHR_STATE_SHIFT)
+/* A PHR triggered GPU reset has just finished */
+#define ROGUE_FWIF_PHR_RESTART_FINISHED ((2) << ROGUE_FWIF_PHR_STATE_SHIFT)
+#define ROGUE_FWIF_PHR_RESTART_MASK \
+ (ROGUE_FWIF_PHR_RESTART_REQUESTED | ROGUE_FWIF_PHR_RESTART_FINISHED)
+
+#define ROGUE_FWIF_PHR_MODE_OFF (0UL)
+#define ROGUE_FWIF_PHR_MODE_RD_RESET (1UL)
+#define ROGUE_FWIF_PHR_MODE_FULL_RESET (2UL)
+
+/* Firmware per-DM HWR states */
+/* DM is working if all flags are cleared */
+#define ROGUE_FWIF_DM_STATE_WORKING (0)
+/* DM is idle and ready for HWR */
+#define ROGUE_FWIF_DM_STATE_READY_FOR_HWR BIT(0)
+/* DM needs to skip to next cmd before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_SKIP BIT(2)
+/* DM needs partial render cleanup before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_PR_CLEANUP BIT(3)
+/* DM needs to increment Recovery Count once fully recovered */
+#define ROGUE_FWIF_DM_STATE_NEEDS_TRACE_CLEAR BIT(4)
+/* DM was identified as locking up and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_LOCKUP BIT(5)
+/* DM was innocently affected by another lockup which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_LOCKUP BIT(6)
+/* DM was identified as over-running and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_OVERRUNING BIT(7)
+/* DM was innocently affected by another DM over-running which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_OVERRUNING BIT(8)
+/* DM was forced into HWR as it delayed more important workloads */
+#define ROGUE_FWIF_DM_STATE_HARD_CONTEXT_SWITCH BIT(9)
+/* DM was forced into HWR due to an uncorrected GPU ECC error */
+#define ROGUE_FWIF_DM_STATE_GPU_ECC_HWR BIT(10)
+
+/* Firmware's connection state */
+enum rogue_fwif_connection_fw_state {
+ /* Firmware is offline */
+ ROGUE_FW_CONNECTION_FW_OFFLINE = 0,
+ /* Firmware is initialised */
+ ROGUE_FW_CONNECTION_FW_READY,
+ /* Firmware connection is fully established */
+ ROGUE_FW_CONNECTION_FW_ACTIVE,
+ /* Firmware is clearing up connection data*/
+ ROGUE_FW_CONNECTION_FW_OFFLOADING,
+ ROGUE_FW_CONNECTION_FW_STATE_COUNT
+};
+
+/* OS' connection state */
+enum rogue_fwif_connection_os_state {
+ /* OS is offline */
+ ROGUE_FW_CONNECTION_OS_OFFLINE = 0,
+ /* OS's KM driver is setup and waiting */
+ ROGUE_FW_CONNECTION_OS_READY,
+ /* OS connection is fully established */
+ ROGUE_FW_CONNECTION_OS_ACTIVE,
+ ROGUE_FW_CONNECTION_OS_STATE_COUNT
+};
+
+struct rogue_fwif_os_runtime_flags {
+ unsigned int os_state : 3;
+ unsigned int fl_ok : 1;
+ unsigned int fl_grow_pending : 1;
+ unsigned int isolated_os : 1;
+ unsigned int reserved : 26;
+};
+
+#define PVR_SLR_LOG_ENTRIES 10
+/* MAX_CLIENT_CCB_NAME not visible to this header */
+#define PVR_SLR_LOG_STRLEN 30
+
+struct rogue_fwif_slr_entry {
+ aligned_u64 timestamp;
+ u32 fw_ctx_addr;
+ u32 num_ufos;
+ char ccb_name[PVR_SLR_LOG_STRLEN];
+ char padding[2];
+} __aligned(8);
+
+#define MAX_THREAD_NUM 2
+
+/* firmware trace control data */
+struct rogue_fwif_tracebuf {
+ u32 log_type;
+ struct rogue_fwif_tracebuf_space tracebuf[MAX_THREAD_NUM];
+ /*
+ * Member initialised only when sTraceBuf is actually allocated (in
+ * ROGUETraceBufferInitOnDemandResources)
+ */
+ u32 tracebuf_size_in_dwords;
+ /* Compatibility and other flags */
+ u32 tracebuf_flags;
+} __aligned(8);
+
+/* firmware system data shared with the Host driver */
+struct rogue_fwif_sysdata {
+ /* Configuration flags from host */
+ u32 config_flags;
+ /* Extended configuration flags from host */
+ u32 config_flags_ext;
+ enum rogue_fwif_pow_state pow_state;
+ u32 hw_perf_ridx;
+ u32 hw_perf_widx;
+ u32 hw_perf_wrap_count;
+ /* Constant after setup, needed in FW */
+ u32 hw_perf_size;
+ /* The number of times the FW drops a packet due to buffer full */
+ u32 hw_perf_drop_count;
+
+ /*
+ * ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid
+ * when FW is built with ROGUE_HWPERF_UTILIZATION &
+ * ROGUE_HWPERF_DROP_TRACKING defined in rogue_fw_hwperf.c
+ */
+ /* Buffer utilisation, high watermark of bytes in use */
+ u32 hw_perf_ut;
+ /* The ordinal of the first packet the FW dropped */
+ u32 first_drop_ordinal;
+ /* The ordinal of the last packet the FW dropped */
+ u32 last_drop_ordinal;
+ /* State flags for each Operating System mirrored from Fw coremem */
+ struct rogue_fwif_os_runtime_flags
+ os_runtime_flags_mirror[ROGUE_FW_MAX_NUM_OS];
+
+ struct rogue_fw_fault_info fault_info[ROGUE_FWIF_FWFAULTINFO_MAX];
+ u32 fw_faults;
+ u32 cr_poll_addr[MAX_THREAD_NUM];
+ u32 cr_poll_mask[MAX_THREAD_NUM];
+ u32 cr_poll_count[MAX_THREAD_NUM];
+ aligned_u64 start_idle_time;
+
+#if defined(SUPPORT_ROGUE_FW_STATS_FRAMEWORK)
+# define ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE (8)
+# define ROGUE_FWIF_STATS_FRAMEWORK_MAX \
+ (2048 * ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE)
+ u32 fw_stats_buf[ROGUE_FWIF_STATS_FRAMEWORK_MAX] __aligned(8);
+#endif
+ u32 hwr_state_flags;
+ u32 hwr_recovery_flags[PVR_FWIF_DM_MAX];
+ /* Compatibility and other flags */
+ u32 fw_sys_data_flags;
+ /* Identify whether MC config is P-P or P-S */
+ u32 mc_config;
+} __aligned(8);
+
+/* per-os firmware shared data */
+struct rogue_fwif_osdata {
+ /* Configuration flags from an OS */
+ u32 fw_os_config_flags;
+ /* Markers to signal that the host should perform a full sync check */
+ u32 fw_sync_check_mark;
+ u32 host_sync_check_mark;
+
+ u32 forced_updates_requested;
+ u8 slr_log_wp;
+ struct rogue_fwif_slr_entry slr_log_first;
+ struct rogue_fwif_slr_entry slr_log[PVR_SLR_LOG_ENTRIES];
+ aligned_u64 last_forced_update_time;
+
+ /* Interrupt count from Threads > */
+ u32 interrupt_count[MAX_THREAD_NUM];
+ u32 kccb_cmds_executed;
+ u32 power_sync_fw_addr;
+ /* Compatibility and other flags */
+ u32 fw_os_data_flags;
+ u32 padding;
+} __aligned(8);
+
+/* Firmware trace time-stamp field breakup */
+
+/* ROGUE_CR_TIMER register read (48 bits) value*/
+#define ROGUE_FWT_TIMESTAMP_TIME_SHIFT (0U)
+#define ROGUE_FWT_TIMESTAMP_TIME_CLRMSK (0xFFFF000000000000ull)
+
+/* Extra debug-info (16 bits) */
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U)
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK
+
+/* Debug-info sub-fields */
+/*
+ * Bit 0: ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from ROGUE_CR_EVENT_STATUS
+ * register
+ */
+#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U)
+#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SET \
+ BIT(ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT)
+
+/* Bit 1: ROGUE_CR_BIF_MMU_ENTRY_PENDING bit from ROGUE_CR_BIF_MMU_ENTRY register */
+#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U)
+#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET \
+ BIT(ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT)
+
+/* Bit 2: ROGUE_CR_SLAVE_EVENT register is non-zero */
+#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U)
+#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SET \
+ BIT(ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT)
+
+/* Bit 3-15: Unused bits */
+
+#define ROGUE_FWT_DEBUG_INFO_STR_MAXLEN 64
+#define ROGUE_FWT_DEBUG_INFO_STR_PREPEND " (debug info: "
+#define ROGUE_FWT_DEBUG_INFO_STR_APPEND ")"
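+
+/*
+ * Illustrative decode of a trace time-stamp, based purely on the shift/mask
+ * definitions above:
+ *
+ *   u64 time = (ts & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
+ *              ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
+ *   u64 dbg = (ts & ~ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >>
+ *             ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT;
+ *   bool mmu_fault = dbg & ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SET;
+ */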
+
+/*
+ ******************************************************************************
+ * HWR Data
+ ******************************************************************************
+ */
+enum rogue_hwrtype {
+ ROGUE_HWRTYPE_UNKNOWNFAILURE = 0,
+ ROGUE_HWRTYPE_OVERRUN = 1,
+ ROGUE_HWRTYPE_POLLFAILURE = 2,
+ ROGUE_HWRTYPE_BIF0FAULT = 3,
+ ROGUE_HWRTYPE_BIF1FAULT = 4,
+ ROGUE_HWRTYPE_TEXASBIF0FAULT = 5,
+ ROGUE_HWRTYPE_MMUFAULT = 6,
+ ROGUE_HWRTYPE_MMUMETAFAULT = 7,
+ ROGUE_HWRTYPE_MIPSTLBFAULT = 8,
+ ROGUE_HWRTYPE_ECCFAULT = 9,
+ ROGUE_HWRTYPE_MMURISCVFAULT = 10,
+};
+
+#define ROGUE_FWIF_HWRTYPE_BIF_BANK_GET(hwr_type) \
+ (((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+#define ROGUE_FWIF_HWRTYPE_PAGE_FAULT_GET(hwr_type) \
+ ((((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_BIF1FAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_TEXASBIF0FAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MMUFAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MMUMETAFAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MIPSTLBFAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MMURISCVFAULT)) \
+ ? true \
+ : false)
+
+struct rogue_bifinfo {
+ aligned_u64 bif_req_status;
+ aligned_u64 bif_mmu_status;
+ aligned_u64 pc_address; /* phys address of the page catalogue */
+ aligned_u64 reserved;
+};
+
+struct rogue_eccinfo {
+ u32 fault_gpu;
+};
+
+struct rogue_mmuinfo {
+ aligned_u64 mmu_status[2];
+ aligned_u64 pc_address; /* phys address of the page catalogue */
+ aligned_u64 reserved;
+};
+
+struct rogue_pollinfo {
+ u32 thread_num;
+ u32 cr_poll_addr;
+ u32 cr_poll_mask;
+ u32 cr_poll_last_value;
+ aligned_u64 reserved;
+} __aligned(8);
+
+struct rogue_tlbinfo {
+ u32 bad_addr;
+ u32 entry_lo;
+};
+
+struct rogue_hwrinfo {
+ union {
+ struct rogue_bifinfo bif_info;
+ struct rogue_mmuinfo mmu_info;
+ struct rogue_pollinfo poll_info;
+ struct rogue_tlbinfo tlb_info;
+ struct rogue_eccinfo ecc_info;
+ } hwr_data;
+
+ aligned_u64 cr_timer;
+ aligned_u64 os_timer;
+ u32 frame_num;
+ u32 pid;
+ u32 active_hwrt_data;
+ u32 hwr_number;
+ u32 event_status;
+ u32 hwr_recovery_flags;
+ enum rogue_hwrtype hwr_type;
+ u32 dm;
+ u32 core_id;
+ aligned_u64 cr_time_of_kick;
+ aligned_u64 cr_time_hw_reset_start;
+ aligned_u64 cr_time_hw_reset_finish;
+ aligned_u64 cr_time_freelist_ready;
+ aligned_u64 reserved[2];
+} __aligned(8);
+
+/* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define ROGUE_FWIF_HWINFO_MAX_FIRST 8U
+/* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define ROGUE_FWIF_HWINFO_MAX_LAST 8U
+/* Total number of HWR logs stored in a buffer */
+#define ROGUE_FWIF_HWINFO_MAX \
+ (ROGUE_FWIF_HWINFO_MAX_FIRST + ROGUE_FWIF_HWINFO_MAX_LAST)
+/* Index of the last log in the HWR log buffer */
+#define ROGUE_FWIF_HWINFO_LAST_INDEX (ROGUE_FWIF_HWINFO_MAX - 1U)
+
+struct rogue_fwif_hwrinfobuf {
+ struct rogue_hwrinfo hwr_info[ROGUE_FWIF_HWINFO_MAX];
+ u32 hwr_counter;
+ u32 write_index;
+ u32 dd_req_count;
+ u32 hwr_info_buf_flags; /* Compatibility and other flags */
+ u32 hwr_dm_locked_up_count[PVR_FWIF_DM_MAX];
+ u32 hwr_dm_overran_count[PVR_FWIF_DM_MAX];
+ u32 hwr_dm_recovered_count[PVR_FWIF_DM_MAX];
+ u32 hwr_dm_false_detect_count[PVR_FWIF_DM_MAX];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN (1)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN (3)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN (4)
+
+#define ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN (1)
+#define ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (2)
+
+#define ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP (1)
+#define ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP (2)
+/*
+ ******************************************************************************
+ * ROGUE firmware Init Config Data
+ ******************************************************************************
+ */
+
+/* Flag definitions affecting the firmware globally */
+#define ROGUE_FWIF_INICFG_CTXSWITCH_MODE_RAND BIT(0)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_SRESET_EN BIT(1)
+#define ROGUE_FWIF_INICFG_HWPERF_EN BIT(2)
+#define ROGUE_FWIF_INICFG_DM_KILL_MODE_RAND_EN BIT(3)
+#define ROGUE_FWIF_INICFG_POW_RASCALDUST BIT(4)
+/* Bit 5 is reserved. */
+#define ROGUE_FWIF_INICFG_FBCDC_V3_1_EN BIT(6)
+#define ROGUE_FWIF_INICFG_CHECK_MLIST_EN BIT(7)
+#define ROGUE_FWIF_INICFG_DISABLE_CLKGATING_EN BIT(8)
+/* Bit 9 is reserved. */
+/* Bit 10 is reserved. */
+/* Bit 11 is reserved. */
+#define ROGUE_FWIF_INICFG_REGCONFIG_EN BIT(12)
+#define ROGUE_FWIF_INICFG_ASSERT_ON_OUTOFMEMORY BIT(13)
+#define ROGUE_FWIF_INICFG_HWP_DISABLE_FILTER BIT(14)
+/* Bit 15 is reserved. */
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_FAST \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SLOW \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_NODELAY \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK \
+ (7 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP BIT(19)
+#define ROGUE_FWIF_INICFG_ASSERT_ON_HWR_TRIGGER BIT(20)
+#define ROGUE_FWIF_INICFG_FABRIC_COHERENCY_ENABLED BIT(21)
+#define ROGUE_FWIF_INICFG_VALIDATE_IRQ BIT(22)
+#define ROGUE_FWIF_INICFG_DISABLE_PDP_EN BIT(23)
+#define ROGUE_FWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN BIT(24)
+#define ROGUE_FWIF_INICFG_WORKEST BIT(25)
+#define ROGUE_FWIF_INICFG_PDVFS BIT(26)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT (27)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND \
+ (ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN \
+ << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN \
+ (ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN \
+ << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_MASK \
+ (3 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT (29)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_NONE (0)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP \
+ (ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP \
+ << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP \
+ (ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP \
+ << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_MASK \
+ (ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP | \
+ ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP)
+#define ROGUE_FWIF_INICFG_VALIDATE_SOCUSC_TIMER BIT(31)
+
+#define ROGUE_FWIF_INICFG_ALL (0xFFFFFFFFU)
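+
+/*
+ * Example use of the shift/mask pairs above: a host requesting the "medium"
+ * context switch profile sets ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM in
+ * its config flags; the raw profile value can be recovered with
+ * (flags & ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK) >>
+ * ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT.
+ */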
+
+/* Extended Flag definitions affecting the firmware globally */
+#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0)
+/* [7] YUV10 override
+ * [6:4] Quality
+ * [3] Quality enable
+ * [2:1] Compression scheme
+ * [0] Lossy group
+ */
+#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK (0xFF)
+#define ROGUE_FWIF_INICFG_EXT_ALL (ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK)
+
+/* Flag definitions affecting only workloads submitted by a particular OS */
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN BIT(0)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN BIT(1)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN BIT(2)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN BIT(3)
+
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_TDM BIT(4)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_GEOM BIT(5)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_FRAG BIT(6)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_CDM BIT(7)
+
+#define ROGUE_FWIF_INICFG_OS_ALL (0xFF)
+
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL \
+ (ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN | \
+ ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \
+ ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN | \
+ ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN)
+
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CLRMSK \
+ ~(ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL)
+
+#define ROGUE_FWIF_FILTCFG_TRUNCATE_HALF BIT(3)
+#define ROGUE_FWIF_FILTCFG_TRUNCATE_INT BIT(2)
+#define ROGUE_FWIF_FILTCFG_NEW_FILTER_MODE BIT(1)
+
+enum rogue_activepm_conf {
+ ROGUE_ACTIVEPM_FORCE_OFF = 0,
+ ROGUE_ACTIVEPM_FORCE_ON = 1,
+ ROGUE_ACTIVEPM_DEFAULT = 2
+};
+
+enum rogue_rd_power_island_conf {
+ ROGUE_RD_POWER_ISLAND_FORCE_OFF = 0,
+ ROGUE_RD_POWER_ISLAND_FORCE_ON = 1,
+ ROGUE_RD_POWER_ISLAND_DEFAULT = 2
+};
+
+struct rogue_fw_register_list {
+ /* Register number */
+ u16 reg_num;
+ /* Indirect register number (or 0 if not used) */
+ u16 indirect_reg_num;
+ /* Start value for indirect register */
+ u16 indirect_start_val;
+ /* End value for indirect register */
+ u16 indirect_end_val;
+};
+
+struct rogue_fwif_dllist_node {
+ u32 p;
+ u32 n;
+};
+
+/*
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define ROGUE_FWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/* This number is used to represent unallocated page catalog base register */
+#define ROGUE_FW_BIF_INVALID_PCSET 0xFFFFFFFFU
+
+/* Firmware memory context. */
+struct rogue_fwif_fwmemcontext {
+ /* device physical address of context's page catalogue */
+ aligned_u64 pc_dev_paddr;
+ /*
+ * associated page catalog base register (ROGUE_FW_BIF_INVALID_PCSET ==
+ * unallocated)
+ */
+ u32 page_cat_base_reg_set;
+ /* breakpoint address */
+ u32 breakpoint_addr;
+ /* breakpoint handler address */
+ u32 bp_handler_addr;
+ /* DM and enable control for BP */
+ u32 breakpoint_ctl;
+ /* Compatibility and other flags */
+ u32 fw_mem_ctx_flags;
+ u32 padding;
+} __aligned(8);
+
+/*
+ * FW context state flags
+ */
+#define ROGUE_FWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
+#define ROGUE_FWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
+
+#define ROGUE_NUM_GEOM_CORES_MAX 4
+
+/*
+ * FW-accessible TA state which must be written out to memory on context store
+ */
+struct rogue_fwif_geom_ctx_state_per_geom {
+ /* To store in mid-TA */
+ aligned_u64 geom_reg_vdm_call_stack_pointer;
+	/* Initial value (in case it is 'lost' due to a lock-up) */
+ aligned_u64 geom_reg_vdm_call_stack_pointer_init;
+ u32 geom_reg_vbs_so_prim[4];
+ u16 geom_current_idx;
+ u16 padding[3];
+} __aligned(8);
+
+struct rogue_fwif_geom_ctx_state {
+ /* FW-accessible TA state which must be written out to memory on context store */
+ struct rogue_fwif_geom_ctx_state_per_geom geom_core[ROGUE_NUM_GEOM_CORES_MAX];
+} __aligned(8);
+
+/*
+ * FW-accessible ISP state which must be written out to memory on context store
+ */
+struct rogue_fwif_frag_ctx_state {
+ u32 frag_reg_pm_deallocated_mask_status;
+ u32 frag_reg_dm_pds_mtilefree_status;
+ /* Compatibility and other flags */
+ u32 ctx_state_flags;
+ /*
+ * frag_reg_isp_store should be the last element of the structure as this
+ * is an array whose size is determined at runtime after detecting the
+ * ROGUE core
+ */
+ u32 frag_reg_isp_store[];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTX_USING_BUFFER_A (0)
+#define ROGUE_FWIF_CTX_USING_BUFFER_B (1U)
+
+struct rogue_fwif_compute_ctx_state {
+ u32 ctx_state_flags; /* Target buffer and other flags */
+};
+
+struct rogue_fwif_fwcommoncontext {
+ /* CCB details for this firmware context */
+ u32 ccbctl_fw_addr; /* CCB control */
+ u32 ccb_fw_addr; /* CCB base */
+ struct rogue_fwif_dma_addr ccb_meta_dma_addr;
+
+ /* Context suspend state */
+ /* geom/frag context suspend state, read/written by FW */
+ u32 context_state_addr __aligned(8);
+
+ /* Flags e.g. for context switching */
+ u32 fw_com_ctx_flags;
+ u32 priority;
+ u32 priority_seq_num;
+
+ /* Framework state */
+ /* Register updates for Framework */
+ u32 rf_cmd_addr __aligned(8);
+
+ /* Statistic updates waiting to be passed back to the host... */
+ /* True when some stats are pending */
+ bool stats_pending __aligned(4);
+ /* Number of stores on this context since last update */
+ s32 stats_num_stores;
+ /* Number of OOMs on this context since last update */
+ s32 stats_num_out_of_memory;
+ /* Number of PRs on this context since last update */
+ s32 stats_num_partial_renders;
+ /* Data Master type */
+ u32 dm;
+ /* Device Virtual Address of the signal the context is waiting on */
+ aligned_u64 wait_signal_address;
+ /* List entry for the wait-signal list */
+ struct rogue_fwif_dllist_node wait_signal_node __aligned(8);
+ /* List entry for the buffer stalled list */
+ struct rogue_fwif_dllist_node buf_stalled_node __aligned(8);
+ /* Address of the circular buffer queue pointers */
+ aligned_u64 cbuf_queue_ctrl_addr;
+
+ aligned_u64 robustness_address;
+ /* Max HWR deadline limit in ms */
+ u32 max_deadline_ms;
+ /* Following HWR circular buffer read-offset needs resetting */
+ bool read_offset_needs_reset;
+
+ /* List entry for the waiting list */
+ struct rogue_fwif_dllist_node waiting_node __aligned(8);
+ /* List entry for the run list */
+ struct rogue_fwif_dllist_node run_node __aligned(8);
+ /* UFO that last failed (or NULL) */
+ struct rogue_fwif_ufo last_failed_ufo;
+
+ /* Memory context */
+ u32 fw_mem_context_fw_addr;
+
+ /* References to the host side originators */
+ /* the Server Common Context */
+ u32 server_common_context_id;
+ /* associated process ID */
+ u32 pid;
+
+ /* True when Geom DM OOM is not allowed */
+ bool geom_oom_disabled __aligned(4);
+} __aligned(8);
+
+/* Firmware render context. */
+struct rogue_fwif_fwrendercontext {
+ /* Geometry firmware context. */
+ struct rogue_fwif_fwcommoncontext geom_context;
+ /* Fragment firmware context. */
+ struct rogue_fwif_fwcommoncontext frag_context;
+
+ struct rogue_fwif_static_rendercontext_state static_render_context_state;
+
+ /* Number of commands submitted to the WorkEst FW CCB */
+ u32 work_est_ccb_submitted;
+
+ /* Compatibility and other flags */
+ u32 fw_render_ctx_flags;
+} __aligned(8);
+
+/* Firmware compute context. */
+struct rogue_fwif_fwcomputecontext {
+ /* Firmware context for the CDM */
+ struct rogue_fwif_fwcommoncontext cdm_context;
+
+ struct rogue_fwif_static_computecontext_state
+ static_compute_context_state;
+
+ /* Number of commands submitted to the WorkEst FW CCB */
+ u32 work_est_ccb_submitted;
+
+ /* Compatibility and other flags */
+ u32 compute_ctx_flags;
+
+ u32 wgp_state;
+ u32 wgp_checksum;
+ u32 core_mask_a;
+ u32 core_mask_b;
+} __aligned(8);
+
+/* Firmware TDM context. */
+struct rogue_fwif_fwtdmcontext {
+ /* Firmware context for the TDM */
+ struct rogue_fwif_fwcommoncontext tdm_context;
+
+ /* Number of commands submitted to the WorkEst FW CCB */
+ u32 work_est_ccb_submitted;
+} __aligned(8);
+
+/* Firmware TQ3D context. */
+struct rogue_fwif_fwtransfercontext {
+ /* Firmware context for TQ3D. */
+ struct rogue_fwif_fwcommoncontext tq_context;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ ******************************************************************************
+ */
+
+/*
+ * CMD_TYPE 32bit contains:
+ * 31:16 Reserved for magic value to detect corruption (16 bits)
+ * 15 Reserved for ROGUE_CCB_TYPE_TASK (1 bit)
+ * 14:0 Bits available for CMD_TYPEs (15 bits)
+ */
+
+/* Magic value to detect corruption */
+#define ROGUE_CMD_MAGIC_DWORD (0x2ABC)
+#define ROGUE_CMD_MAGIC_DWORD_MASK (0xFFFF0000U)
+#define ROGUE_CMD_MAGIC_DWORD_SHIFT (16U)
+#define ROGUE_CMD_MAGIC_DWORD_SHIFTED \
+ (ROGUE_CMD_MAGIC_DWORD << ROGUE_CMD_MAGIC_DWORD_SHIFT)
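+
+/*
+ * Every command type below ORs in the shifted magic, e.g.
+ * ROGUE_FWIF_KCCB_CMD_KICK == (0x2ABC << 16) | 101. A consumer can therefore
+ * spot a corrupted command when
+ * (cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) != ROGUE_CMD_MAGIC_DWORD_SHIFTED.
+ */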
+
+/* Kernel CCB control for ROGUE */
+struct rogue_fwif_ccb_ctl {
+ /* write offset into array of commands (MUST be aligned to 16 bytes!) */
+ u32 write_offset;
+ /* Padding to ensure read and write offsets are in separate cache lines. */
+ u8 padding[128 - sizeof(u32)];
+ /* read offset into array of commands */
+ u32 read_offset;
+ /* Offset wrapping mask (Total capacity of the CCB - 1) */
+ u32 wrap_mask;
+ /* size of each command in bytes */
+ u32 cmd_size;
+ u32 padding2;
+} __aligned(8);
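+
+/*
+ * Note (illustrative, inferred from the "capacity - 1" description of
+ * wrap_mask): offsets are expected to wrap with a simple
+ * new_offset = (old_offset + cmd_size) & wrap_mask.
+ */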
+
+/* Kernel CCB command structure for ROGUE */
+
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */
+
+/*
+ * can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT
+ * bit from BIF_CTRL reg
+ */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10)
+/* BIF_CTRL_INVAL_TLB1_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB \
+ (ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8)
+/* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800)
+
+/* indicates FW should interrupt the host */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U)
+
+struct rogue_fwif_mmucachedata {
+ u32 cache_flags;
+ u32 mmu_cache_sync_fw_addr;
+ u32 mmu_cache_sync_update_value;
+};
+
+#define ROGUE_FWIF_BPDATA_FLAGS_ENABLE BIT(0)
+#define ROGUE_FWIF_BPDATA_FLAGS_WRITE BIT(1)
+#define ROGUE_FWIF_BPDATA_FLAGS_CTL BIT(2)
+#define ROGUE_FWIF_BPDATA_FLAGS_REGS BIT(3)
+
+struct rogue_fwif_bpdata {
+ /* Memory context */
+ u32 fw_mem_context_fw_addr;
+ /* Breakpoint address */
+ u32 bp_addr;
+ /* Breakpoint handler */
+ u32 bp_handler_addr;
+ /* Breakpoint control */
+ u32 bp_dm;
+ u32 bp_data_flags;
+ /* Number of temporary registers to overallocate */
+ u32 temp_regs;
+ /* Number of shared registers to overallocate */
+ u32 shared_regs;
+ /* DM associated with the breakpoint */
+ u32 dm;
+};
+
+#define ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS \
+ (ROGUE_FWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */
+
+struct rogue_fwif_kccb_cmd_kick_data {
+ /* address of the firmware context */
+ u32 context_fw_addr;
+ /* Client CCB woff update */
+ u32 client_woff_update;
+ /* Client CCB wrap mask update after CCCB growth */
+ u32 client_wrap_mask_update;
+ /* number of CleanupCtl pointers attached */
+ u32 num_cleanup_ctl;
+ /* CleanupCtl structures associated with command */
+ u32 cleanup_ctl_fw_addr
+ [ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS];
+ /*
+ * offset to the CmdHeader which houses the workload estimation kick
+ * data.
+ */
+ u32 work_est_cmd_header_offset;
+};
+
+struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data {
+ struct rogue_fwif_kccb_cmd_kick_data geom_cmd_kick_data;
+ struct rogue_fwif_kccb_cmd_kick_data frag_cmd_kick_data;
+};
+
+struct rogue_fwif_kccb_cmd_force_update_data {
+ /* address of the firmware context */
+ u32 context_fw_addr;
+ /* Client CCB fence offset */
+ u32 ccb_fence_offset;
+};
+
+enum rogue_fwif_cleanup_type {
+ /* FW common context cleanup */
+ ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
+ /* FW HW RT data cleanup */
+ ROGUE_FWIF_CLEANUP_HWRTDATA,
+ /* FW freelist cleanup */
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ /* FW ZS Buffer cleanup */
+ ROGUE_FWIF_CLEANUP_ZSBUFFER,
+};
+
+struct rogue_fwif_cleanup_request {
+ /* Cleanup type */
+ enum rogue_fwif_cleanup_type cleanup_type;
+ union {
+ /* FW common context to cleanup */
+ u32 context_fw_addr;
+ /* HW RT to cleanup */
+ u32 hwrt_data_fw_addr;
+ /* Freelist to cleanup */
+ u32 freelist_fw_addr;
+ /* ZS Buffer to cleanup */
+ u32 zs_buffer_fw_addr;
+ } cleanup_data;
+};
+
+enum rogue_fwif_power_type {
+ ROGUE_FWIF_POW_OFF_REQ = 1,
+ ROGUE_FWIF_POW_FORCED_IDLE_REQ,
+ ROGUE_FWIF_POW_NUM_UNITS_CHANGE,
+ ROGUE_FWIF_POW_APM_LATENCY_CHANGE
+};
+
+enum rogue_fwif_power_force_idle_type {
+ ROGUE_FWIF_POWER_FORCE_IDLE = 1,
+ ROGUE_FWIF_POWER_CANCEL_FORCED_IDLE,
+ ROGUE_FWIF_POWER_HOST_TIMEOUT,
+};
+
+struct rogue_fwif_power_request {
+ /* Type of power request */
+ enum rogue_fwif_power_type pow_type;
+ union {
+ /* Number of active Dusts */
+ u32 num_of_dusts;
+ /* If the operation is mandatory */
+ bool forced __aligned(4);
+ /*
+ * Type of Request. Consolidating Force Idle, Cancel Forced
+ * Idle, Host Timeout
+ */
+ enum rogue_fwif_power_force_idle_type pow_request_type;
+ } power_req_data;
+};
+
+struct rogue_fwif_slcflushinvaldata {
+ /* Context to fence on (only useful when bDMContext == TRUE) */
+ u32 context_fw_addr;
+ /* Invalidate the cache as well as flushing */
+ bool inval __aligned(4);
+ /* The data to flush/invalidate belongs to a specific DM context */
+ bool dm_context __aligned(4);
+ /* Optional address of range (only useful when bDMContext == FALSE) */
+ aligned_u64 address;
+ /* Optional size of range (only useful when bDMContext == FALSE) */
+ aligned_u64 size;
+};
+
+enum rogue_fwif_hwperf_update_config {
+ ROGUE_FWIF_HWPERF_CTRL_TOGGLE = 0,
+ ROGUE_FWIF_HWPERF_CTRL_SET = 1,
+ ROGUE_FWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2
+};
+
+struct rogue_fwif_hwperf_ctrl {
+ enum rogue_fwif_hwperf_update_config opcode; /* Control operation code */
+ aligned_u64 mask; /* Mask of events to toggle */
+};
+
+struct rogue_fwif_hwperf_config_enable_blks {
+ /* Number of ROGUE_HWPERF_CONFIG_MUX_CNTBLK in the array */
+ u32 num_blocks;
+ /* Address of the ROGUE_HWPERF_CONFIG_MUX_CNTBLK array */
+ u32 block_configs_fw_addr;
+};
+
+struct rogue_fwif_hwperf_config_da_blks {
+ /* Number of ROGUE_HWPERF_CONFIG_CNTBLK in the array */
+ u32 num_blocks;
+ /* Address of the ROGUE_HWPERF_CONFIG_CNTBLK array */
+ u32 block_configs_fw_addr;
+};
+
+struct rogue_fwif_coreclkspeedchange_data {
+ u32 new_clock_speed; /* New clock speed */
+};
+
+#define ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX 16
+
+struct rogue_fwif_hwperf_ctrl_blks {
+ bool enable;
+ /* Number of block IDs in the array */
+ u32 num_blocks;
+ /* Array of ROGUE_HWPERF_CNTBLK_ID values */
+ u16 block_ids[ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX];
+};
+
+struct rogue_fwif_hwperf_select_custom_cntrs {
+ u16 custom_block;
+ u16 num_counters;
+ u32 custom_counter_ids_fw_addr;
+};
+
+struct rogue_fwif_zsbuffer_backing_data {
+ u32 zs_buffer_fw_addr; /* ZS-Buffer FW address */
+
+ bool done __aligned(4); /* action backing/unbacking succeeded */
+};
+
+struct rogue_fwif_freelist_gs_data {
+ /* Freelist FW address */
+ u32 freelist_fw_addr;
+ /* Amount of the Freelist change */
+ u32 delta_pages;
+ /* New amount of pages on the freelist (including ready pages) */
+ u32 new_pages;
+ /* Number of ready pages to be held in reserve until OOM */
+ u32 ready_pages;
+};
+
+#define MAX_FREELISTS_SIZE 3
+#define MAX_HW_GEOM_FRAG_CONTEXTS_SIZE 3
+
+#define ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT \
+ (MAX_HW_GEOM_FRAG_CONTEXTS_SIZE * MAX_FREELISTS_SIZE * 2U)
+#define ROGUE_FWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+struct rogue_fwif_freelists_reconstruction_data {
+ u32 freelist_count;
+ u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+struct rogue_fwif_write_offset_update_data {
+ /*
+	 * Context that may need to be resumed following a write offset update
+ */
+ u32 context_fw_addr;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Proactive DVFS Structures
+ ******************************************************************************
+ */
+#define NUM_OPP_VALUES 16
+
+struct pdvfs_opp {
+ u32 volt; /* V */
+ u32 freq; /* Hz */
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_opp {
+ struct pdvfs_opp opp_values[NUM_OPP_VALUES];
+ u32 min_opp_point;
+ u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_max_freq_data {
+ u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_min_freq_data {
+ u32 min_opp_point;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Register configuration structures
+ ******************************************************************************
+ */
+
+#define ROGUE_FWIF_REG_CFG_MAX_SIZE 512
+
+enum rogue_fwif_regdata_cmd_type {
+ ROGUE_FWIF_REGCFG_CMD_ADD = 101,
+ ROGUE_FWIF_REGCFG_CMD_CLEAR = 102,
+ ROGUE_FWIF_REGCFG_CMD_ENABLE = 103,
+ ROGUE_FWIF_REGCFG_CMD_DISABLE = 104
+};
+
+enum rogue_fwif_reg_cfg_type {
+ /* Sidekick power event */
+ ROGUE_FWIF_REG_CFG_TYPE_PWR_ON = 0,
+ /* Rascal / dust power event */
+ ROGUE_FWIF_REG_CFG_TYPE_DUST_CHANGE,
+ /* Geometry kick */
+ ROGUE_FWIF_REG_CFG_TYPE_GEOM,
+ /* Fragment kick */
+ ROGUE_FWIF_REG_CFG_TYPE_FRAG,
+ /* Compute kick */
+ ROGUE_FWIF_REG_CFG_TYPE_CDM,
+ /* TLA kick */
+ ROGUE_FWIF_REG_CFG_TYPE_TLA,
+ /* TDM kick */
+ ROGUE_FWIF_REG_CFG_TYPE_TDM,
+ /* Applies to all types. Keep as last element */
+ ROGUE_FWIF_REG_CFG_TYPE_ALL
+};
+
+struct rogue_fwif_reg_cfg_rec {
+ u64 sddr;
+ u64 mask;
+ u64 value;
+};
+
+struct rogue_fwif_regconfig_data {
+ enum rogue_fwif_regdata_cmd_type cmd_type;
+ enum rogue_fwif_reg_cfg_type reg_config_type;
+ struct rogue_fwif_reg_cfg_rec reg_config __aligned(8);
+};
+
+struct rogue_fwif_reg_cfg {
+ /*
+ * PDump WRW command write granularity is 32 bits.
+ * Add padding to ensure array size is 32 bit granular.
+ */
+ u8 num_regs_type[ALIGN((u32)ROGUE_FWIF_REG_CFG_TYPE_ALL,
+ sizeof(u32))] __aligned(8);
+ struct rogue_fwif_reg_cfg_rec
+ reg_configs[ROGUE_FWIF_REG_CFG_MAX_SIZE] __aligned(8);
+} __aligned(8);
+
+enum rogue_fwif_os_state_change {
+ ROGUE_FWIF_OS_ONLINE = 1,
+ ROGUE_FWIF_OS_OFFLINE
+};
+
+struct rogue_fwif_os_state_change_data {
+ u32 osid;
+ enum rogue_fwif_os_state_change new_os_state;
+} __aligned(8);
+
+enum rogue_fwif_counter_dump_request {
+ ROGUE_FWIF_PWR_COUNTER_DUMP_START = 1,
+ ROGUE_FWIF_PWR_COUNTER_DUMP_STOP,
+ ROGUE_FWIF_PWR_COUNTER_DUMP_SAMPLE,
+};
+
+struct rogue_fwif_counter_dump_data {
+ enum rogue_fwif_counter_dump_request counter_dump_request;
+} __aligned(8);
+
+enum rogue_fwif_kccb_cmd_type {
+ /* Common commands */
+ ROGUE_FWIF_KCCB_CMD_KICK = 101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ ROGUE_FWIF_KCCB_CMD_MMUCACHE = 102U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ ROGUE_FWIF_KCCB_CMD_BP = 103U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* SLC flush and invalidation request */
+ ROGUE_FWIF_KCCB_CMD_SLCFLUSHINVAL = 105U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /*
+ * Requests cleanup of a FW resource (type specified in the command
+ * data)
+ */
+ ROGUE_FWIF_KCCB_CMD_CLEANUP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Power request */
+ ROGUE_FWIF_KCCB_CMD_POW = 107U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Backing for on-demand ZS-Buffer done */
+ ROGUE_FWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE =
+ 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Firmware is clearing up connection data */
+ ROGUE_FWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE =
+ 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Freelist Grow done */
+ ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE =
+ 110U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Freelists Reconstruction done */
+ ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE =
+ 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /*
+ * Informs the firmware that the host has added more data to a CDM2
+ * Circular Buffer
+ */
+ ROGUE_FWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE =
+ 114U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Health check request */
+ ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK = 115U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Forcing signalling of all unmet UFOs for a given CCB offset */
+ ROGUE_FWIF_KCCB_CMD_FORCE_UPDATE = 116U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* There is a geometry and a fragment command in this single kick */
+ ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK = 117U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Informs the FW that a Guest OS has come online / offline. */
+ ROGUE_FWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Commands only permitted to the native or host OS */
+ ROGUE_FWIF_KCCB_CMD_REGCONFIG = 200U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Core clock speed change event */
+ ROGUE_FWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /*
+ * Ask the firmware to update its cached ui32LogType value from the (shared)
+ * tracebuf control structure
+ */
+ ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Set a maximum frequency/OPP point */
+ ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /*
+ * Changes the relative scheduling priority for a particular OSid. It can
+ * only be serviced for the Host DDK
+ */
+ ROGUE_FWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Set or clear firmware state flags */
+ ROGUE_FWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Set a minimum frequency/OPP point */
+ ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Configure Periodic Hardware Reset behaviour */
+ ROGUE_FWIF_KCCB_CMD_PHR_CFG = 213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Configure Safety Firmware Watchdog */
+ ROGUE_FWIF_KCCB_CMD_WDG_CFG = 215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Controls counter dumping in the FW */
+ ROGUE_FWIF_KCCB_CMD_COUNTER_DUMP = 216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Configure, clear and enable multiple HWPerf blocks */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Configure the custom counters for HWPerf */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Configure directly addressable counters for HWPerf */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+};
+
+#define ROGUE_FWIF_LAST_ALLOWED_GUEST_KCCB_CMD \
+ (ROGUE_FWIF_KCCB_CMD_REGCONFIG - 1)
+
+/* Kernel CCB command packet */
+struct rogue_fwif_kccb_cmd {
+ /* Command type */
+ enum rogue_fwif_kccb_cmd_type cmd_type;
+ /* Compatibility and other flags */
+ u32 kccb_flags;
+
+ /*
+ * NOTE: Make sure that uCmdData is the last member of this struct
+ * This is to calculate actual command size for device mem copy.
+ * (Refer ROGUEGetCmdMemCopySize())
+ */
+ union {
+ /* Data for Kick command */
+ struct rogue_fwif_kccb_cmd_kick_data cmd_kick_data;
+ /* Data for combined geom/frag Kick command */
+ struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data
+ combined_geom_frag_cmd_kick_data;
+ /* Data for MMU cache command */
+ struct rogue_fwif_mmucachedata mmu_cache_data;
+ /* Data for Breakpoint Commands */
+ struct rogue_fwif_bpdata bp_data;
+ /* Data for SLC Flush/Inval commands */
+ struct rogue_fwif_slcflushinvaldata slc_flush_inval_data;
+ /* Data for cleanup commands */
+ struct rogue_fwif_cleanup_request cleanup_data;
+ /* Data for power request commands */
+ struct rogue_fwif_power_request pow_data;
+ /* Data for HWPerf control command */
+ struct rogue_fwif_hwperf_ctrl hw_perf_ctrl;
+ /*
+ * Data for HWPerf configure, clear and enable performance
+ * counter block command
+ */
+ struct rogue_fwif_hwperf_config_enable_blks
+ hw_perf_cfg_enable_blks;
+ /*
+ * Data for HWPerf enable or disable performance counter block
+ * commands
+ */
+ struct rogue_fwif_hwperf_ctrl_blks hw_perf_ctrl_blks;
+ /* Data for HWPerf configure the custom counters to read */
+ struct rogue_fwif_hwperf_select_custom_cntrs
+ hw_perf_select_cstm_cntrs;
+ /* Data for HWPerf configure Directly Addressable blocks */
+ struct rogue_fwif_hwperf_config_da_blks hw_perf_cfg_da_blks;
+ /* Data for core clock speed change */
+ struct rogue_fwif_coreclkspeedchange_data
+ core_clk_speed_change_data;
+ /* Feedback for Z/S Buffer backing/unbacking */
+ struct rogue_fwif_zsbuffer_backing_data zs_buffer_backing_data;
+		/* Feedback for Freelists reconstruction */
+ struct rogue_fwif_freelist_gs_data free_list_gs_data;
+ /* Feedback for Freelists reconstruction*/
+ struct rogue_fwif_freelists_reconstruction_data
+ free_lists_reconstruction_data;
+ /* Data for custom register configuration */
+ struct rogue_fwif_regconfig_data reg_config_data;
+ /* Data for informing the FW about the write offset update */
+ struct rogue_fwif_write_offset_update_data
+ write_offset_update_data;
+ /* Data for setting the max frequency/OPP */
+ struct rogue_fwif_pdvfs_max_freq_data pdvfs_max_freq_data;
+ /* Data for setting the min frequency/OPP */
+ struct rogue_fwif_pdvfs_min_freq_data pdvfs_min_freq_data;
+ /* Data for updating the Guest Online states */
+ struct rogue_fwif_os_state_change_data cmd_os_online_state_data;
+ /* Dev address for TBI buffer allocated on demand */
+ u32 tbi_buffer_fw_addr;
+ /* Data for dumping of register ranges */
+ struct rogue_fwif_counter_dump_data counter_dump_config_data;
+ /* Data for signalling all unmet fences for a given CCB */
+ struct rogue_fwif_kccb_cmd_force_update_data force_update_data;
+ } cmd_data __aligned(8);
+} __aligned(8);
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_kccb_cmd);
+
+/*
+ ******************************************************************************
+ * Firmware CCB command structure for ROGUE
+ ******************************************************************************
+ */
+
+struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data {
+ u32 zs_buffer_id;
+};
+
+struct rogue_fwif_fwccb_cmd_freelist_gs_data {
+ u32 freelist_id;
+};
+
+struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data {
+ u32 freelist_count;
+ u32 hwr_counter;
+ u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+/* 1 if a page fault happened */
+#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF BIT(0)
+/* 1 if applicable to all contexts */
+#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS BIT(1)
+
+struct rogue_fwif_fwccb_cmd_context_reset_data {
+ /* Context affected by the reset */
+ u32 server_common_context_id;
+ /* Reason for reset */
+ enum rogue_context_reset_reason reset_reason;
+ /* Data Master affected by the reset */
+ u32 dm;
+ /* Job ref running at the time of reset */
+ u32 reset_job_ref;
+ /* ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */
+ u32 flags;
+ /* At what page catalog address */
+ aligned_u64 pc_address;
+ /* Page fault address (only when applicable) */
+ aligned_u64 fault_address;
+};
+
+struct rogue_fwif_fwccb_cmd_fw_pagefault_data {
+ /* Page fault address */
+ u64 fw_fault_addr;
+};
+
+enum rogue_fwif_fwccb_cmd_type {
+ /* Requests ZSBuffer to be backed with physical pages */
+ ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests ZSBuffer to be unbacked */
+ ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests an on-demand freelist grow/shrink */
+ ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW = 103U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests freelists reconstruction */
+ ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION =
+ 104U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Notifies host of a HWR event on a context */
+ ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION =
+ 105U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests an on-demand debug dump */
+ ROGUE_FWIF_FWCCB_CMD_DEBUG_DUMP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests an on-demand update on process stats */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS = 107U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ ROGUE_FWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE =
+ 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART =
+ 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Notifies host of a FW pagefault */
+ ROGUE_FWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION =
+ 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+};
+
+enum rogue_fwif_fwccb_cmd_update_stats_type {
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32TotalNumPartialRenders stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS = 1,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32TotalNumOutOfMemory stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumGeomStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_GEOM_STORES,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumFragStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_FRAG_STORES,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumCDMStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumTDMStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES
+};
+
+struct rogue_fwif_fwccb_cmd_update_stats_data {
+ /* Element to update */
+ enum rogue_fwif_fwccb_cmd_update_stats_type element_to_update;
+ /* The pid of the process whose stats are being updated */
+ u32 pid_owner;
+ /* Adjustment to be made to the statistic */
+ s32 adjustment_value;
+};
+
+struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data {
+ u32 core_clk_rate;
+} __aligned(8);
+
+struct rogue_fwif_fwccb_cmd {
+ /* Command type */
+ enum rogue_fwif_fwccb_cmd_type cmd_type;
+ /* Compatibility and other flags */
+ u32 fwccb_flags;
+
+ union {
+		/* Data for Z/S-Buffer on-demand (un)backing */
+ struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data
+ cmd_zs_buffer_backing;
+ /* Data for on-demand freelist grow/shrink */
+ struct rogue_fwif_fwccb_cmd_freelist_gs_data cmd_free_list_gs;
+ /* Data for freelists reconstruction */
+ struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data
+ cmd_freelists_reconstruction;
+ /* Data for context reset notification */
+ struct rogue_fwif_fwccb_cmd_context_reset_data
+ cmd_context_reset_notification;
+ /* Data for updating process stats */
+ struct rogue_fwif_fwccb_cmd_update_stats_data
+ cmd_update_stats_data;
+ struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data
+ cmd_core_clk_rate_change;
+ struct rogue_fwif_fwccb_cmd_fw_pagefault_data cmd_fw_pagefault;
+ } cmd_data __aligned(8);
+} __aligned(8);
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_fwccb_cmd);
+
+/*
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for ROGUE
+ ******************************************************************************
+ */
+struct rogue_fwif_workest_fwccb_cmd {
+ /* Index for return data array */
+ u16 return_data_index;
+ /* The cycles the workload took on the hardware */
+ u32 cycles_taken;
+};
+
+/*
+ ******************************************************************************
+ * Client CCB commands for ROGUE
+ ******************************************************************************
+ */
+
+/*
+ * Required memory alignment for 64-bit variables accessible by Meta
+ * (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared
+ * between the host and meta that contains 64-bit variables has to maintain
+ * this alignment)
+ */
+#define ROGUE_FWIF_FWALLOC_ALIGN sizeof(u64)
+
+#define ROGUE_CCB_TYPE_TASK BIT(15)
+#define ROGUE_CCB_FWALLOC_ALIGN(size) \
+ (((size) + (ROGUE_FWIF_FWALLOC_ALIGN - 1)) & \
+ ~(ROGUE_FWIF_FWALLOC_ALIGN - 1))
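+
+/*
+ * For example, ROGUE_CCB_FWALLOC_ALIGN(13) rounds 13 up to 16 (the next
+ * multiple of sizeof(u64)), while ROGUE_CCB_FWALLOC_ALIGN(16) stays 16.
+ */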
+
+#define ROGUE_FWIF_CCB_CMD_TYPE_GEOM \
+ (201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D \
+ (202U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG \
+ (203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR \
+ (204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_CDM \
+ (205U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_TDM \
+ (206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FBSC_INVALIDATE \
+ (207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_2D \
+ (208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_PRE_TIMESTAMP \
+ (209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_NULL \
+ (210U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_ABORT \
+ (211U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+
+/* Leave a gap between CCB specific commands and generic commands */
+#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE (212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UPDATE (213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_RMW_UPDATE \
+ (214U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR (215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_PRIORITY (216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+/*
+ * Pre- and post-timestamp commands are supposed to sandwich the DM cmd. The
+ * padding code used on CCB wrap upsets the FW if the task type bit is not
+ * cleared for POST_TIMESTAMPs, which is why two different cmd types exist.
+ */
+#define ROGUE_FWIF_CCB_CMD_TYPE_POST_TIMESTAMP \
+ (217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_UPDATE \
+ (218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE \
+ (219U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+
+#define ROGUE_FWIF_CCB_CMD_TYPE_PADDING (221U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+
+struct rogue_fwif_workest_kick_data {
+ /* Index for the KM Workload estimation return data array */
+ u16 return_data_index __aligned(8);
+ /* Predicted time taken to do the work in cycles */
+ u32 cycles_prediction __aligned(8);
+ /* Deadline for the workload */
+ aligned_u64 deadline;
+};
+
+struct rogue_fwif_ccb_cmd_header {
+ u32 cmd_type;
+ u32 cmd_size;
+ /*
+ * external job reference - provided by client and used in debug for
+ * tracking submitted work
+ */
+ u32 ext_job_ref;
+ /*
+ * internal job reference - generated by services and used in debug for
+ * tracking submitted work
+ */
+ u32 int_job_ref;
+ /* Workload Estimation - Workload Estimation Data */
+ struct rogue_fwif_workest_kick_data work_est_kick_data __aligned(8);
+};
+
+/*
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ ******************************************************************************
+ */
+struct rogue_fwif_cmd_priority {
+ s32 priority;
+};
+
+/*
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ ******************************************************************************
+ */
+struct rogue_fwif_sigbuf_ctl {
+ /* Ptr to Signature Buffer memory */
+ u32 buffer_fw_addr;
+ /* Amount of space left for storing regs in the buffer */
+ u32 left_size_in_regs;
+} __aligned(8);
+
+struct rogue_fwif_counter_dump_ctl {
+ /* Ptr to counter dump buffer */
+ u32 buffer_fw_addr;
+ /* Amount of space for storing in the buffer */
+ u32 size_in_dwords;
+} __aligned(8);
+
+struct rogue_fwif_firmware_gcov_ctl {
+ /* Ptr to firmware gcov buffer */
+ u32 buffer_fw_addr;
+ /* Amount of space for storing in the buffer */
+ u32 size;
+} __aligned(8);
+
+/*
+ *****************************************************************************
+ * ROGUE Compatibility checks
+ *****************************************************************************
+ */
+
+/*
+ * WARNING: Whenever the layout of ROGUE_FWIF_COMPCHECKS_BVNC changes, the
+ * following define should be increased by 1 to indicate to the compatibility
+ * logic that layout has changed.
+ */
+#define ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION 3
+
+struct rogue_fwif_compchecks_bvnc {
+ /* WARNING: This field must be defined as the first one in this structure */
+ u32 layout_version;
+ aligned_u64 bvnc;
+} __aligned(8);
+
+struct rogue_fwif_init_options {
+ u8 os_count_support;
+ u8 padding[7];
+} __aligned(8);
+
+#define ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+ struct rogue_fwif_compchecks_bvnc(name) = { \
+ ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION, \
+ 0, \
+ }
+
+static inline void rogue_fwif_compchecks_bvnc_init(struct rogue_fwif_compchecks_bvnc *compchecks)
+{
+ compchecks->layout_version = ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION;
+ compchecks->bvnc = 0;
+}
+
+struct rogue_fwif_compchecks {
+ /* hardware BVNC (from the ROGUE registers) */
+ struct rogue_fwif_compchecks_bvnc hw_bvnc;
+ /* firmware BVNC */
+ struct rogue_fwif_compchecks_bvnc fw_bvnc;
+ /* identifier of the FW processor version */
+ u32 fw_processor_version;
+ /* software DDK version */
+ u32 ddk_version;
+ /* software DDK build no. */
+ u32 ddk_build;
+ /* build options bit-field */
+ u32 build_options;
+ /* initialisation options bit-field */
+ struct rogue_fwif_init_options init_options;
+ /* Information is valid */
+ bool updated __aligned(4);
+ u32 padding;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ ******************************************************************************
+ */
+struct rogue_fwif_runtime_cfg {
+ /* APM latency in ms before signalling IDLE to the host */
+ u32 active_pm_latency_ms;
+ /* Compatibility and other flags */
+ u32 runtime_cfg_flags;
+ /*
+ * If set, the APM latency does not reset to the system default on each
+ * GPU power transition.
+ */
+ bool active_pm_latency_persistant __aligned(4);
+ /* Core clock speed, currently only used to calculate timer ticks */
+ u32 core_clock_speed;
+ /* Last change to the number of dusts requested by the host */
+ u32 default_dusts_num_init;
+ /* Periodic Hardware Reset configuration values */
+ u32 phr_mode;
+ /* New number of milliseconds a context switch (C/S) is allowed to last */
+ u32 hcs_deadline_ms;
+ /* The watchdog period in microseconds */
+ u32 wdg_period_us;
+ /* Array of priorities per OS */
+ u32 osid_priority[ROGUE_FW_MAX_NUM_OS];
+ /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+ u32 hwperf_buf_fw_addr;
+
+ bool padding __aligned(4);
+};
+
+/*
+ *****************************************************************************
+ * Control data for ROGUE
+ *****************************************************************************
+ */
+
+#define ROGUE_FWIF_HWR_DEBUG_DUMP_ALL (99999U)
+
+enum rogue_fwif_tpu_dm {
+ ROGUE_FWIF_TPU_DM_PDM = 0,
+ ROGUE_FWIF_TPU_DM_VDM = 1,
+ ROGUE_FWIF_TPU_DM_CDM = 2,
+ ROGUE_FWIF_TPU_DM_TDM = 3,
+ ROGUE_FWIF_TPU_DM_LAST
+};
+
+enum rogue_fwif_gpio_val_mode {
+ /* No GPIO validation */
+ ROGUE_FWIF_GPIO_VAL_OFF = 0,
+ /*
+ * Simple test case that starts by sending data via the GPIO and then
+ * echoes back any data received over the GPIO.
+ */
+ ROGUE_FWIF_GPIO_VAL_GENERAL = 1,
+ /*
+ * More complex test case that writes and reads data across the entire
+ * GPIO AP address range.
+ */
+ ROGUE_FWIF_GPIO_VAL_AP = 2,
+ /* Validates the GPIO Testbench. */
+ ROGUE_FWIF_GPIO_VAL_TESTBENCH = 5,
+ /* Send and then receive each byte in the range 0-255. */
+ ROGUE_FWIF_GPIO_VAL_LOOPBACK = 6,
+ /* Send and then receive each power-of-2 byte in the range 0-255. */
+ ROGUE_FWIF_GPIO_VAL_LOOPBACK_LITE = 7,
+ ROGUE_FWIF_GPIO_VAL_LAST
+};
+
+enum fw_perf_conf {
+ FW_PERF_CONF_NONE = 0,
+ FW_PERF_CONF_ICACHE = 1,
+ FW_PERF_CONF_DCACHE = 2,
+ FW_PERF_CONF_JTLB_INSTR = 5,
+ FW_PERF_CONF_INSTRUCTIONS = 6
+};
+
+enum fw_boot_stage {
+ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2,
+ FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+ FW_BOOT_NOT_STARTED = 0,
+ FW_BOOT_BLDR_STARTED = 1,
+ FW_BOOT_CACHE_DONE,
+ FW_BOOT_TLB_DONE,
+ FW_BOOT_MAIN_STARTED,
+ FW_BOOT_ALIGNCHECKS_DONE,
+ FW_BOOT_INIT_DONE,
+};
+
+/*
+ * Kernel CCB return slot responses. Using bit flags instead of bare
+ * integers allows the FW to pack several responses into each single kCCB
+ * command's return slot.
+ */
+/* Command executed (return status from FW) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED BIT(0)
+/* A cleanup was requested but resource busy */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY BIT(1)
+/* Poll failed in FW for a HW operation to complete */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_POLL_FAILURE BIT(2)
+/* Reset value of a kCCB return slot (set by host) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U
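+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the firmware
+ * interface): because the return slot responses are bit flags, a single slot
+ * can carry several of them at once. One plausible host-side check for a
+ * cleanup request is shown below, assuming the slot value has already been
+ * read back from the kCCB return slot array.
+ */
+static inline bool pvr_kccb_cleanup_done_example(u32 slot_value)
+{
+ /* The command ran and the cleanup it requested is no longer busy. */
+ return (slot_value & ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED) &&
+        !(slot_value & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY);
+}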
+
+struct rogue_fwif_connection_ctl {
+ /* Fw-Os connection states */
+ enum rogue_fwif_connection_fw_state connection_fw_state;
+ enum rogue_fwif_connection_os_state connection_os_state;
+ u32 alive_fw_token;
+ u32 alive_os_token;
+} __aligned(8);
+
+struct rogue_fwif_osinit {
+ /* Kernel CCB */
+ u32 kernel_ccbctl_fw_addr;
+ u32 kernel_ccb_fw_addr;
+ u32 kernel_ccb_rtn_slots_fw_addr;
+
+ /* Firmware CCB */
+ u32 firmware_ccbctl_fw_addr;
+ u32 firmware_ccb_fw_addr;
+
+ /* Workload Estimation Firmware CCB */
+ u32 work_est_firmware_ccbctl_fw_addr;
+ u32 work_est_firmware_ccb_fw_addr;
+
+ u32 rogue_fwif_hwr_info_buf_ctl_fw_addr;
+
+ u32 hwr_debug_dump_limit;
+
+ u32 fw_os_data_fw_addr;
+
+ /* Compatibility checks to be populated by the Firmware */
+ struct rogue_fwif_compchecks rogue_comp_checks;
+} __aligned(8);
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc_block {
+ /* Counter block ID, see ROGUE_HWPERF_CNTBLK_ID */
+ u16 block_id;
+
+ /* Number of counters in this block type */
+ u16 num_counters;
+
+ /* Number of blocks of this type */
+ u16 num_blocks;
+
+ u16 reserved;
+};
+
+#define ROGUE_HWPERF_MAX_BVNC_LEN (24)
+
+#define ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc {
+ /* BVNC string */
+ char bvnc_string[ROGUE_HWPERF_MAX_BVNC_LEN];
+ /* See ROGUE_HWPERF_FEATURE_FLAGS */
+ u32 bvnc_km_feature_flags;
+ /* Number of blocks described in bvnc_blocks */
+ u16 num_bvnc_blocks;
+ /* Number of GPU cores present */
+ u16 bvnc_gpu_cores;
+ /* Supported Performance Blocks for BVNC */
+ struct rogue_hwperf_bvnc_block
+ bvnc_blocks[ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN];
+};
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_hwperf_bvnc);
+
+struct rogue_fwif_sysinit {
+ /* Fault read address */
+ aligned_u64 fault_phys_addr;
+
+ /* PDS execution base */
+ aligned_u64 pds_exec_base;
+ /* USC execution base */
+ aligned_u64 usc_exec_base;
+ /* FBCDC bindless texture state table base */
+ aligned_u64 fbcdc_state_table_base;
+ aligned_u64 fbcdc_large_state_table_base;
+ /* Texture state base */
+ aligned_u64 texture_heap_base;
+
+ /* Event filter for Firmware events */
+ u64 hw_perf_filter;
+
+ aligned_u64 slc3_fence_dev_addr;
+
+ u32 tpu_trilinear_frac_mask[ROGUE_FWIF_TPU_DM_LAST] __aligned(8);
+
+ /* Signature and Checksum Buffers for DMs */
+ struct rogue_fwif_sigbuf_ctl sigbuf_ctl[PVR_FWIF_DM_MAX];
+
+ struct rogue_fwif_pdvfs_opp pdvfs_opp_info;
+
+ struct rogue_fwif_dma_addr coremem_data_store;
+
+ struct rogue_fwif_counter_dump_ctl counter_dump_ctl;
+
+ u32 filter_flags;
+
+ u32 runtime_cfg_fw_addr;
+
+ u32 trace_buf_ctl_fw_addr;
+ u32 fw_sys_data_fw_addr;
+
+ u32 gpu_util_fw_cb_ctl_fw_addr;
+ u32 reg_cfg_fw_addr;
+ u32 hwperf_ctl_fw_addr;
+
+ u32 align_checks;
+
+ /* Core clock speed at FW boot time */
+ u32 initial_core_clock_speed;
+
+ /* APM latency in ms before signalling IDLE to the host */
+ u32 active_pm_latency_ms;
+
+ /* Flag to be set by the Firmware after successful start */
+ bool firmware_started __aligned(4);
+
+ /* Host/FW Trace synchronisation Partition Marker */
+ u32 marker_val;
+
+ /* Firmware initialization complete time */
+ u32 firmware_started_timestamp;
+
+ u32 jones_disable_mask;
+
+ /* Firmware performance counter config */
+ enum fw_perf_conf firmware_perf;
+
+ /*
+ * FW pointer to memory containing the core clock rate in Hz.
+ * Firmware (PDVFS) updates the memory when running on a non-primary FW
+ * thread to communicate it to the host driver.
+ */
+ u32 core_clock_rate_fw_addr;
+
+ enum rogue_fwif_gpio_val_mode gpio_validation_mode;
+
+ /* Used in HWPerf for decoding BVNC Features */
+ struct rogue_hwperf_bvnc bvnc_km_feature_flags;
+
+ /* Value to write into ROGUE_CR_TFBC_COMPRESSION_CONTROL */
+ u32 tfbc_compression_control;
+} __aligned(8);
+
+/*
+ *****************************************************************************
+ * Timer correlation shared data and defines
+ *****************************************************************************
+ */
+
+struct rogue_fwif_time_corr {
+ aligned_u64 os_timestamp;
+ aligned_u64 os_mono_timestamp;
+ aligned_u64 cr_timestamp;
+
+ /*
+ * Utility variable used to convert CR timer deltas to OS timer deltas
+ * (nS), where the deltas are relative to the timestamps above:
+ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below
+ */
+ aligned_u64 cr_delta_to_os_delta_kns;
+
+ u32 core_clock_speed;
+ u32 reserved;
+} __aligned(8);
+
+/*
+ * The following macros are used to help converting FW timestamps to the Host
+ * time domain. On the FW the ROGUE_CR_TIMER counter is used to keep track of
+ * time; it increments by 1 every 256 GPU clock ticks, so the general
+ * formula to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ * otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ * deltaCR * 256 256 * scale
+ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ]
+ * GPUclockspeed GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to the base
+ * OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and periodic
+ * frequency calibration (executed every few seconds if the FW is doing
+ * some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+#define ROGUE_FWIF_GET_DELTA_OSTIME_NS(delta_cr, k) \
+ (((delta_cr) * (k)) >> ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
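+
+/*
+ * Worked example (illustrative figures only): for a 600 MHz GPU clock and
+ * scale = 10^9 (nanoseconds),
+ *
+ *   K = (256 * 10^9 << 20) / 600000000 ~= 447392427
+ *
+ * so a deltaCR of 1000 timer ticks converts to
+ *
+ *   ROGUE_FWIF_GET_DELTA_OSTIME_NS(1000, K) = (1000 * K) >> 20 ~= 426666 ns,
+ *
+ * i.e. the 256000 GPU clock ticks that elapsed at 600 MHz, as expected.
+ */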
+
+/*
+ ******************************************************************************
+ * GPU Utilisation
+ ******************************************************************************
+ */
+
+/* See rogue_common.h for a list of GPU states */
+#define ROGUE_FWIF_GPU_UTIL_TIME_MASK \
+ (0xFFFFFFFFFFFFFFFFull & ~ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+#define ROGUE_FWIF_GPU_UTIL_GET_TIME(word) \
+ ((word) & ROGUE_FWIF_GPU_UTIL_TIME_MASK)
+#define ROGUE_FWIF_GPU_UTIL_GET_STATE(word) \
+ ((word) & ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+/*
+ * The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on
+ * the Host. In some cases we perform subtractions between FW-approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if, for instance, the FW one is a bit ahead of time.
+ */
+#define ROGUE_FWIF_GPU_UTIL_GET_PERIOD(newtime, oldtime) \
+ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
+
+#define ROGUE_FWIF_GPU_UTIL_MAKE_WORD(time, state) \
+ (ROGUE_FWIF_GPU_UTIL_GET_TIME(time) | \
+ ROGUE_FWIF_GPU_UTIL_GET_STATE(state))
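+
+/*
+ * Usage sketch (illustrative only): a sampled utilisation word such as
+ * last_word below can be split back into its two components with
+ *
+ *   time  = ROGUE_FWIF_GPU_UTIL_GET_TIME(word);
+ *   state = ROGUE_FWIF_GPU_UTIL_GET_STATE(word);
+ *
+ * and the time spent in the previous state accumulated with
+ * ROGUE_FWIF_GPU_UTIL_GET_PERIOD(now, time), which clamps to 0 if the
+ * FW-approximated timestamp happens to be ahead of the Host one.
+ */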
+
+/*
+ * The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are
+ * processed by the MISR. The update frequency of this array depends on how fast
+ * the system can change state (basically how small the APM latency is) and
+ * perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries in the worst case the
+ * FW will read old data, which is still quite ok if the Host is updating the
+ * timer correlation at that time.
+ */
+#define ROGUE_FWIF_TIME_CORR_ARRAY_SIZE 256U
+#define ROGUE_FWIF_TIME_CORR_CURR_INDEX(seqcount) \
+ ((seqcount) % ROGUE_FWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((ROGUE_FWIF_TIME_CORR_ARRAY_SIZE &
+ (ROGUE_FWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
+ "ROGUE_FWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
+struct rogue_fwif_gpu_util_fwcb {
+ struct rogue_fwif_time_corr time_corr[ROGUE_FWIF_TIME_CORR_ARRAY_SIZE];
+ u32 time_corr_seq_count;
+
+ /* Compatibility and other flags */
+ u32 gpu_util_flags;
+
+ /* Last GPU state + OS time of the last state update */
+ aligned_u64 last_word;
+
+ /* Counters for the amount of time the GPU was active/idle/blocked */
+ aligned_u64 stats_counters[PVR_FWIF_GPU_UTIL_STATE_NUM];
+} __aligned(8);
+
+struct rogue_fwif_rta_ctl {
+ /* Render number */
+ u32 render_target_index;
+ /* index in RTA */
+ u32 current_render_target;
+ /* total active RTs */
+ u32 active_render_targets;
+ /* total active RTs from the first TA kick, for OOM */
+ u32 cumul_active_render_targets;
+ /* Array of valid RT indices */
+ u32 valid_render_targets_fw_addr;
+ /* Array of the number of partial renders that occurred per render target */
+ u32 rta_num_partial_renders_fw_addr;
+ /* Number of render targets in the array */
+ u32 max_rts;
+ /* Compatibility and other flags */
+ u32 rta_ctl_flags;
+} __aligned(8);
+
+struct rogue_fwif_freelist {
+ aligned_u64 freelist_dev_addr;
+ aligned_u64 current_dev_addr;
+ u32 current_stack_top;
+ u32 max_pages;
+ u32 grow_pages;
+ /* HW pages */
+ u32 current_pages;
+ u32 allocated_page_count;
+ u32 allocated_mmu_page_count;
+ u32 freelist_id;
+
+ bool grow_pending __aligned(4);
+ /* Pages that should be used only when OOM is reached */
+ u32 ready_pages;
+ /* Compatibility and other flags */
+ u32 freelist_flags;
+ /* PM Global PB on which Freelist is loaded */
+ u32 pm_global_pb;
+ u32 padding;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * HWRTData
+ ******************************************************************************
+ */
+
+/* HWRTData flags */
+/* Deprecated flags 1:0 */
+#define HWRTDATA_HAS_LAST_GEOM BIT(2)
+#define HWRTDATA_PARTIAL_RENDERED BIT(3)
+#define HWRTDATA_DISABLE_TILE_REORDERING BIT(4)
+#define HWRTDATA_NEED_BRN65101_BLIT BIT(5)
+#define HWRTDATA_FIRST_BRN65101_STRIP BIT(6)
+#define HWRTDATA_NEED_BRN67182_2ND_RENDER BIT(7)
+
+enum rogue_fwif_rtdata_state {
+ ROGUE_FWIF_RTDATA_STATE_NONE = 0,
+ ROGUE_FWIF_RTDATA_STATE_KICK_GEOM,
+ ROGUE_FWIF_RTDATA_STATE_KICK_GEOM_FIRST,
+ ROGUE_FWIF_RTDATA_STATE_GEOM_FINISHED,
+ ROGUE_FWIF_RTDATA_STATE_KICK_FRAG,
+ ROGUE_FWIF_RTDATA_STATE_FRAG_FINISHED,
+ ROGUE_FWIF_RTDATA_STATE_FRAG_CONTEXT_STORED,
+ ROGUE_FWIF_RTDATA_STATE_GEOM_OUTOFMEM,
+ ROGUE_FWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+ /*
+ * In case of HWR, we can't set the RTDATA state to NONE, as this will
+ * cause any TA to become a first TA. To ensure all related TAs are
+ * skipped, we use the HWR state.
+ */
+ ROGUE_FWIF_RTDATA_STATE_HWR,
+ ROGUE_FWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
+};
+
+struct rogue_fwif_hwrtdata_common {
+ bool geom_caches_need_zeroing __aligned(4);
+
+ u32 screen_pixel_max;
+ aligned_u64 multi_sample_ctl;
+ u64 flipped_multi_sample_ctl;
+ u32 tpc_stride;
+ u32 tpc_size;
+ u32 te_screen;
+ u32 mtile_stride;
+ u32 teaa;
+ u32 te_mtile1;
+ u32 te_mtile2;
+ u32 isp_merge_lower_x;
+ u32 isp_merge_lower_y;
+ u32 isp_merge_upper_x;
+ u32 isp_merge_upper_y;
+ u32 isp_merge_scale_x;
+ u32 isp_merge_scale_y;
+ u32 rgn_header_size;
+ u32 isp_mtile_size;
+ u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_hwrtdata {
+ /* MList Data Store */
+ aligned_u64 pm_mlist_dev_addr;
+
+ aligned_u64 vce_cat_base[4];
+ aligned_u64 vce_last_cat_base[4];
+ aligned_u64 te_cat_base[4];
+ aligned_u64 te_last_cat_base[4];
+ aligned_u64 alist_cat_base;
+ aligned_u64 alist_last_cat_base;
+
+ aligned_u64 pm_alist_stack_pointer;
+ u32 pm_mlist_stack_pointer;
+
+ u32 hwrt_data_common_fw_addr;
+
+ u32 hwrt_data_flags;
+ enum rogue_fwif_rtdata_state state;
+
+ u32 freelists_fw_addr[MAX_FREELISTS_SIZE] __aligned(8);
+ u32 freelist_hwr_snapshot[MAX_FREELISTS_SIZE];
+
+ aligned_u64 vheap_table_dev_addr;
+
+ struct rogue_fwif_rta_ctl rta_ctl;
+
+ aligned_u64 tail_ptrs_dev_addr;
+ aligned_u64 macrotile_array_dev_addr;
+ aligned_u64 rgn_header_dev_addr;
+ aligned_u64 rtc_dev_addr;
+
+ u32 owner_geom_not_used_by_host __aligned(8);
+
+ bool geom_caches_need_zeroing __aligned(4);
+
+ struct rogue_fwif_cleanup_ctl cleanup_state __aligned(64);
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Sync checkpoints
+ ******************************************************************************
+ */
+
+#define PVR_SYNC_CHECKPOINT_UNDEF 0x000
+#define PVR_SYNC_CHECKPOINT_ACTIVE 0xac1 /* Checkpoint has not signaled. */
+#define PVR_SYNC_CHECKPOINT_SIGNALED 0x519 /* Checkpoint has signaled. */
+#define PVR_SYNC_CHECKPOINT_ERRORED 0xeff /* Checkpoint has been errored. */
+
+#include "pvr_rogue_fwif_check.h"
+
+#endif /* PVR_ROGUE_FWIF_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
new file mode 100644
index 000000000000..51dc37e78f41
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CHECK_H
+#define PVR_ROGUE_FWIF_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+ static_assert(offsetof(type, member) == (offset), \
+ "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+ static_assert(sizeof(type) == (size), #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, path, 0);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, info, 200);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, line_num, 400);
+SIZE_CHECK(struct rogue_fwif_file_info_buf, 408);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, assert_buf, 16);
+SIZE_CHECK(struct rogue_fwif_tracebuf_space, 424);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf, log_type, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_size_in_dwords, 856);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_flags, 860);
+SIZE_CHECK(struct rogue_fwif_tracebuf, 864);
+
+OFFSET_CHECK(struct rogue_fw_fault_info, cr_timer, 0);
+OFFSET_CHECK(struct rogue_fw_fault_info, os_timer, 8);
+OFFSET_CHECK(struct rogue_fw_fault_info, data, 16);
+OFFSET_CHECK(struct rogue_fw_fault_info, reserved, 20);
+OFFSET_CHECK(struct rogue_fw_fault_info, fault_buf, 24);
+SIZE_CHECK(struct rogue_fw_fault_info, 432);
+
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags_ext, 4);
+OFFSET_CHECK(struct rogue_fwif_sysdata, pow_state, 8);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ridx, 12);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_widx, 16);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_wrap_count, 20);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_size, 24);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_drop_count, 28);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ut, 32);
+OFFSET_CHECK(struct rogue_fwif_sysdata, first_drop_ordinal, 36);
+OFFSET_CHECK(struct rogue_fwif_sysdata, last_drop_ordinal, 40);
+OFFSET_CHECK(struct rogue_fwif_sysdata, os_runtime_flags_mirror, 44);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fault_info, 80);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_faults, 3536);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_addr, 3540);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_mask, 3548);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_count, 3556);
+OFFSET_CHECK(struct rogue_fwif_sysdata, start_idle_time, 3568);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_state_flags, 3576);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_recovery_flags, 3580);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_sys_data_flags, 3616);
+OFFSET_CHECK(struct rogue_fwif_sysdata, mc_config, 3620);
+SIZE_CHECK(struct rogue_fwif_sysdata, 3624);
+
+OFFSET_CHECK(struct rogue_fwif_slr_entry, timestamp, 0);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, fw_ctx_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, num_ufos, 12);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, ccb_name, 16);
+SIZE_CHECK(struct rogue_fwif_slr_entry, 48);
+
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_sync_check_mark, 4);
+OFFSET_CHECK(struct rogue_fwif_osdata, host_sync_check_mark, 8);
+OFFSET_CHECK(struct rogue_fwif_osdata, forced_updates_requested, 12);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_wp, 16);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_first, 24);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log, 72);
+OFFSET_CHECK(struct rogue_fwif_osdata, last_forced_update_time, 552);
+OFFSET_CHECK(struct rogue_fwif_osdata, interrupt_count, 560);
+OFFSET_CHECK(struct rogue_fwif_osdata, kccb_cmds_executed, 568);
+OFFSET_CHECK(struct rogue_fwif_osdata, power_sync_fw_addr, 572);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_data_flags, 576);
+SIZE_CHECK(struct rogue_fwif_osdata, 584);
+
+OFFSET_CHECK(struct rogue_bifinfo, bif_req_status, 0);
+OFFSET_CHECK(struct rogue_bifinfo, bif_mmu_status, 8);
+OFFSET_CHECK(struct rogue_bifinfo, pc_address, 16);
+OFFSET_CHECK(struct rogue_bifinfo, reserved, 24);
+SIZE_CHECK(struct rogue_bifinfo, 32);
+
+OFFSET_CHECK(struct rogue_eccinfo, fault_gpu, 0);
+SIZE_CHECK(struct rogue_eccinfo, 4);
+
+OFFSET_CHECK(struct rogue_mmuinfo, mmu_status, 0);
+OFFSET_CHECK(struct rogue_mmuinfo, pc_address, 16);
+OFFSET_CHECK(struct rogue_mmuinfo, reserved, 24);
+SIZE_CHECK(struct rogue_mmuinfo, 32);
+
+OFFSET_CHECK(struct rogue_pollinfo, thread_num, 0);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_addr, 4);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_mask, 8);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_last_value, 12);
+OFFSET_CHECK(struct rogue_pollinfo, reserved, 16);
+SIZE_CHECK(struct rogue_pollinfo, 24);
+
+OFFSET_CHECK(struct rogue_tlbinfo, bad_addr, 0);
+OFFSET_CHECK(struct rogue_tlbinfo, entry_lo, 4);
+SIZE_CHECK(struct rogue_tlbinfo, 8);
+
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_data, 0);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_timer, 32);
+OFFSET_CHECK(struct rogue_hwrinfo, os_timer, 40);
+OFFSET_CHECK(struct rogue_hwrinfo, frame_num, 48);
+OFFSET_CHECK(struct rogue_hwrinfo, pid, 52);
+OFFSET_CHECK(struct rogue_hwrinfo, active_hwrt_data, 56);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_number, 60);
+OFFSET_CHECK(struct rogue_hwrinfo, event_status, 64);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_recovery_flags, 68);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_type, 72);
+OFFSET_CHECK(struct rogue_hwrinfo, dm, 76);
+OFFSET_CHECK(struct rogue_hwrinfo, core_id, 80);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_of_kick, 88);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_start, 96);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_finish, 104);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_freelist_ready, 112);
+OFFSET_CHECK(struct rogue_hwrinfo, reserved, 120);
+SIZE_CHECK(struct rogue_hwrinfo, 136);
+
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_counter, 2176);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, write_index, 2180);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, dd_req_count, 2184);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info_buf_flags, 2188);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_locked_up_count, 2192);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_overran_count, 2228);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_recovered_count, 2264);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_false_detect_count, 2300);
+SIZE_CHECK(struct rogue_fwif_hwrinfobuf, 2336);
+
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, pc_dev_paddr, 0);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, page_cat_base_reg_set, 8);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_addr, 12);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, bp_handler_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_ctl, 20);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, fw_mem_ctx_flags, 24);
+SIZE_CHECK(struct rogue_fwif_fwmemcontext, 32);
+
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer_init, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vbs_so_prim, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_current_idx, 32);
+SIZE_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, 40);
+
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state, geom_core, 0);
+SIZE_CHECK(struct rogue_fwif_geom_ctx_state, 160);
+
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_pm_deallocated_mask_status, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_dm_pds_mtilefree_status, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, ctx_state_flags, 8);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_isp_store, 12);
+SIZE_CHECK(struct rogue_fwif_frag_ctx_state, 16);
+
+OFFSET_CHECK(struct rogue_fwif_compute_ctx_state, ctx_state_flags, 0);
+SIZE_CHECK(struct rogue_fwif_compute_ctx_state, 4);
+
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccbctl_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_meta_dma_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, context_state_addr, 24);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_com_ctx_flags, 28);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority, 32);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority_seq_num, 36);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, rf_cmd_addr, 40);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_pending, 44);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_stores, 48);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_out_of_memory, 52);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_partial_renders, 56);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, dm, 60);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_address, 64);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_node, 72);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, buf_stalled_node, 80);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, cbuf_queue_ctrl_addr, 88);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, robustness_address, 96);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, max_deadline_ms, 104);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, read_offset_needs_reset, 108);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, waiting_node, 112);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, run_node, 120);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, last_failed_ufo, 128);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_mem_context_fw_addr, 136);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, server_common_context_id, 140);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, pid, 144);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, geom_oom_disabled, 148);
+SIZE_CHECK(struct rogue_fwif_fwcommoncontext, 152);
+
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, write_offset, 0);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding, 4);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, read_offset, 128);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, wrap_mask, 132);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, cmd_size, 136);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding2, 140);
+SIZE_CHECK(struct rogue_fwif_ccb_ctl, 144);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_woff_update, 4);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_wrap_mask_update, 8);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, num_cleanup_ctl, 12);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, cleanup_ctl_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, work_est_cmd_header_offset, 28);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_kick_data, 32);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, geom_cmd_kick_data, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, frag_cmd_kick_data, 32);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, 64);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, ccb_fence_offset, 4);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_type, 0);
+OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_data, 4);
+SIZE_CHECK(struct rogue_fwif_cleanup_request, 8);
+
+OFFSET_CHECK(struct rogue_fwif_power_request, pow_type, 0);
+OFFSET_CHECK(struct rogue_fwif_power_request, power_req_data, 4);
+SIZE_CHECK(struct rogue_fwif_power_request, 8);
+
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, inval, 4);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, dm_context, 8);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, address, 16);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, size, 24);
+SIZE_CHECK(struct rogue_fwif_slcflushinvaldata, 32);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, opcode, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, mask, 8);
+SIZE_CHECK(struct rogue_fwif_hwperf_ctrl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, num_blocks, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, block_configs_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_config_enable_blks, 8);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, num_blocks, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, block_configs_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_config_da_blks, 8);
+
+OFFSET_CHECK(struct rogue_fwif_coreclkspeedchange_data, new_clock_speed, 0);
+SIZE_CHECK(struct rogue_fwif_coreclkspeedchange_data, 4);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, enable, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, num_blocks, 4);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, block_ids, 8);
+SIZE_CHECK(struct rogue_fwif_hwperf_ctrl_blks, 40);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_block, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, num_counters, 2);
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_counter_ids_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, zs_buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, done, 4);
+SIZE_CHECK(struct rogue_fwif_zsbuffer_backing_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, freelist_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, delta_pages, 4);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, new_pages, 8);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, ready_pages, 12);
+SIZE_CHECK(struct rogue_fwif_freelist_gs_data, 16);
+
+OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_count, 0);
+OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_ids, 4);
+SIZE_CHECK(struct rogue_fwif_freelists_reconstruction_data, 76);
+
+OFFSET_CHECK(struct rogue_fwif_write_offset_update_data, context_fw_addr, 0);
+SIZE_CHECK(struct rogue_fwif_write_offset_update_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, kccb_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_data, 8);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd, 88);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, server_common_context_id, 0);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_reason, 4);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, dm, 8);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_job_ref, 12);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, flags, 16);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, pc_address, 24);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, fault_address, 32);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, 40);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, fw_fault_addr, 0);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, fwccb_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_data, 8);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd, 88);
+
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_size, 4);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, ext_job_ref, 8);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, int_job_ref, 12);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, work_est_kick_data, 16);
+SIZE_CHECK(struct rogue_fwif_ccb_cmd_header, 40);
+
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_ms, 0);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, runtime_cfg_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_persistant, 8);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, core_clock_speed, 12);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, default_dusts_num_init, 16);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, phr_mode, 20);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hcs_deadline_ms, 24);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, wdg_period_us, 28);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, osid_priority, 32);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hwperf_buf_fw_addr, 64);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, padding, 68);
+SIZE_CHECK(struct rogue_fwif_runtime_cfg, 72);
+
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_fw_state, 0);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_os_state, 4);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_fw_token, 8);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_os_token, 12);
+SIZE_CHECK(struct rogue_fwif_connection_ctl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, layout_version, 0);
+OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, bvnc, 8);
+SIZE_CHECK(struct rogue_fwif_compchecks_bvnc, 16);
+
+OFFSET_CHECK(struct rogue_fwif_init_options, os_count_support, 0);
+SIZE_CHECK(struct rogue_fwif_init_options, 8);
+
+OFFSET_CHECK(struct rogue_fwif_compchecks, hw_bvnc, 0);
+OFFSET_CHECK(struct rogue_fwif_compchecks, fw_bvnc, 16);
+OFFSET_CHECK(struct rogue_fwif_compchecks, fw_processor_version, 32);
+OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_version, 36);
+OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_build, 40);
+OFFSET_CHECK(struct rogue_fwif_compchecks, build_options, 44);
+OFFSET_CHECK(struct rogue_fwif_compchecks, init_options, 48);
+OFFSET_CHECK(struct rogue_fwif_compchecks, updated, 56);
+SIZE_CHECK(struct rogue_fwif_compchecks, 64);
+
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccbctl_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_rtn_slots_fw_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccbctl_fw_addr, 12);
+OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccb_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccbctl_fw_addr, 20);
+OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccb_fw_addr, 24);
+OFFSET_CHECK(struct rogue_fwif_osinit, rogue_fwif_hwr_info_buf_ctl_fw_addr, 28);
+OFFSET_CHECK(struct rogue_fwif_osinit, hwr_debug_dump_limit, 32);
+OFFSET_CHECK(struct rogue_fwif_osinit, fw_os_data_fw_addr, 36);
+OFFSET_CHECK(struct rogue_fwif_osinit, rogue_comp_checks, 40);
+SIZE_CHECK(struct rogue_fwif_osinit, 104);
+
+OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, left_size_in_regs, 4);
+SIZE_CHECK(struct rogue_fwif_sigbuf_ctl, 8);
+
+OFFSET_CHECK(struct pdvfs_opp, volt, 0);
+OFFSET_CHECK(struct pdvfs_opp, freq, 4);
+SIZE_CHECK(struct pdvfs_opp, 8);
+
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, opp_values, 0);
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, min_opp_point, 128);
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, max_opp_point, 132);
+SIZE_CHECK(struct rogue_fwif_pdvfs_opp, 136);
+
+OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, size_in_dwords, 4);
+SIZE_CHECK(struct rogue_fwif_counter_dump_ctl, 8);
+
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_string, 0);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_km_feature_flags, 24);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, num_bvnc_blocks, 28);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_gpu_cores, 30);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_blocks, 32);
+SIZE_CHECK(struct rogue_hwperf_bvnc, 160);
+
+OFFSET_CHECK(struct rogue_fwif_sysinit, fault_phys_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_sysinit, pds_exec_base, 8);
+OFFSET_CHECK(struct rogue_fwif_sysinit, usc_exec_base, 16);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_state_table_base, 24);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_large_state_table_base, 32);
+OFFSET_CHECK(struct rogue_fwif_sysinit, texture_heap_base, 40);
+OFFSET_CHECK(struct rogue_fwif_sysinit, hw_perf_filter, 48);
+OFFSET_CHECK(struct rogue_fwif_sysinit, slc3_fence_dev_addr, 56);
+OFFSET_CHECK(struct rogue_fwif_sysinit, tpu_trilinear_frac_mask, 64);
+OFFSET_CHECK(struct rogue_fwif_sysinit, sigbuf_ctl, 80);
+OFFSET_CHECK(struct rogue_fwif_sysinit, pdvfs_opp_info, 152);
+OFFSET_CHECK(struct rogue_fwif_sysinit, coremem_data_store, 288);
+OFFSET_CHECK(struct rogue_fwif_sysinit, counter_dump_ctl, 304);
+OFFSET_CHECK(struct rogue_fwif_sysinit, filter_flags, 312);
+OFFSET_CHECK(struct rogue_fwif_sysinit, runtime_cfg_fw_addr, 316);
+OFFSET_CHECK(struct rogue_fwif_sysinit, trace_buf_ctl_fw_addr, 320);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fw_sys_data_fw_addr, 324);
+OFFSET_CHECK(struct rogue_fwif_sysinit, gpu_util_fw_cb_ctl_fw_addr, 328);
+OFFSET_CHECK(struct rogue_fwif_sysinit, reg_cfg_fw_addr, 332);
+OFFSET_CHECK(struct rogue_fwif_sysinit, hwperf_ctl_fw_addr, 336);
+OFFSET_CHECK(struct rogue_fwif_sysinit, align_checks, 340);
+OFFSET_CHECK(struct rogue_fwif_sysinit, initial_core_clock_speed, 344);
+OFFSET_CHECK(struct rogue_fwif_sysinit, active_pm_latency_ms, 348);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started, 352);
+OFFSET_CHECK(struct rogue_fwif_sysinit, marker_val, 356);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started_timestamp, 360);
+OFFSET_CHECK(struct rogue_fwif_sysinit, jones_disable_mask, 364);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_perf, 368);
+OFFSET_CHECK(struct rogue_fwif_sysinit, core_clock_rate_fw_addr, 372);
+OFFSET_CHECK(struct rogue_fwif_sysinit, gpio_validation_mode, 376);
+OFFSET_CHECK(struct rogue_fwif_sysinit, bvnc_km_feature_flags, 380);
+OFFSET_CHECK(struct rogue_fwif_sysinit, tfbc_compression_control, 540);
+SIZE_CHECK(struct rogue_fwif_sysinit, 544);
+
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr, 0);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr_seq_count, 10240);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, gpu_util_flags, 10244);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, last_word, 10248);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, stats_counters, 10256);
+SIZE_CHECK(struct rogue_fwif_gpu_util_fwcb, 10280);
+
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, render_target_index, 0);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, current_render_target, 4);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, active_render_targets, 8);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, cumul_active_render_targets, 12);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, valid_render_targets_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_num_partial_renders_fw_addr, 20);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, max_rts, 24);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_ctl_flags, 28);
+SIZE_CHECK(struct rogue_fwif_rta_ctl, 32);
+
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_dev_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_stack_top, 16);
+OFFSET_CHECK(struct rogue_fwif_freelist, max_pages, 20);
+OFFSET_CHECK(struct rogue_fwif_freelist, grow_pages, 24);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_pages, 28);
+OFFSET_CHECK(struct rogue_fwif_freelist, allocated_page_count, 32);
+OFFSET_CHECK(struct rogue_fwif_freelist, allocated_mmu_page_count, 36);
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_id, 40);
+OFFSET_CHECK(struct rogue_fwif_freelist, grow_pending, 44);
+OFFSET_CHECK(struct rogue_fwif_freelist, ready_pages, 48);
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_flags, 52);
+OFFSET_CHECK(struct rogue_fwif_freelist, pm_global_pb, 56);
+SIZE_CHECK(struct rogue_fwif_freelist, 64);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, geom_caches_need_zeroing, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, screen_pixel_max, 4);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, multi_sample_ctl, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, flipped_multi_sample_ctl, 16);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_stride, 24);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_size, 28);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_screen, 32);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, mtile_stride, 36);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, teaa, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile1, 44);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile2, 48);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_x, 52);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_y, 56);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_x, 60);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_y, 64);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_x, 68);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_y, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, rgn_header_size, 76);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_mtile_size, 80);
+SIZE_CHECK(struct rogue_fwif_hwrtdata_common, 88);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_cat_base, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_last_cat_base, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_cat_base, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_last_cat_base, 104);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_cat_base, 136);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_last_cat_base, 144);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_alist_stack_pointer, 152);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_stack_pointer, 160);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_common_fw_addr, 164);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_flags, 168);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, state, 172);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelists_fw_addr, 176);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelist_hwr_snapshot, 188);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vheap_table_dev_addr, 200);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rta_ctl, 208);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, tail_ptrs_dev_addr, 240);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, macrotile_array_dev_addr, 248);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rgn_header_dev_addr, 256);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rtc_dev_addr, 264);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, owner_geom_not_used_by_host, 272);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, geom_caches_need_zeroing, 276);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, cleanup_state, 320);
+SIZE_CHECK(struct rogue_fwif_hwrtdata, 384);
+
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, state, 0);
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, fw_ref_count, 4);
+SIZE_CHECK(struct rogue_fwif_sync_checkpoint, 8);
+
+#endif /* PVR_ROGUE_FWIF_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
new file mode 100644
index 000000000000..6e224400083a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CLIENT_H
+#define PVR_ROGUE_FWIF_CLIENT_H
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ * Page size used for Parameter Management.
+ */
+#define ROGUE_PM_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum/Maximum PB size.
+ *
+ * Base page size is dependent on core:
+ * S6/S6XT/S7 = 50 pages
+ * S8XE = 40 pages
+ * S8XE with BRN66011 fixed = 25 pages
+ *
+ * Minimum PB = Base Pages + (NUM_TE_PIPES-1)*16K + (NUM_VCE_PIPES-1)*64K +
+ * IF_PM_PREALLOC(NUM_TE_PIPES*16K + NUM_VCE_PIPES*16K)
+ *
+ * Maximum PB size must ensure that no PM address space can be fully used,
+ * because if the full address space was used it would wrap and corrupt itself.
+ * Since there are two freelists (local is always minimum sized) this can be
+ * described as following three conditions being met:
+ *
+ * (Minimum PB + Maximum PB) < ALIST PM address space size (16GB)
+ * (Minimum PB + Maximum PB) < TE PM address space size (16GB) / NUM_TE_PIPES
+ * (Minimum PB + Maximum PB) < VCE PM address space size (16GB) / NUM_VCE_PIPES
+ *
+ * Since the max of NUM_TE_PIPES and NUM_VCE_PIPES is 4, we have a hard limit
+ * of 4GB minus the Minimum PB. For convenience we take the smaller power-of-2
+ * value of 2GB. This is far more than any current applications use.
+ */
+#define ROGUE_PM_MAX_FREELIST_SIZE SZ_2G
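+
+/*
+ * Worked example (hypothetical core configuration): for a part with a
+ * 50-page base, NUM_TE_PIPES = 2 and NUM_VCE_PIPES = 2,
+ *
+ *   Minimum PB = 50 * 4K + 1 * 16K + 1 * 64K = 280K
+ *
+ * plus a further 2 * 16K + 2 * 16K = 64K if PM preallocation is in use,
+ * which is comfortably below the 2GB ROGUE_PM_MAX_FREELIST_SIZE cap above.
+ */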
+
+/*
+ * Flags supported by the geometry DM command i.e. &struct rogue_fwif_cmd_geom.
+ */
+
+#define ROGUE_GEOM_FLAGS_FIRSTKICK BIT_MASK(0)
+#define ROGUE_GEOM_FLAGS_LASTKICK BIT_MASK(1)
+/* Use single core in a multi core setup. */
+#define ROGUE_GEOM_FLAGS_SINGLE_CORE BIT_MASK(3)
+
+/*
+ * Flags supported by the fragment DM command i.e. &struct rogue_fwif_cmd_frag.
+ */
+
+/* Use single core in a multi core setup. */
+#define ROGUE_FRAG_FLAGS_SINGLE_CORE BIT_MASK(3)
+/* Indicates whether this render produces visibility results. */
+#define ROGUE_FRAG_FLAGS_GET_VIS_RESULTS BIT_MASK(5)
+/* Indicates whether a depth buffer is present. */
+#define ROGUE_FRAG_FLAGS_DEPTHBUFFER BIT_MASK(7)
+/* Indicates whether a stencil buffer is present. */
+#define ROGUE_FRAG_FLAGS_STENCILBUFFER BIT_MASK(8)
+/* Disable pixel merging for this render. */
+#define ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE BIT_MASK(15)
+/* Indicates whether a scratch buffer is present. */
+#define ROGUE_FRAG_FLAGS_SCRATCHBUFFER BIT_MASK(19)
+/* Disallow compute overlapped with this render. */
+#define ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP BIT_MASK(26)
+
+/*
+ * Flags supported by the compute DM command i.e. &struct rogue_fwif_cmd_compute.
+ */
+
+#define ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP BIT_MASK(2)
+/* Use single core in a multi core setup. */
+#define ROGUE_COMPUTE_FLAG_SINGLE_CORE BIT_MASK(5)
+
+/*
+ * Flags supported by the transfer DM command i.e. &struct rogue_fwif_cmd_transfer.
+ */
+
+/* Use single core in a multi core setup. */
+#define ROGUE_TRANSFER_FLAGS_SINGLE_CORE BIT_MASK(1)
+
+/*
+ ************************************************
+ * Parameter/HWRTData control structures.
+ ************************************************
+ */
+
+/*
+ * Configuration registers which need to be loaded by the firmware before a geometry
+ * job can be started.
+ */
+struct rogue_fwif_geom_regs {
+ u64 vdm_ctrl_stream_base;
+ u64 tpu_border_colour_table;
+
+ /* Only used when feature VDM_DRAWINDIRECT present. */
+ u64 vdm_draw_indirect0;
+ /* Only used when feature VDM_DRAWINDIRECT present. */
+ u32 vdm_draw_indirect1;
+
+ u32 ppp_ctrl;
+ u32 te_psg;
+ /* Only used when BRN 49927 present. */
+ u32 tpu;
+
+ u32 vdm_context_resume_task0_size;
+ /* Only used when feature VDM_OBJECT_LEVEL_LLS present. */
+ u32 vdm_context_resume_task3_size;
+
+ /* Only used when BRN 56279 or BRN 67381 present. */
+ u32 pds_ctrl;
+
+ u32 view_idx;
+
+ /* Only used when feature TESSELLATION present */
+ u32 pds_coeff_free_prog;
+
+ u32 padding;
+};
+
+/* Only used when BRN 44455 or BRN 63027 present. */
+struct rogue_fwif_dummy_rgnhdr_init_geom_regs {
+ u64 te_psgregion_addr;
+};
+
+/*
+ * Represents a geometry command that can be used to tile a whole scene's objects as
+ * per TA behavior.
+ */
+struct rogue_fwif_cmd_geom {
+ /*
+ * The rogue_fwif_cmd_geom_frag_shared field must always be at the beginning
+ * of the struct.
+ *
+ * The command struct (rogue_fwif_cmd_geom) is shared between Client and
+ * Firmware. The Kernel is unable to perform read/write operations on the
+ * command struct; the SHARED region is the only exception to this rule.
+ * This region must be the first member so that the Kernel can easily access
+ * it. For more info, see the rogue_fwif_cmd_geom_frag_shared definition.
+ */
+ struct rogue_fwif_cmd_geom_frag_shared cmd_shared;
+
+ struct rogue_fwif_geom_regs regs __aligned(8);
+ u32 flags __aligned(8);
+
+ /*
+ * Holds the geometry/fragment fence value to allow the fragment partial render command
+ * to go through.
+ */
+ struct rogue_fwif_ufo partial_render_geom_frag_fence;
+
+ /* Only used when BRN 44455 or BRN 63027 present. */
+ struct rogue_fwif_dummy_rgnhdr_init_geom_regs dummy_rgnhdr_init_geom_regs __aligned(8);
+
+ /* Only used when BRN 61484 or BRN 66333 present. */
+ u32 brn61484_66333_live_rt;
+
+ u32 padding;
+};
+
+/*
+ * Configuration registers which need to be loaded by the firmware before ISP
+ * can be started.
+ */
+struct rogue_fwif_frag_regs {
+ u32 usc_pixel_output_ctrl;
+
+#define ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL 8U
+ u32 usc_clear_register[ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL];
+
+ u32 isp_bgobjdepth;
+ u32 isp_bgobjvals;
+ u32 isp_aa;
+ /* Only used when feature S7_TOP_INFRASTRUCTURE present. */
+ u32 isp_xtp_pipe_enable;
+
+ u32 isp_ctl;
+
+ /* Only used when BRN 49927 present. */
+ u32 tpu;
+
+ u32 event_pixel_pds_info;
+
+ /* Only used when feature CLUSTER_GROUPING present. */
+ u32 pixel_phantom;
+
+ u32 view_idx;
+
+ u32 event_pixel_pds_data;
+
+ /* Only used when BRN 65101 present. */
+ u32 brn65101_event_pixel_pds_data;
+
+ /* Only used when feature GPU_MULTICORE_SUPPORT or BRN 47217 present. */
+ u32 isp_oclqry_stride;
+
+ /* Only used when feature ZLS_SUBTILE present. */
+ u32 isp_zls_pixels;
+
+ /* Only used when feature ISP_ZLS_D24_S8_PACKING_OGL_MODE present. */
+ u32 rgx_cr_blackpearl_fix;
+
+ /* All values below the aligned_u64 must be 64 bit. */
+ aligned_u64 isp_scissor_base;
+ u64 isp_dbias_base;
+ u64 isp_oclqry_base;
+ u64 isp_zlsctl;
+ u64 isp_zload_store_base;
+ u64 isp_stencil_load_store_base;
+
+ /*
+ * Only used when feature FBCDC_ALGORITHM present and value < 3 or feature
+ * FB_CDC_V4 present. Additionally, BRNs 48754, 60227, 72310 and 72311 must
+ * not be present.
+ */
+ u64 fb_cdc_zls;
+
+#define ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS 3U
+ u64 pbe_word[8U][ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS];
+ u64 tpu_border_colour_table;
+ u64 pds_bgnd[3U];
+
+ /* Only used when BRN 65101 present. */
+ u64 pds_bgnd_brn65101[3U];
+
+ u64 pds_pr_bgnd[3U];
+
+ /* Only used when BRN 62850 or 62865 present. */
+ u64 isp_dummy_stencil_store_base;
+
+ /* Only used when BRN 66193 present. */
+ u64 isp_dummy_depth_store_base;
+
+ /* Only used when BRN 67182 present. */
+ u32 rgnhdr_single_rt_size;
+ /* Only used when BRN 67182 present. */
+ u32 rgnhdr_scratch_offset;
+};
+
+struct rogue_fwif_cmd_frag {
+ struct rogue_fwif_cmd_geom_frag_shared cmd_shared __aligned(8);
+
+ struct rogue_fwif_frag_regs regs __aligned(8);
+ /* command control flags. */
+ u32 flags;
+ /* Stride IN BYTES for Z-Buffer in case of RTAs. */
+ u32 zls_stride;
+ /* Stride IN BYTES for S-Buffer in case of RTAs. */
+ u32 sls_stride;
+
+ /* Only used if feature GPU_MULTICORE_SUPPORT present. */
+ u32 execute_count;
+};
+
+/*
+ * Configuration registers which need to be loaded by the firmware before CDM
+ * can be started.
+ */
+struct rogue_fwif_compute_regs {
+ u64 tpu_border_colour_table;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u64 cdm_cb_queue;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u64 cdm_cb_base;
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u64 cdm_cb;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE is not present. */
+ u64 cdm_ctrl_stream_base;
+
+ u64 cdm_context_state_base_addr;
+
+ /* Only used when BRN 49927 is present. */
+ u32 tpu;
+ u32 cdm_resume_pds1;
+
+ /* Only used when feature COMPUTE_MORTON_CAPABLE present. */
+ u32 cdm_item;
+
+ /* Only used when feature CLUSTER_GROUPING present. */
+ u32 compute_cluster;
+
+ /* Only used when feature TPU_DM_GLOBAL_REGISTERS present. */
+ u32 tpu_tag_cdm_ctrl;
+
+ u32 padding;
+};
+
+struct rogue_fwif_cmd_compute {
+ /* Common command attributes */
+ struct rogue_fwif_cmd_common common __aligned(8);
+
+ /* CDM registers */
+ struct rogue_fwif_compute_regs regs;
+
+ /* Control flags */
+ u32 flags __aligned(8);
+
+ /* Only used when feature UNIFIED_STORE_VIRTUAL_PARTITIONING present. */
+ u32 num_temp_regions;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u32 stream_start_offset;
+
+ /* Only used when feature GPU_MULTICORE_SUPPORT present. */
+ u32 execute_count;
+};
+
+struct rogue_fwif_transfer_regs {
+ /*
+ * All 32 bit values should be added in the top section. This then requires
+ * only a single 8-byte alignment marker (aligned_u64) to align all the
+ * 64 bit values in the second section. (A short illustrative note on this
+ * layout choice follows the struct.)
+ */
+ u32 isp_bgobjvals;
+
+ u32 usc_pixel_output_ctrl;
+ u32 usc_clear_register0;
+ u32 usc_clear_register1;
+ u32 usc_clear_register2;
+ u32 usc_clear_register3;
+
+ u32 isp_mtile_size;
+ u32 isp_render_origin;
+ u32 isp_ctl;
+
+ /* Only used when feature S7_TOP_INFRASTRUCTURE present. */
+ u32 isp_xtp_pipe_enable;
+ u32 isp_aa;
+
+ u32 event_pixel_pds_info;
+
+ u32 event_pixel_pds_code;
+ u32 event_pixel_pds_data;
+
+ u32 isp_render;
+ u32 isp_rgn;
+
+ /* Only used when feature GPU_MULTICORE_SUPPORT present. */
+ u32 frag_screen;
+
+ /* All values below the aligned_u64 must be 64 bit. */
+ aligned_u64 pds_bgnd0_base;
+ u64 pds_bgnd1_base;
+ u64 pds_bgnd3_sizeinfo;
+
+ u64 isp_mtile_base;
+#define ROGUE_PBE_WORDS_REQUIRED_FOR_TQS 3
+ /* TQ_MAX_RENDER_TARGETS * PBE_STATE_SIZE */
+ u64 pbe_wordx_mrty[3U * ROGUE_PBE_WORDS_REQUIRED_FOR_TQS];
+};
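As noted above the struct, keeping every 32-bit register in the first section means only the first 64-bit member needs an explicit alignment marker. A rough illustration of the layout rule (the field names here are made up):

	/*
	 *   u32 a; u32 b; u32 c;   <- 32-bit state, packed back to back
	 *   aligned_u64 d;         <- one 8-byte alignment point (padding added once)
	 *   u64 e; u64 f;          <- already 8-byte aligned, no further padding
	 *
	 * Interleaving u32 and u64 members would instead scatter implicit
	 * padding through the struct and make the shared FW/host offsets
	 * harder to pin down.
	 */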
+
+struct rogue_fwif_cmd_transfer {
+ /* Common command attributes */
+ struct rogue_fwif_cmd_common common __aligned(8);
+
+ struct rogue_fwif_transfer_regs regs __aligned(8);
+
+ u32 flags;
+
+ u32 padding;
+};
+
+#include "pvr_rogue_fwif_client_check.h"
+
+#endif /* PVR_ROGUE_FWIF_CLIENT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
new file mode 100644
index 000000000000..54aa4474163e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CLIENT_CHECK_H
+#define PVR_ROGUE_FWIF_CLIENT_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+ static_assert(offsetof(type, member) == (offset), \
+ "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+ static_assert(sizeof(type) == (size), #type " is incorrect size")
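These two macros turn the FW/host ABI into compile-time checks: offsetof() and sizeof() are compared against the expected values, and the build fails with the generated message if the host's view of a shared struct ever drifts. A minimal sketch with a hypothetical struct (example_fw_regs is illustrative only, not part of the patch):

	struct example_fw_regs {
		u64 base_addr;	/* offset 0 */
		u32 ctrl;	/* offset 8 */
		u32 padding;	/* offset 12 */
	};

	OFFSET_CHECK(struct example_fw_regs, base_addr, 0);
	OFFSET_CHECK(struct example_fw_regs, ctrl, 8);
	SIZE_CHECK(struct example_fw_regs, 16);

Re-ordering the members, or changing a type, makes one of the static_asserts fire instead of silently breaking the firmware interface.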
+
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu_border_colour_table, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect0, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect1, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, ppp_ctrl, 28);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, te_psg, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu, 36);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task0_size, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task3_size, 44);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_ctrl, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, view_idx, 52);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_coeff_free_prog, 56);
+SIZE_CHECK(struct rogue_fwif_geom_regs, 64);
+
+OFFSET_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, te_psgregion_addr, 0);
+SIZE_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, partial_render_geom_frag_fence, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, dummy_rgnhdr_init_geom_regs, 96);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, brn61484_66333_live_rt, 104);
+SIZE_CHECK(struct rogue_fwif_cmd_geom, 112);
+
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_pixel_output_ctrl, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_clear_register, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjdepth, 36);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjvals, 40);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_aa, 44);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_xtp_pipe_enable, 48);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_ctl, 52);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu, 56);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_info, 60);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pixel_phantom, 64);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, view_idx, 68);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_data, 72);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, brn65101_event_pixel_pds_data, 76);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_stride, 80);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zls_pixels, 84);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgx_cr_blackpearl_fix, 88);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_scissor_base, 96);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dbias_base, 104);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_base, 112);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zlsctl, 120);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zload_store_base, 128);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_stencil_load_store_base, 136);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, fb_cdc_zls, 144);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pbe_word, 152);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu_border_colour_table, 344);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd, 352);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd_brn65101, 376);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_pr_bgnd, 400);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_stencil_store_base, 424);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_depth_store_base, 432);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_single_rt_size, 440);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_scratch_offset, 444);
+SIZE_CHECK(struct rogue_fwif_frag_regs, 448);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, flags, 464);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, zls_stride, 468);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, sls_stride, 472);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, execute_count, 476);
+SIZE_CHECK(struct rogue_fwif_cmd_frag, 480);
+
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_border_colour_table, 0);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_queue, 8);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_base, 16);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb, 24);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_ctrl_stream_base, 32);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_context_state_base_addr, 40);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu, 48);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_resume_pds1, 52);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_item, 56);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, compute_cluster, 60);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_tag_cdm_ctrl, 64);
+SIZE_CHECK(struct rogue_fwif_compute_regs, 72);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, num_temp_regions, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, stream_start_offset, 88);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, execute_count, 92);
+SIZE_CHECK(struct rogue_fwif_cmd_compute, 96);
+
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_bgobjvals, 0);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_pixel_output_ctrl, 4);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register0, 8);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register1, 12);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register2, 16);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register3, 20);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_size, 24);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render_origin, 28);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_ctl, 32);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_xtp_pipe_enable, 36);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_aa, 40);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_info, 44);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_code, 48);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_data, 52);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render, 56);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_rgn, 60);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, frag_screen, 64);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd0_base, 72);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd1_base, 80);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd3_sizeinfo, 88);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_base, 96);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pbe_wordx_mrty, 104);
+SIZE_CHECK(struct rogue_fwif_transfer_regs, 176);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, flags, 184);
+SIZE_CHECK(struct rogue_fwif_cmd_transfer, 192);
+
+#endif /* PVR_ROGUE_FWIF_CLIENT_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
new file mode 100644
index 000000000000..6ebb95ba98a6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_COMMON_H
+#define PVR_ROGUE_FWIF_COMMON_H
+
+#include <linux/build_bug.h>
+
+/*
+ * This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver.
+ */
+#define PVR_FW_ALIGNMENT_LSB 7U
+
+/* Macro to test structure size alignment. */
+#define PVR_FW_STRUCT_SIZE_ASSERT(_a) \
+ static_assert((sizeof(_a) & PVR_FW_ALIGNMENT_LSB) == 0U, \
+ "Size of " #_a " is not properly aligned")
+
+/* The master definition for data masters known to the firmware. */
+
+#define PVR_FWIF_DM_GP (0)
+/* Either TDM or 2D DM is present. */
+/* When the 'tla' feature is present in the hw (as per @pvr_device_features). */
+#define PVR_FWIF_DM_2D (1)
+/*
+ * When the 'fastrender_dm' feature is present in the hw (as per
+ * @pvr_device_features).
+ */
+#define PVR_FWIF_DM_TDM (1)
+
+#define PVR_FWIF_DM_GEOM (2)
+#define PVR_FWIF_DM_FRAG (3)
+#define PVR_FWIF_DM_CDM (4)
+#define PVR_FWIF_DM_RAY (5)
+#define PVR_FWIF_DM_GEOM2 (6)
+#define PVR_FWIF_DM_GEOM3 (7)
+#define PVR_FWIF_DM_GEOM4 (8)
+
+#define PVR_FWIF_DM_LAST PVR_FWIF_DM_GEOM4
+
+/* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RAY, GEOM2, GEOM3, GEOM4 */
+#define PVR_FWIF_DM_MAX (PVR_FWIF_DM_LAST + 1U)
+
+/* GPU Utilisation states */
+#define PVR_FWIF_GPU_UTIL_STATE_IDLE 0U
+#define PVR_FWIF_GPU_UTIL_STATE_ACTIVE 1U
+#define PVR_FWIF_GPU_UTIL_STATE_BLOCKED 2U
+#define PVR_FWIF_GPU_UTIL_STATE_NUM 3U
+#define PVR_FWIF_GPU_UTIL_STATE_MASK 0x3ULL
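The 0x3ULL mask suggests the current state occupies the two least significant bits of a 64-bit word shared with the firmware, with the remaining bits presumably carrying a timestamp; the accessors that actually do this live elsewhere in the FW interface, so the helpers below (example_gpu_util_state / example_gpu_util_time) are only an illustrative sketch of how such a word could be split:

	static inline u32 example_gpu_util_state(u64 word)
	{
		return (u32)(word & PVR_FWIF_GPU_UTIL_STATE_MASK);
	}

	static inline u64 example_gpu_util_time(u64 word)
	{
		return word & ~PVR_FWIF_GPU_UTIL_STATE_MASK;
	}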
+
+/*
+ * Maximum number of register writes that can be done by the register
+ * programmer (FW or META DMA). This is not a HW limitation; it is only
+ * a protection against malformed inputs to the register programmer.
+ */
+#define PVR_MAX_NUM_REGISTER_PROGRAMMER_WRITES 128U
+
+#endif /* PVR_ROGUE_FWIF_COMMON_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
new file mode 100644
index 000000000000..168277bce948
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_DEV_INFO_H__
+#define __PVR_ROGUE_FWIF_DEV_INFO_H__
+
+enum {
+ PVR_FW_HAS_BRN_44079 = 0,
+ PVR_FW_HAS_BRN_47217,
+ PVR_FW_HAS_BRN_48492,
+ PVR_FW_HAS_BRN_48545,
+ PVR_FW_HAS_BRN_49927,
+ PVR_FW_HAS_BRN_50767,
+ PVR_FW_HAS_BRN_51764,
+ PVR_FW_HAS_BRN_62269,
+ PVR_FW_HAS_BRN_63142,
+ PVR_FW_HAS_BRN_63553,
+ PVR_FW_HAS_BRN_66011,
+ PVR_FW_HAS_BRN_71242,
+
+ PVR_FW_HAS_BRN_MAX
+};
+
+enum {
+ PVR_FW_HAS_ERN_35421 = 0,
+ PVR_FW_HAS_ERN_38020,
+ PVR_FW_HAS_ERN_38748,
+ PVR_FW_HAS_ERN_42064,
+ PVR_FW_HAS_ERN_42290,
+ PVR_FW_HAS_ERN_42606,
+ PVR_FW_HAS_ERN_47025,
+ PVR_FW_HAS_ERN_57596,
+
+ PVR_FW_HAS_ERN_MAX
+};
+
+enum {
+ PVR_FW_HAS_FEATURE_AXI_ACELITE = 0,
+ PVR_FW_HAS_FEATURE_CDM_CONTROL_STREAM_FORMAT,
+ PVR_FW_HAS_FEATURE_CLUSTER_GROUPING,
+ PVR_FW_HAS_FEATURE_COMMON_STORE_SIZE_IN_DWORDS,
+ PVR_FW_HAS_FEATURE_COMPUTE,
+ PVR_FW_HAS_FEATURE_COMPUTE_MORTON_CAPABLE,
+ PVR_FW_HAS_FEATURE_COMPUTE_OVERLAP,
+ PVR_FW_HAS_FEATURE_COREID_PER_OS,
+ PVR_FW_HAS_FEATURE_DYNAMIC_DUST_POWER,
+ PVR_FW_HAS_FEATURE_ECC_RAMS,
+ PVR_FW_HAS_FEATURE_FBCDC,
+ PVR_FW_HAS_FEATURE_FBCDC_ALGORITHM,
+ PVR_FW_HAS_FEATURE_FBCDC_ARCHITECTURE,
+ PVR_FW_HAS_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS,
+ PVR_FW_HAS_FEATURE_FBC_MAX_LARGE_DESCRIPTORS,
+ PVR_FW_HAS_FEATURE_FB_CDC_V4,
+ PVR_FW_HAS_FEATURE_GPU_MULTICORE_SUPPORT,
+ PVR_FW_HAS_FEATURE_GPU_VIRTUALISATION,
+ PVR_FW_HAS_FEATURE_GS_RTA_SUPPORT,
+ PVR_FW_HAS_FEATURE_IRQ_PER_OS,
+ PVR_FW_HAS_FEATURE_ISP_MAX_TILES_IN_FLIGHT,
+ PVR_FW_HAS_FEATURE_ISP_SAMPLES_PER_PIXEL,
+ PVR_FW_HAS_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
+ PVR_FW_HAS_FEATURE_LAYOUT_MARS,
+ PVR_FW_HAS_FEATURE_MAX_PARTITIONS,
+ PVR_FW_HAS_FEATURE_META,
+ PVR_FW_HAS_FEATURE_META_COREMEM_SIZE,
+ PVR_FW_HAS_FEATURE_MIPS,
+ PVR_FW_HAS_FEATURE_NUM_CLUSTERS,
+ PVR_FW_HAS_FEATURE_NUM_ISP_IPP_PIPES,
+ PVR_FW_HAS_FEATURE_NUM_OSIDS,
+ PVR_FW_HAS_FEATURE_NUM_RASTER_PIPES,
+ PVR_FW_HAS_FEATURE_PBE2_IN_XE,
+ PVR_FW_HAS_FEATURE_PBVNC_COREID_REG,
+ PVR_FW_HAS_FEATURE_PERFBUS,
+ PVR_FW_HAS_FEATURE_PERF_COUNTER_BATCH,
+ PVR_FW_HAS_FEATURE_PHYS_BUS_WIDTH,
+ PVR_FW_HAS_FEATURE_RISCV_FW_PROCESSOR,
+ PVR_FW_HAS_FEATURE_ROGUEXE,
+ PVR_FW_HAS_FEATURE_S7_TOP_INFRASTRUCTURE,
+ PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT,
+ PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2,
+ PVR_FW_HAS_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION,
+ PVR_FW_HAS_FEATURE_SLC_BANKS,
+ PVR_FW_HAS_FEATURE_SLC_CACHE_LINE_SIZE_BITS,
+ PVR_FW_HAS_FEATURE_SLC_SIZE_CONFIGURABLE,
+ PVR_FW_HAS_FEATURE_SLC_SIZE_IN_KILOBYTES,
+ PVR_FW_HAS_FEATURE_SOC_TIMER,
+ PVR_FW_HAS_FEATURE_SYS_BUS_SECURE_RESET,
+ PVR_FW_HAS_FEATURE_TESSELLATION,
+ PVR_FW_HAS_FEATURE_TILE_REGION_PROTECTION,
+ PVR_FW_HAS_FEATURE_TILE_SIZE_X,
+ PVR_FW_HAS_FEATURE_TILE_SIZE_Y,
+ PVR_FW_HAS_FEATURE_TLA,
+ PVR_FW_HAS_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS,
+ PVR_FW_HAS_FEATURE_TPU_DM_GLOBAL_REGISTERS,
+ PVR_FW_HAS_FEATURE_TPU_FILTERING_MODE_CONTROL,
+ PVR_FW_HAS_FEATURE_USC_MIN_OUTPUT_REGISTERS_PER_PIX,
+ PVR_FW_HAS_FEATURE_VDM_DRAWINDIRECT,
+ PVR_FW_HAS_FEATURE_VDM_OBJECT_LEVEL_LLS,
+ PVR_FW_HAS_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS,
+ PVR_FW_HAS_FEATURE_WATCHDOG_TIMER,
+ PVR_FW_HAS_FEATURE_WORKGROUP_PROTECTION,
+ PVR_FW_HAS_FEATURE_XE_ARCHITECTURE,
+ PVR_FW_HAS_FEATURE_XE_MEMORY_HIERARCHY,
+ PVR_FW_HAS_FEATURE_XE_TPU2,
+ PVR_FW_HAS_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH,
+ PVR_FW_HAS_FEATURE_XPU_MAX_SLAVES,
+ PVR_FW_HAS_FEATURE_XPU_REGISTER_BROADCAST,
+ PVR_FW_HAS_FEATURE_XT_TOP_INFRASTRUCTURE,
+ PVR_FW_HAS_FEATURE_ZLS_SUBTILE,
+
+ PVR_FW_HAS_FEATURE_MAX
+};
+
+#endif /* __PVR_ROGUE_FWIF_DEV_INFO_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
new file mode 100644
index 000000000000..1db1f4c532bc
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_RESETFRAMEWORK_H
+#define PVR_ROGUE_FWIF_RESETFRAMEWORK_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+struct rogue_fwif_rf_registers {
+ union {
+ u64 cdmreg_cdm_cb_base;
+ u64 cdmreg_cdm_ctrl_stream_base;
+ };
+ u64 cdmreg_cdm_cb_queue;
+ u64 cdmreg_cdm_cb;
+};
+
+struct rogue_fwif_rf_cmd {
+ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+ struct rogue_fwif_rf_registers fw_registers __aligned(8);
+};
+
+#define ROGUE_FWIF_RF_CMD_SIZE sizeof(struct rogue_fwif_rf_cmd)
+
+#endif /* PVR_ROGUE_FWIF_RESETFRAMEWORK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
new file mode 100644
index 000000000000..56e11009e123
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
@@ -0,0 +1,1648 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SF_H
+#define PVR_ROGUE_FWIF_SF_H
+
+/*
+ ******************************************************************************
+ * Do *NOT* rearrange or delete lines in rogue_fw_log_sfgroups or stid_fmts;
+ * doing so WILL BREAK fw tracing message compatibility with previous
+ * fw versions. Only add new entries, if so required.
+ ******************************************************************************
+ */
+
+/* Available log groups. */
+enum rogue_fw_log_sfgroups {
+ ROGUE_FW_GROUP_NULL,
+ ROGUE_FW_GROUP_MAIN,
+ ROGUE_FW_GROUP_CLEANUP,
+ ROGUE_FW_GROUP_CSW,
+ ROGUE_FW_GROUP_PM,
+ ROGUE_FW_GROUP_RTD,
+ ROGUE_FW_GROUP_SPM,
+ ROGUE_FW_GROUP_MTS,
+ ROGUE_FW_GROUP_BIF,
+ ROGUE_FW_GROUP_MISC,
+ ROGUE_FW_GROUP_POW,
+ ROGUE_FW_GROUP_HWR,
+ ROGUE_FW_GROUP_HWP,
+ ROGUE_FW_GROUP_RPM,
+ ROGUE_FW_GROUP_DMA,
+ ROGUE_FW_GROUP_DBG,
+};
+
+#define PVR_SF_STRING_MAX_SIZE 256U
+
+/* pair of string format id and string formats */
+struct rogue_fw_stid_fmt {
+ u32 id;
+ char name[PVR_SF_STRING_MAX_SIZE];
+};
+
+/*
+ * The symbolic names found in the table below are assigned a u32 value of
+ * the following format:
+ *
+ *   31 | 30..28 | 27..20 | 19..16 | 15..12 | 11..0   (bits)
+ *
+ * 0-11: id number
+ * 12-15: group id number
+ * 16-19: number of parameters
+ * 20-27: unused
+ * 28-30: active: identifies an SF packet (otherwise a regular int32)
+ * 31: reserved for signed/unsigned compatibility
+ *
+ * The following macro is used to build those id values.
+ */
+#define ROGUE_FW_LOG_IDMARKER (0x70000000U)
+#define ROGUE_FW_LOG_CREATESFID(a, b, e) ((u32)(a) | ((u32)(b) << 12) | ((u32)(e) << 16) | \
+ ROGUE_FW_LOG_IDMARKER)
+
+#define ROGUE_FW_LOG_IDMASK (0xFFF00000)
+#define ROGUE_FW_LOG_VALIDID(I) (((I) & ROGUE_FW_LOG_IDMASK) == ROGUE_FW_LOG_IDMARKER)
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define ROGUE_FW_SF_GID(x) (((u32)(x) >> 12) & 0xfU)
+/* Returns how many arguments the SF (string format) for the given (enum generated) id requires */
+#define ROGUE_FW_SF_PARAMNUM(x) (((u32)(x) >> 16) & 0xfU)
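A worked decode, taking one of the MAIN-group entries from the table below (presumably how a trace decoder interprets these words; the arithmetic follows directly from the macros above):

	/*
	 * ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MAIN, 2) builds:
	 *
	 *     0x00000002   id number 2            (bits 0-11)
	 *   | 0x00001000   ROGUE_FW_GROUP_MAIN    (bits 12-15, group 1)
	 *   | 0x00020000   two parameters         (bits 16-19)
	 *   | 0x70000000   ROGUE_FW_LOG_IDMARKER  (bits 28-30)
	 *   = 0x70021002
	 *
	 * ROGUE_FW_SF_GID(0x70021002)      == 1 (ROGUE_FW_GROUP_MAIN)
	 * ROGUE_FW_SF_PARAMNUM(0x70021002) == 2
	 * ROGUE_FW_LOG_VALIDID(0x70021002) is true, so the word is an SF id
	 * rather than a plain integer and is expected to be followed by two
	 * argument words.
	 */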
+
+/* pair of string format id and string formats */
+struct rogue_km_stid_fmt {
+ u32 id;
+ const char *name;
+};
+
+static const struct rogue_km_stid_fmt stid_fmts[] = {
+ { ROGUE_FW_LOG_CREATESFID(0, ROGUE_FW_GROUP_NULL, 0),
+ "You should not use this string" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MAIN, 6),
+ "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MAIN, 2),
+ "3D finished, HWRTData0State=%x, HWRTData1State=%x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MAIN, 4),
+ "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MAIN, 0),
+ "3D Transfer finished" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MAIN, 0),
+ "Compute finished" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MAIN, 0),
+ "TA finished" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MAIN, 0),
+ "Restart TA after partial render" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MAIN, 0),
+ "Resume TA without partial render" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MAIN, 2),
+ "Out of memory! Context 0x%08x, HWRTData 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MAIN, 0),
+ "TLA finished" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MAIN, 3),
+ "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Checks for FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MAIN, 0),
+ "UFO Checks succeeded" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MAIN, 1),
+ "UFO SPM PR-Checks for FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MAIN, 4),
+ "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Updates for FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Update: [0x%08.8x] = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MAIN, 1),
+ "ASSERT Failed: line %d of:" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_MAIN, 2),
+ "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_MAIN, 3),
+ "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR: Reset HW" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR: Lockup recovered." },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_MAIN, 1),
+ "HWR: False lockup detected for DM%u" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_MAIN, 3),
+ "Alignment check %d failed: host = 0x%x, fw = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_MAIN, 0),
+ "GP USC triggered" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_MAIN, 2),
+ "Overallocating %u temporary registers and %u shared registers for breakpoint handler" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_MAIN, 1),
+ "Setting breakpoint: Addr 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_MAIN, 0),
+ "Store breakpoint state" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_MAIN, 0),
+ "Unsetting BP Registers" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_MAIN, 1),
+ "Active RTs expected to be zero, actually %u" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_MAIN, 1),
+ "RTC present, %u active render targets" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_MAIN, 1),
+ "Estimated Power 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_MAIN, 1),
+ "RTA render target %u" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_MAIN, 2),
+ "Kick RTA render %u of %u" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_MAIN, 3),
+ "HWR sizes check %d failed: addresses = %d, sizes = %d" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_MAIN, 1),
+ "Pow: DUSTS_ENABLE = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_MAIN, 2),
+ "Pow: On(1)/Off(0): %d, Units: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_MAIN, 2),
+ "Pow: Changing number of dusts from %d to %d" },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_MAIN, 0),
+ "Pow: Sidekick ready to be powered down" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_MAIN, 2),
+ "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_MAIN, 0),
+ "No ZS Buffer used for partial render (store)" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_MAIN, 0),
+ "No Depth/Stencil Buffer used for partial render (load)" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_MAIN, 2),
+ "HWR: Lock-up DM%d FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_MAIN, 7),
+ "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_MAIN, 3),
+ "MLIST%d checker: MList[%d] = 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_MAIN, 1),
+ "MLIST%d OK" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_MAIN, 1),
+ "MLIST%d is empty" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_MAIN, 8),
+ "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d" },
+ { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_MAIN, 0),
+ "3D OQ flush kick" },
+ { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_MAIN, 1),
+ "HWPerf block ID (0x%x) unsupported by device" },
+ { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_MAIN, 2),
+ "Setting breakpoint: Addr 0x%08.8x DM%u" },
+ { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_MAIN, 1),
+ "RDM finished on context %u" },
+ { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_MAIN, 0),
+ "SHG finished" },
+ { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_MAIN, 1),
+ "FBA finished on context %u" },
+ { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_MAIN, 0),
+ "UFO Checks failed" },
+ { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_MAIN, 1),
+ "Kill DM%d start" },
+ { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_MAIN, 1),
+ "Kill DM%d complete" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_MAIN, 2),
+ "FC%u cCCB Woff update = %u" },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_MAIN, 4),
+ "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d" },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU init" },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_MAIN, 1),
+ "GPU Units init (# mask: 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_MAIN, 3),
+ "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d" },
+ { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_MAIN, 3),
+ "Register configuration added. Address: 0x%x Value: 0x%x%x" },
+ { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_MAIN, 1),
+ "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)" },
+ { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_MAIN, 0),
+ "Perform TPC flush." },
+ { ROGUE_FW_LOG_CREATESFID(74, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU has locked up (see HWR logs for more info)" },
+ { ROGUE_FW_LOG_CREATESFID(75, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR has been triggered - GPU has overrun its deadline (see HWR logs)" },
+ { ROGUE_FW_LOG_CREATESFID(76, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR has been triggered - GPU has failed a poll (see HWR logs)" },
+ { ROGUE_FW_LOG_CREATESFID(77, ROGUE_FW_GROUP_MAIN, 1),
+ "Doppler out of memory event for FC %u" },
+ { ROGUE_FW_LOG_CREATESFID(78, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(79, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(80, ROGUE_FW_GROUP_MAIN, 1),
+ "TIMESTAMP -> [0x%08.8x]" },
+ { ROGUE_FW_LOG_CREATESFID(81, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO RMW Updates for FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(82, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Update: [0x%08.8x] = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(83, ROGUE_FW_GROUP_MAIN, 2),
+ "Kick Null cmd: FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(84, ROGUE_FW_GROUP_MAIN, 2),
+ "RPM Out of memory! Context 0x%08x, SH requestor %d" },
+ { ROGUE_FW_LOG_CREATESFID(85, ROGUE_FW_GROUP_MAIN, 4),
+ "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d" },
+ { ROGUE_FW_LOG_CREATESFID(86, ROGUE_FW_GROUP_MAIN, 4),
+ "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(87, ROGUE_FW_GROUP_MAIN, 4),
+ "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(88, ROGUE_FW_GROUP_MAIN, 4),
+ "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(89, ROGUE_FW_GROUP_MAIN, 3),
+ "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(90, ROGUE_FW_GROUP_MAIN, 3),
+ "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(91, ROGUE_FW_GROUP_MAIN, 1),
+ "Host Sync Partition marker: %d" },
+ { ROGUE_FW_LOG_CREATESFID(92, ROGUE_FW_GROUP_MAIN, 1),
+ "Host Sync Partition repeat: %d" },
+ { ROGUE_FW_LOG_CREATESFID(93, ROGUE_FW_GROUP_MAIN, 1),
+ "Core clock set to %d Hz" },
+ { ROGUE_FW_LOG_CREATESFID(94, ROGUE_FW_GROUP_MAIN, 7),
+ "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(95, ROGUE_FW_GROUP_MAIN, 3),
+ "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(96, ROGUE_FW_GROUP_MAIN, 5),
+ "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(97, ROGUE_FW_GROUP_MAIN, 4),
+ "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(98, ROGUE_FW_GROUP_MAIN, 0),
+ "Compute stalled" },
+ { ROGUE_FW_LOG_CREATESFID(99, ROGUE_FW_GROUP_MAIN, 3),
+ "Compute stalled (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(100, ROGUE_FW_GROUP_MAIN, 3),
+ "Compute resumed (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(101, ROGUE_FW_GROUP_MAIN, 4),
+ "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(102, ROGUE_FW_GROUP_MAIN, 4),
+ "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(103, ROGUE_FW_GROUP_MAIN, 1),
+ "DM: %u signal check failed" },
+ { ROGUE_FW_LOG_CREATESFID(104, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(105, ROGUE_FW_GROUP_MAIN, 0),
+ "TDM finished" },
+ { ROGUE_FW_LOG_CREATESFID(106, ROGUE_FW_GROUP_MAIN, 4),
+ "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(107, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 HIT" },
+ { ROGUE_FW_LOG_CREATESFID(108, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 Dummy TA kicked" },
+ { ROGUE_FW_LOG_CREATESFID(109, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 resume TA" },
+ { ROGUE_FW_LOG_CREATESFID(110, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 double hit after applying WA" },
+ { ROGUE_FW_LOG_CREATESFID(111, ROGUE_FW_GROUP_MAIN, 2),
+ "BRN 54141 Dummy TA VDM base address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(112, ROGUE_FW_GROUP_MAIN, 4),
+ "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(113, ROGUE_FW_GROUP_MAIN, 2),
+ "TDM stalled (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(114, ROGUE_FW_GROUP_MAIN, 1),
+ "Write Offset update notification for stalled FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(115, ROGUE_FW_GROUP_MAIN, 3),
+ "Changing OSid %d's priority from %u to %u" },
+ { ROGUE_FW_LOG_CREATESFID(116, ROGUE_FW_GROUP_MAIN, 0),
+ "Compute resumed" },
+ { ROGUE_FW_LOG_CREATESFID(117, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(118, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(119, ROGUE_FW_GROUP_MAIN, 11),
+ "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(120, ROGUE_FW_GROUP_MAIN, 10),
+ "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(121, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(122, ROGUE_FW_GROUP_MAIN, 6),
+ "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(123, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(124, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(125, ROGUE_FW_GROUP_MAIN, 1),
+ "Reconfigure CSRM: special coeff support enable %d." },
+ { ROGUE_FW_LOG_CREATESFID(127, ROGUE_FW_GROUP_MAIN, 1),
+ "TA requires max coeff mode, deferring: %d." },
+ { ROGUE_FW_LOG_CREATESFID(128, ROGUE_FW_GROUP_MAIN, 1),
+ "3D requires max coeff mode, deferring: %d." },
+ { ROGUE_FW_LOG_CREATESFID(129, ROGUE_FW_GROUP_MAIN, 1),
+ "Kill DM%d failed" },
+ { ROGUE_FW_LOG_CREATESFID(130, ROGUE_FW_GROUP_MAIN, 2),
+ "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(131, ROGUE_FW_GROUP_MAIN, 3),
+ "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(132, ROGUE_FW_GROUP_MAIN, 1),
+ "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs)." },
+ { ROGUE_FW_LOG_CREATESFID(133, ROGUE_FW_GROUP_MAIN, 1),
+ "HCS changed to %d ms" },
+ { ROGUE_FW_LOG_CREATESFID(134, ROGUE_FW_GROUP_MAIN, 4),
+ "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(135, ROGUE_FW_GROUP_MAIN, 2),
+ " Phantom %d: USCTiles=%d" },
+ { ROGUE_FW_LOG_CREATESFID(136, ROGUE_FW_GROUP_MAIN, 0),
+ "Isolation grouping is disabled" },
+ { ROGUE_FW_LOG_CREATESFID(137, ROGUE_FW_GROUP_MAIN, 1),
+ "Isolation group configured with a priority threshold of %d" },
+ { ROGUE_FW_LOG_CREATESFID(138, ROGUE_FW_GROUP_MAIN, 1),
+ "OS %d has come online" },
+ { ROGUE_FW_LOG_CREATESFID(139, ROGUE_FW_GROUP_MAIN, 1),
+ "OS %d has gone offline" },
+ { ROGUE_FW_LOG_CREATESFID(140, ROGUE_FW_GROUP_MAIN, 4),
+ "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(141, ROGUE_FW_GROUP_MAIN, 7),
+ "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(142, ROGUE_FW_GROUP_MAIN, 6),
+ "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(143, ROGUE_FW_GROUP_MAIN, 5),
+ "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(144, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU deinit" },
+ { ROGUE_FW_LOG_CREATESFID(145, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU units deinit" },
+ { ROGUE_FW_LOG_CREATESFID(146, ROGUE_FW_GROUP_MAIN, 2),
+ "Initialised OS %d with config flags 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(147, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO limit exceeded %d/%d" },
+ { ROGUE_FW_LOG_CREATESFID(148, ROGUE_FW_GROUP_MAIN, 0),
+ "3D Dummy stencil store" },
+ { ROGUE_FW_LOG_CREATESFID(149, ROGUE_FW_GROUP_MAIN, 3),
+ "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(150, ROGUE_FW_GROUP_MAIN, 1),
+ "Unknown Command (eCmdType=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(151, ROGUE_FW_GROUP_MAIN, 4),
+ "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(152, ROGUE_FW_GROUP_MAIN, 5),
+ "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d" },
+ { ROGUE_FW_LOG_CREATESFID(153, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM context switch check: Roff %u points to 0x%08x, Match=%u" },
+ { ROGUE_FW_LOG_CREATESFID(154, ROGUE_FW_GROUP_MAIN, 6),
+ "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(155, ROGUE_FW_GROUP_MAIN, 2),
+ "FW IRQ # %u @ %u" },
+ { ROGUE_FW_LOG_CREATESFID(156, ROGUE_FW_GROUP_MAIN, 3),
+ "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u" },
+ { ROGUE_FW_LOG_CREATESFID(157, ROGUE_FW_GROUP_MAIN, 3),
+ "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(158, ROGUE_FW_GROUP_MAIN, 3),
+ "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(159, ROGUE_FW_GROUP_MAIN, 4),
+ "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(160, ROGUE_FW_GROUP_MAIN, 4),
+ "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u" },
+ { ROGUE_FW_LOG_CREATESFID(161, ROGUE_FW_GROUP_MAIN, 3),
+ "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(162, ROGUE_FW_GROUP_MAIN, 4),
+ "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(163, ROGUE_FW_GROUP_MAIN, 4),
+ "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u" },
+ { ROGUE_FW_LOG_CREATESFID(164, ROGUE_FW_GROUP_MAIN, 3),
+ "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(165, ROGUE_FW_GROUP_MAIN, 8),
+ "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(166, ROGUE_FW_GROUP_MAIN, 4),
+ "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)" },
+ { ROGUE_FW_LOG_CREATESFID(167, ROGUE_FW_GROUP_MAIN, 2),
+ "OSid %u has %u stale commands in its KCCB" },
+ { ROGUE_FW_LOG_CREATESFID(168, ROGUE_FW_GROUP_MAIN, 0),
+ "Applying VCE pause" },
+ { ROGUE_FW_LOG_CREATESFID(169, ROGUE_FW_GROUP_MAIN, 3),
+ "OSid %u KCCB slot %u value updated to %u" },
+ { ROGUE_FW_LOG_CREATESFID(170, ROGUE_FW_GROUP_MAIN, 7),
+ "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(171, ROGUE_FW_GROUP_MAIN, 10),
+ "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u" },
+ { ROGUE_FW_LOG_CREATESFID(172, ROGUE_FW_GROUP_MAIN, 10),
+ "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u" },
+ { ROGUE_FW_LOG_CREATESFID(173, ROGUE_FW_GROUP_MAIN, 2),
+ "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u" },
+ { ROGUE_FW_LOG_CREATESFID(174, ROGUE_FW_GROUP_MAIN, 2),
+ "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(175, ROGUE_FW_GROUP_MAIN, 3),
+ "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(176, ROGUE_FW_GROUP_MAIN, 2),
+ "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(177, ROGUE_FW_GROUP_MAIN, 1),
+ "Set Periodic Hardware Reset Mode: %d" },
+ { ROGUE_FW_LOG_CREATESFID(179, ROGUE_FW_GROUP_MAIN, 3),
+ "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(180, ROGUE_FW_GROUP_MAIN, 1),
+ "PHR mode %d triggered a reset" },
+ { ROGUE_FW_LOG_CREATESFID(181, ROGUE_FW_GROUP_MAIN, 2),
+ "Signal update, Snoop Filter: %u, Signal Id: %u" },
+ { ROGUE_FW_LOG_CREATESFID(182, ROGUE_FW_GROUP_MAIN, 1),
+ "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8." },
+ { ROGUE_FW_LOG_CREATESFID(183, ROGUE_FW_GROUP_MAIN, 4),
+ "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u" },
+ { ROGUE_FW_LOG_CREATESFID(184, ROGUE_FW_GROUP_MAIN, 5),
+ "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d" },
+ { ROGUE_FW_LOG_CREATESFID(185, ROGUE_FW_GROUP_MAIN, 3),
+ "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x." },
+ { ROGUE_FW_LOG_CREATESFID(186, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u" },
+ { ROGUE_FW_LOG_CREATESFID(187, ROGUE_FW_GROUP_MAIN, 2),
+ "Signal updates: FIFO: %u, Signals: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(188, ROGUE_FW_GROUP_MAIN, 2),
+ "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(189, ROGUE_FW_GROUP_MAIN, 0),
+ "Insert BRN68497 WA blit after TDM Context store." },
+ { ROGUE_FW_LOG_CREATESFID(190, ROGUE_FW_GROUP_MAIN, 1),
+ "UFO Updates for previously finished FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(191, ROGUE_FW_GROUP_MAIN, 1),
+ "RTC with RTA present, %u active render targets" },
+ { ROGUE_FW_LOG_CREATESFID(192, ROGUE_FW_GROUP_MAIN, 0),
+ "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!" },
+ { ROGUE_FW_LOG_CREATESFID(193, ROGUE_FW_GROUP_MAIN, 2),
+ "Block 0x%x / Counter 0x%x INVALID and ignored" },
+ { ROGUE_FW_LOG_CREATESFID(194, ROGUE_FW_GROUP_MAIN, 2),
+ "ECC fault GPU=0x%08x FW=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(195, ROGUE_FW_GROUP_MAIN, 1),
+ "Processing XPU event on DM = %d" },
+ { ROGUE_FW_LOG_CREATESFID(196, ROGUE_FW_GROUP_MAIN, 2),
+ "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u" },
+ { ROGUE_FW_LOG_CREATESFID(197, ROGUE_FW_GROUP_MAIN, 1),
+ "GPU-%u has locked up (see HWR logs for more info)" },
+ { ROGUE_FW_LOG_CREATESFID(198, ROGUE_FW_GROUP_MAIN, 3),
+ "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(199, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU has locked up (see HWR logs for more info)" },
+ { ROGUE_FW_LOG_CREATESFID(200, ROGUE_FW_GROUP_MAIN, 1),
+ "Reprocessing outstanding XPU events from cores 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(201, ROGUE_FW_GROUP_MAIN, 3),
+ "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(202, ROGUE_FW_GROUP_MAIN, 8),
+ "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(203, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM stalled Core %u (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(204, ROGUE_FW_GROUP_MAIN, 8),
+ "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(205, ROGUE_FW_GROUP_MAIN, 4),
+ "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(206, ROGUE_FW_GROUP_MAIN, 6),
+ "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(207, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM resumed core %u (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(208, ROGUE_FW_GROUP_MAIN, 4),
+ "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(209, ROGUE_FW_GROUP_MAIN, 2),
+ " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)" },
+ { ROGUE_FW_LOG_CREATESFID(210, ROGUE_FW_GROUP_MAIN, 2),
+ "Mask = 0x%X, mask2 = 0x%X" },
+ { ROGUE_FW_LOG_CREATESFID(211, ROGUE_FW_GROUP_MAIN, 3),
+ " core %u, reg = %u, mask = 0x%X)" },
+ { ROGUE_FW_LOG_CREATESFID(212, ROGUE_FW_GROUP_MAIN, 1),
+ "ECC fault received from safety bus: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1),
+ "Safety Watchdog threshold period set to 0x%x clock cycles" },
+ { ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0),
+ "MTS Safety Event triggered by the safety watchdog." },
+ { ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3),
+ "DM%d USC tasks range limit 0 - %d, stride %d" },
+ { ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1),
+ "ECC fault GPU=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(217, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU Hardware units reset to prevent transient faults." },
+ { ROGUE_FW_LOG_CREATESFID(218, ROGUE_FW_GROUP_MAIN, 2),
+ "Kick Abort cmd: FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(219, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(220, ROGUE_FW_GROUP_MAIN, 0),
+ "Ray finished" },
+ { ROGUE_FW_LOG_CREATESFID(221, ROGUE_FW_GROUP_MAIN, 2),
+ "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X" },
+ { ROGUE_FW_LOG_CREATESFID(222, ROGUE_FW_GROUP_MAIN, 2),
+ "CFI Timeout detected (%d increasing to %d)" },
+ { ROGUE_FW_LOG_CREATESFID(223, ROGUE_FW_GROUP_MAIN, 2),
+ "CFI Timeout detected for FBM (%d increasing to %d)" },
+ { ROGUE_FW_LOG_CREATESFID(224, ROGUE_FW_GROUP_MAIN, 0),
+ "Geom OOM event not allowed" },
+ { ROGUE_FW_LOG_CREATESFID(225, ROGUE_FW_GROUP_MAIN, 4),
+ "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)" },
+ { ROGUE_FW_LOG_CREATESFID(226, ROGUE_FW_GROUP_MAIN, 2),
+ "Skipping already executed TA FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(227, ROGUE_FW_GROUP_MAIN, 2),
+ "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM" },
+ { ROGUE_FW_LOG_CREATESFID(228, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(229, ROGUE_FW_GROUP_MAIN, 12),
+ "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(230, ROGUE_FW_GROUP_MAIN, 11),
+ "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(231, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(232, ROGUE_FW_GROUP_MAIN, 1),
+ "TDM finished: Kick ID %u " },
+ { ROGUE_FW_LOG_CREATESFID(233, ROGUE_FW_GROUP_MAIN, 1),
+ "TA finished: Kick ID %u " },
+ { ROGUE_FW_LOG_CREATESFID(234, ROGUE_FW_GROUP_MAIN, 3),
+ "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x" },
+ { ROGUE_FW_LOG_CREATESFID(235, ROGUE_FW_GROUP_MAIN, 1),
+ "Compute finished: Kick ID %u " },
+ { ROGUE_FW_LOG_CREATESFID(236, ROGUE_FW_GROUP_MAIN, 10),
+ "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(237, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(238, ROGUE_FW_GROUP_MAIN, 1),
+ "Ray finished: Kick ID %u " },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MTS, 2),
+ "Bg Task DM = %u, counted = %d" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MTS, 1),
+ "Bg Task complete DM = %u" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MTS, 3),
+ "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MTS, 1),
+ "Irq Task complete DM = %u" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MTS, 0),
+ "Kick MTS Bg task DM=All" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MTS, 1),
+ "Kick MTS Irq task DM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MTS, 2),
+ "Ready queue debug DM = %u, celltype = %d" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MTS, 2),
+ "Ready-to-run debug DM = %u, item = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MTS, 3),
+ "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MTS, 3),
+ "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MTS, 3),
+ "Ready queue debug DM = %u, celltype = %d, OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MTS, 3),
+ "Bg Task DM = %u, counted = %d, OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MTS, 1),
+ "Bg Task complete DM Bitfield: %u" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MTS, 0),
+ "Irq Task complete." },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_MTS, 7),
+ "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d." },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MTS, 4),
+ "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MTS, 2),
+ "KCCB Slot %u: Return value %u" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MTS, 1),
+ "Bg Task OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MTS, 3),
+ "KCCB Slot %u: Cmd=0x%08x, OSid=%u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MTS, 1),
+ "Irq Task (EVENT_STATUS=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MTS, 2),
+ "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_CLEANUP, 1),
+ "FwCommonContext [0x%08x] cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_CLEANUP, 3),
+ "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HWRTData [0x%08x] for DM=%d, received cleanup request" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HWRTData [0x%08x] HW Context for DM%u is busy" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HWRTData [0x%08x] HW Context %u cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_CLEANUP, 1),
+ "Freelist [0x%08x] cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_CLEANUP, 1),
+ "ZSBuffer [0x%08x] cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_CLEANUP, 3),
+ "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_CLEANUP, 4),
+ "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_CLEANUP, 4),
+ "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HW Ray Frame Data [0x%08x] HW Context %u cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_CLEANUP, 1),
+ "Discarding invalid cleanup request of type 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_CLEANUP, 1),
+ "Received cleanup request for HWRTData [0x%08x]" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_CSW, 1),
+ "CDM FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_CSW, 3),
+ "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_CSW, 1),
+ "CDM FWCtx shared alloc size load 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_CSW, 0),
+ "*** CDM FWCtx store complete" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_CSW, 0),
+ "*** CDM FWCtx store start" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_CSW, 0),
+ "CDM Soft Reset" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_CSW, 1),
+ "3D FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_CSW, 1),
+ "*** 3D FWCtx 0x%08.8x resume" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_CSW, 0),
+ "*** 3D context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_CSW, 3),
+ "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_CSW, 0),
+ "*** 3D context store start" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_CSW, 1),
+ "*** 3D TQ FWCtx 0x%08.8x resume" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_CSW, 1),
+ "TA FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_CSW, 3),
+ "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_CSW, 2),
+ "TA context shared alloc size store 0x%x, load 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_CSW, 0),
+ "*** TA context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_CSW, 0),
+ "*** TA context store start" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_CSW, 3),
+ "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_CSW, 2),
+ "Set FWCtx 0x%x priority to %u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_CSW, 2),
+ "3D context store pipe%d state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_CSW, 2),
+ "3D context resume pipe%d state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_CSW, 1),
+ "SHG FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_CSW, 3),
+ "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_CSW, 2),
+ "SHG context shared alloc size store 0x%x, load 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_CSW, 0),
+ "*** SHG context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_CSW, 0),
+ "*** SHG context store start" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_CSW, 1),
+ "Performing TA indirection, last used pipe %d" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_CSW, 0),
+ "CDM context store hit ctrl stream terminate. Skip resume." },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_CSW, 4),
+ "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_CSW, 2),
+ "TA PDS/USC state buffer flip (%d->%d)" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_CSW, 0),
+ "TA context store hit BRN 52563: vertex store tasks outstanding" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_CSW, 1),
+ "TA USC poll failed (USC vertex task count: %d)" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_CSW, 0),
+ "TA context store deferred due to BRN 54141." },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_CSW, 7),
+ "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_CSW, 0),
+ "*** TDM context store start" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_CSW, 0),
+ "*** TDM context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_CSW, 2),
+ "TDM context needs resume, header [0x%08.8x, 0x%08.8x]" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_CSW, 8),
+ "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_CSW, 3),
+ "3D context store pipe %2d (%2d) state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_CSW, 3),
+ "3D context resume pipe %2d (%2d) state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_CSW, 1),
+ "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_CSW, 3),
+ "3D context store pipe%d state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_CSW, 3),
+ "3D context resume pipe%d state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_CSW, 2),
+ "3D context resume IPP state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_CSW, 1),
+ "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_CSW, 3),
+ "TDM context resume pipe%d state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_CSW, 0),
+ "*** 3D context store start version 4" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_CSW, 2),
+ "Multicore context resume on DM%d active core mask 0x%04.4x" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_CSW, 2),
+ "Multicore context store on DM%d active core mask 0x%04.4x" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_CSW, 5),
+ "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_CSW, 0),
+ "*** RDM FWCtx store complete" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_CSW, 0),
+ "*** RDM FWCtx store start" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_CSW, 1),
+ "RDM FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_CSW, 1),
+ "RDM FWCtx 0x%08.8x resume" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_BIF, 3),
+ "Activate MemCtx=0x%08x BIFreq=%d secure=%d" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_BIF, 1),
+ "Deactivate MemCtx=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_BIF, 1),
+ "Alloc PC reg %d" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_BIF, 2),
+ "Grab reg set %d refcount now %d" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_BIF, 2),
+ "Ungrab reg set %d refcount now %d" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_BIF, 6),
+ "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_BIF, 2),
+ "Trust enabled:%d, for BIFreq=%d" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_BIF, 9),
+ "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_BIF, 4),
+ "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_BIF, 3),
+ "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_BIF, 7),
+ "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x" }, \
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_BIF, 5),
+ "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_BIF, 1),
+ "Unmap GPU memory (event status 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_BIF, 3),
+ "Activate MemCtx=0x%08x DM=%d secure=%d" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_BIF, 6),
+ "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_BIF, 4),
+ "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_BIF, 2),
+ "Trust enabled:%d, for DM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_BIF, 5),
+ "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_BIF, 6),
+ "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_BIF, 3),
+ "Alloc PC set %d as register range [%u - %u]" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO write 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO read 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MISC, 0),
+ "GPIO enabled" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MISC, 0),
+ "GPIO disabled" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO status=%d (0=OK, 1=Disabled)" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MISC, 2),
+ "GPIO_AP: Read address=0x%02x (%d byte(s))" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MISC, 2),
+ "GPIO_AP: Write address=0x%02x (%d byte(s))" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MISC, 0),
+ "GPIO_AP timeout!" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO already read 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MISC, 2),
+ "SR: Check buffer %d available returned %d" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Waiting for buffer %d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MISC, 2),
+ "SR: Timeout waiting for buffer %d (after %d ticks)" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MISC, 2),
+ "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Skip remaining strip %d in frame" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Inform HW that strip %d is a new frame" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip mode is %d" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip Render start (strip %d)" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip Render complete (buffer %d)" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip Render fault (buffer %d)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_MISC, 1),
+ "TRP state: %d" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_MISC, 1),
+ "TRP failure: %d" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MISC, 1),
+ "SW TRP State: %d" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_MISC, 1),
+ "SW TRP failure: %d" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_MISC, 1),
+ "HW kick event (%u)" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_MISC, 4),
+ "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_MISC, 6),
+ "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_MISC, 6),
+ "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_MISC, 4),
+ "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_MISC, 6),
+ "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_PM, 10),
+ "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_PM, 8),
+ "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_PM, 14),
+ "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_PM, 14),
+ "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_PM, 5),
+ "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_PM, 1),
+ "Grow for freelist ID=0x%08x denied by host" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_PM, 5),
+ "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_PM, 1),
+ "Reconstruction of freelist ID=0x%08x failed" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_PM, 2),
+ "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_PM, 2),
+ "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_PM, 1),
+ "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_PM, 1),
+ "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_PM, 1),
+ "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_PM, 1),
+ "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_PM, 1),
+ "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_PM, 7),
+ "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_PM, 10),
+ "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_PM, 10),
+ "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_PM, 7),
+ "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_PM, 7),
+ "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_PM, 10),
+ "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_PM, 10),
+ "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_PM, 5),
+ "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_PM, 5),
+ "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_PM, 5),
+ "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_PM, 5),
+ "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_PM, 4),
+ "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_PM, 1),
+ "3D Timeout Now for FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_PM, 1),
+ "GEOM PM Recycle for FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_PM, 1),
+ "PM running primary config (Core %d)" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_PM, 1),
+ "PM running secondary config (Core %d)" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_PM, 1),
+ "PM running tertiary config (Core %d)" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_PM, 1),
+ "PM running quaternary config (Core %d)" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_RPM, 3),
+ "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_RPM, 3),
+ "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_RPM, 0),
+ "RPM request failed. Waiting for freelist grow." },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_RPM, 0),
+ "RPM request failed. Aborting the current frame." },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_RPM, 1),
+ "RPM waiting for pending grow on freelist 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_RPM, 3),
+ "Request freelist grow [0x%08x] current pages %d, grow size %d" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_RPM, 2),
+ "Freelist load: SHF = 0x%08x, SHG = 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_RPM, 2),
+ "SHF FPL register: 0x%08x.0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_RPM, 2),
+ "SHG FPL register: 0x%08x.0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_RPM, 5),
+ "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_RPM, 0),
+ "Restarting SHG" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_RPM, 0),
+ "Grow failed, aborting the current frame." },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_RPM, 1),
+ "RPM abort complete on HWFrameData [0x%08x]." },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_RPM, 1),
+ "RPM freelist cleanup [0x%08x] requires abort to proceed." },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_RPM, 2),
+ "RPM page table base register: 0x%08x.0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_RPM, 0),
+ "Issuing RPM abort." },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_RPM, 0),
+ "RPM OOM received but toggle bits indicate free pages available" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_RPM, 0),
+ "RPM hardware timeout. Unable to process OOM event." },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_RPM, 5),
+ "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_RPM, 5),
+ "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_RPM, 3),
+ "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_RPM, 3),
+ "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_RTD, 2),
+ "3D RTData 0x%08x finished on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_RTD, 2),
+ "3D RTData 0x%08x ready on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_RTD, 4),
+ "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_RTD, 2),
+ "Loading VFP table 0x%08x%08x for 3D" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_RTD, 2),
+ "Loading VFP table 0x%08x%08x for TA" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_RTD, 10),
+ "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_RTD, 0),
+ "Perform VHEAP table store" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_RTD, 2),
+ "RTData 0x%08x: found match in Context=%d: Load=No, Store=No" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_RTD, 2),
+ "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_RTD, 3),
+ "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_RTD, 3),
+ "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_RTD, 5),
+ "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_RTD, 10),
+ "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_RTD, 2),
+ "TA RTData 0x%08x finished on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_RTD, 2),
+ "TA RTData 0x%08x loaded on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_RTD, 12),
+ "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_RTD, 12),
+ "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_RTD, 1),
+ "Freelist 0x%x RESET!!!!!!!!" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_RTD, 5),
+ "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_RTD, 3),
+ "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_RTD, 1),
+ "Freelist reconstruction ACK from host (HWR state :%u)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_RTD, 0),
+ "Freelist reconstruction completed" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_RTD, 3),
+ "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_RTD, 3),
+ "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_RTD, 8),
+ "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_RTD, 2),
+ "3D RTData 0x%08x loaded on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_RTD, 4),
+ "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_RTD, 2),
+ "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_RTD, 3),
+ "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_RTD, 12),
+ "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_RTD, 12),
+ "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_RTD, 5),
+ "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_RTD, 7),
+ "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_RTD, 4),
+ "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_RTD, 6),
+ "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_RTD, 1),
+ "TA RTData 0x%08x marked as killed." },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_RTD, 1),
+ "3D RTData 0x%08x marked as killed." },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_RTD, 1),
+ "RTData 0x%08x will be killed after TA restart." },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_RTD, 3),
+ "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset." },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_RTD, 3),
+ "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x." },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_RTD, 3),
+ "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x." },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_SPM, 0),
+ "Force Z-Load for partial render" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_SPM, 0),
+ "Force Z-Store for partial render" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_SPM, 1),
+ "3D MemFree: Local FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_SPM, 1),
+ "3D MemFree: MMU FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_SPM, 1),
+ "3D MemFree: Global FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_SPM, 6),
+ "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_SPM, 3),
+ "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_SPM, 5),
+ "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_SPM, 0),
+ "Partial render avoided" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_SPM, 0),
+ "Partial render discarded" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_SPM, 0),
+ "Partial Render finished" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = 3D-BG" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = 3D-IRQ" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = NONE" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = TA-BG" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = TA-IRQ" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_SPM, 2),
+ "ZStore address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_SPM, 2),
+ "SStore address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_SPM, 2),
+ "ZLoad address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_SPM, 2),
+ "SLoad address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_SPM, 0),
+ "No deferred ZS Buffer provided" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_SPM, 1),
+ "ZS Buffer successfully populated (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_SPM, 1),
+ "No need to populate ZS Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_SPM, 1),
+ "ZS Buffer successfully unpopulated (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_SPM, 1),
+ "No need to unpopulate ZS Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_SPM, 1),
+ "Send ZS-Buffer backing request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_SPM, 1),
+ "Send ZS-Buffer unbacking request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_SPM, 1),
+ "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_SPM, 1),
+ "Partial Render waiting for SBuffer to be backed (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = none" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR blocked" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = wait for grow" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = wait for HW" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR running" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR avoided" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR executed" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2),
+ "3DMemFree matches freelist 0x%08x (FL type = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0),
+ "Raise the 3DMemFreeDetected flag" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1),
+ "Wait for pending grow on Freelist 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1),
+ "ZS Buffer failed to be populated (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_SPM, 5),
+ "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_SPM, 4),
+ "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u" },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_SPM, 5),
+ "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_SPM, 1),
+ "No deferred partial render FW (Type=%d) Buffer provided" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_SPM, 1),
+ "No need to populate PR Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_SPM, 1),
+ "No need to unpopulate PR Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_SPM, 1),
+ "Send PR Buffer backing request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_SPM, 1),
+ "Send PR Buffer unbacking request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_SPM, 2),
+ "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_SPM, 4),
+ "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_SPM, 3),
+ "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_SPM, 3),
+ "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u" },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_SPM, 3),
+ "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR force free" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_POW, 4),
+ "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_POW, 3),
+ "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_POW, 3),
+ "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_POW, 4),
+ "Initiate powoff query. Inactive DMs: %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_POW, 2),
+ "Any RD-DM pending? %d, Any RD-DM Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_POW, 3),
+ "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_POW, 2),
+ "HW Request On(1)/Off(0): %d, Units: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_POW, 2),
+ "Request to change num of dusts to %d (Power flags=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_POW, 2),
+ "Changing number of dusts from %d to %d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_POW, 0),
+ "Sidekick init" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_POW, 1),
+ "Rascal+Dusts init (# dusts mask: 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_POW, 0),
+ "Initiate powoff query for RD-DMs." },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_POW, 0),
+ "Initiate powoff query for TLA-DM." },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_POW, 2),
+ "Any RD-DM pending? %d, Any RD-DM Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_POW, 2),
+ "TLA-DM pending? %d, TLA-DM Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_POW, 1),
+ "Request power up due to BRN37270. Pow stat int: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_POW, 3),
+ "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_POW, 1),
+ "OS requested forced IDLE, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_POW, 1),
+ "OS cancelled forced IDLE, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_POW, 3),
+ "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_POW, 3),
+ "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_POW, 2),
+ "Active PM latency set to %dms. Core clock: %d Hz" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_POW, 2),
+ "Compute cluster mask change to 0x%x, %d dusts powered." },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_POW, 0),
+ "Null command executed, repeating initiate powoff query for RD-DMs." },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_POW, 1),
+ "Power monitor: Estimate of dynamic energy %u" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_POW, 3),
+ "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: New deadline, time = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: New workload, cycles = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Proactive frequency calculated = %u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Reactive utilisation = %u percent" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Reactive frequency calculated = %u.%u" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: OPP Point Sent = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Deadline removed = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Workload removed = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Throttle to a maximum = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: Failed to pass OPP point via GPIO." },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: Invalid node passed to function." },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: Disabled: Not enabled by host." },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_POW, 2),
+ "HW Request Completed(1)/Aborted(0): %d, Ticks: %d" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_POW, 1),
+ "Allowed number of dusts is %d due to BRN59042." },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_POW, 3),
+ "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_POW, 5),
+ "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: GPU transitioned to idle" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: GPU transitioned to active" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_POW, 1),
+ "Power counter dumping: Data truncated writing register %u. Buffer too small." },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_POW, 0),
+ "Power controller returned ABORT for last request so retrying." },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_POW, 2),
+ "Discarding invalid power request: type 0x%x, DM %u" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_POW, 2),
+ "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_POW, 2),
+ "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_POW, 1),
+ "Detected attempt to change dust count while not forced idle (pow state 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_POW, 3),
+ "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_POW, 2),
+ "Conflicting clock frequency range: OPP min = %u, max = %u" },
+ { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Set floor to a minimum = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_POW, 2),
+ "OS requested pow off (forced = %d), pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_POW, 1),
+ "Discarding invalid power request: type 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_POW, 3),
+ "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_POW, 2),
+ "Changing SPU power state mask from 0x%x to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_POW, 1),
+ "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_POW, 1),
+ "Invalid SPU power mask 0x%x! Changing to 1" },
+ { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Send OPP %u with clock divider value %u" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_POW, 0),
+ "PPA block started in perf validation mode." },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_POW, 1),
+ "Reset PPA block state %u (1=reset, 0=recalculate)." },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_POW, 1),
+ "Power controller returned ABORT for Core-%d last request so retrying." },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_POW, 3),
+ "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_POW, 5),
+ "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_POW, 4),
+ "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_POW, 2),
+ "RAC pending? %d, RAC Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_POW, 0),
+ "Initiate powoff query for RAC." },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_HWR, 2),
+ "Lockup detected on DM%d, FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_HWR, 3),
+ "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_HWR, 0),
+ "Reset HW" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_HWR, 0),
+ "Lockup recovered." },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_HWR, 2),
+ "Lock-up DM%d FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_HWR, 4),
+ "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_HWR, 3),
+ "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_HWR, 3),
+ "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_HWR, 4),
+ "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_HWR, 4),
+ "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_HWR, 3),
+ "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_HWR, 4),
+ "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_HWR, 3),
+ "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_HWR, 2),
+ "Discarded cmd on DM%u FWCtx=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_HWR, 6),
+ "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_HWR, 2),
+ "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_HWR, 5),
+ "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_HWR, 8),
+ "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_HWR, 2),
+ "FL reconstruction 0x%08.8x c%d" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_HWR, 3),
+ "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x." },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_HWR, 2),
+ "Reset HW (mmu:%d, extmem: %d)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_HWR, 4),
+ "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_HWR, 2),
+ "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_HWR, 5),
+ "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_HWR, 1),
+ "Recovery DM%u: DM fully recovered" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_HWR, 2),
+ "DM%u: Hold scheduling due to R-Flag = 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_HWR, 0),
+ "Analysis: Need freelist reconstruction" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_HWR, 2),
+ "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_HWR, 2),
+ "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_HWR, 2),
+ "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_HWR, 0),
+ "GPU has locked up" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_HWR, 1),
+ "DM%u ready for HWR" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_HWR, 2),
+ "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_HWR, 1),
+ "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_HWR, 1),
+ "DM%u timed out" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_HWR, 1),
+ "RGX_CR_EVENT_STATUS=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_HWR, 2),
+ "DM%u lockup falsely detected, R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_HWR, 0),
+ "GPU has overrun its deadline" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_HWR, 0),
+ "GPU has failed a poll" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_HWR, 2),
+ "RGX DM%u phase count=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_HWR, 2),
+ "Reset HW (loop:%d, poll failures: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_HWR, 1),
+ "MMU fault event: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_HWR, 1),
+ "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_HWR, 1),
+ "Fast CRC Failed. Proceeding to full register checking (DM: %u)." },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_HWR, 2),
+ "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_HWR, 2),
+ "Fast CRC Check result for DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_HWR, 2),
+ "Full Signature Check result for DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_HWR, 3),
+ "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_HWR, 3),
+ "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_HWR, 2),
+ "Deadline counter for DM%u is HWRDeadline=%u" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_HWR, 1),
+ "Holding Scheduling on OSid %u due to pending freelist reconstruction" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_HWR, 2),
+ "Requesting reconstruction for freelist 0x%x (ID=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_HWR, 1),
+ "Reconstruction of freelist ID=%d complete" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_HWR, 4),
+ "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_HWR, 1),
+ "Reconstruction of freelist ID=%d failed" },
+ { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_HWR, 4),
+ "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_HWR, 4),
+ "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_HWR, 2),
+ "USC slots: %u used by DM%u" },
+ { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_HWR, 1),
+ "USC slots: %u empty" },
+ { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_HWR, 5),
+ "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_HWR, 1),
+ "Begin hardware reset (HWR Counter=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_HWR, 1),
+ "Finished hardware reset (HWR Counter=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_HWR, 2),
+ "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction" },
+ { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_HWR, 5),
+ "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_HWR, 4),
+ "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_HWR, 3),
+ "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_HWR, 1),
+ "At least one other DM is running okay so DM%u will get another chance" },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_HWR, 2),
+ "Reconstructing in FW, FL: 0x%x (ID=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_HWR, 4),
+ "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)" },
+ { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_HWR, 5),
+ "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_HWR, 3),
+ "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_HWR, 1),
+ "End long HW poll (result=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_HWR, 3),
+ "DM%u has taken %d ticks and deadline is %d ticks" },
+ { ROGUE_FW_LOG_CREATESFID(74, ROGUE_FW_GROUP_HWR, 5),
+ "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u" },
+ { ROGUE_FW_LOG_CREATESFID(75, ROGUE_FW_GROUP_HWR, 6),
+ "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(76, ROGUE_FW_GROUP_HWR, 1),
+ "GPU-%u has locked up" },
+ { ROGUE_FW_LOG_CREATESFID(77, ROGUE_FW_GROUP_HWR, 1),
+ "DM%u has locked up" },
+ { ROGUE_FW_LOG_CREATESFID(78, ROGUE_FW_GROUP_HWR, 2),
+ "Core %d RGX_CR_EVENT_STATUS=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(79, ROGUE_FW_GROUP_HWR, 2),
+ "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(80, ROGUE_FW_GROUP_HWR, 5),
+ "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(81, ROGUE_FW_GROUP_HWR, 3),
+ "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(82, ROGUE_FW_GROUP_HWR, 4),
+ "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(83, ROGUE_FW_GROUP_HWR, 4),
+ "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(84, ROGUE_FW_GROUP_HWR, 3),
+ "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(85, ROGUE_FW_GROUP_HWR, 3),
+ "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(86, ROGUE_FW_GROUP_HWR, 4),
+ "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(87, ROGUE_FW_GROUP_HWR, 6),
+ "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u" },
+ { ROGUE_FW_LOG_CREATESFID(88, ROGUE_FW_GROUP_HWR, 3),
+ "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(89, ROGUE_FW_GROUP_HWR, 2),
+ "TEXAS1_PFS poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(90, ROGUE_FW_GROUP_HWR, 2),
+ "BIF_PFS poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(91, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(92, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(93, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(94, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(95, ROGUE_FW_GROUP_HWR, 3),
+ "TEXAS%d_PFS poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(96, ROGUE_FW_GROUP_HWR, 3),
+ "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(97, ROGUE_FW_GROUP_HWR, 1),
+ "FW attempted to write to read-only GPU address 0x%08x" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_HWP, 2),
+ "Block 0x%x mapped to Config Idx %u" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x omitted from event - not enabled in HW" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x included in event - enabled in HW" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_HWP, 2),
+ "Select register state hi_0x%x lo_0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_HWP, 1),
+ "Counter stream block header word 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_HWP, 1),
+ "Counter register offset 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x config unset, skipping" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_HWP, 1),
+ "Accessing Indirect block 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_HWP, 1),
+ "Accessing Direct block 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_HWP, 1),
+ "Programmed counter select register at offset 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_HWP, 2),
+ "Block register offset 0x%x and value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_HWP, 1),
+ "Reading config block from driver 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_HWP, 2),
+ "Reading block range 0x%x to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_HWP, 1),
+ "Recording block 0x%x config from driver" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_HWP, 0),
+ "Finished reading config block from driver" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_HWP, 2),
+ "Custom Counter offset: 0x%x value: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_HWP, 2),
+ "Select counter n:%u ID:0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_HWP, 3),
+ "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_HWP, 1),
+ "Custom Counters filter status %d" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_HWP, 2),
+ "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_HWP, 2),
+ "The package will be discarded because it contains %d counters IDs while the upper limit is %d" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_HWP, 2),
+ "Check Filter 0x%x is 0x%x ?" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_HWP, 1),
+ "The custom block %u is reset" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_HWP, 1),
+ "Encountered an invalid command (%d)" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_HWP, 2),
+ "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_HWP, 3),
+ "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_HWP, 1),
+ "Custom Counter block: %d" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x ENABLED" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x DISABLED" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_HWP, 2),
+ "Accessing Indirect block 0x%x, instance %u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_HWP, 2),
+ "Counter register 0x%x, Value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_HWP, 1),
+ "Counters filter status %d" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_HWP, 2),
+ "Block 0x%x mapped to Ctl Idx %u" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_HWP, 0),
+ "Block(s) in use for workload estimation." },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_HWP, 3),
+ "GPU %u Cycle counter 0x%x, Value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_HWP, 3),
+ "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_HWP, 1),
+ "Blocks IGNORED for GPU %u" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_DMA, 5),
+ "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_DMA, 4),
+ "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_DMA, 1),
+ "DMA Interrupt register 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_DMA, 1),
+ "Waiting for transfer of type 0x%02x completion..." },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_DMA, 3),
+ "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_DMA, 3),
+ "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_DMA, 1),
+ "Transfer 0x%02x request poll failure" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_DMA, 2),
+ "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_DMA, 7),
+ "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_DBG, 2),
+ "0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_DBG, 1),
+ "0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_DBG, 2),
+ "0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_DBG, 3),
+ "0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_DBG, 4),
+ "0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_DBG, 5),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_DBG, 6),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_DBG, 7),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_DBG, 8),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_DBG, 1),
+ "%d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_DBG, 2),
+ "%d %d" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_DBG, 3),
+ "%d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_DBG, 4),
+ "%d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_DBG, 5),
+ "%d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_DBG, 6),
+ "%d %d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_DBG, 7),
+ "%d %d %d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_DBG, 8),
+ "%d %d %d %d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_DBG, 1),
+ "%u" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_DBG, 2),
+ "%u %u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_DBG, 3),
+ "%u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_DBG, 4),
+ "%u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_DBG, 5),
+ "%u %u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_DBG, 6),
+ "%u %u %u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_DBG, 7),
+ "%u %u %u %u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_DBG, 8),
+ "%u %u %u %u %u %u %u %u" },
+
+ { ROGUE_FW_LOG_CREATESFID(65535, ROGUE_FW_GROUP_NULL, 15),
+ "You should not use this string" },
+};
+
+#define ROGUE_FW_SF_FIRST ROGUE_FW_LOG_CREATESFID(0, ROGUE_FW_GROUP_NULL, 0)
+#define ROGUE_FW_SF_MAIN_ASSERT_FAILED ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MAIN, 1)
+#define ROGUE_FW_SF_LAST ROGUE_FW_LOG_CREATESFID(65535, ROGUE_FW_GROUP_NULL, 15)
+
+#endif /* PVR_ROGUE_FWIF_SF_H */
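
The table above maps each SFID value (built by ROGUE_FW_LOG_CREATESFID() from an event number, a log group and an argument count) to a printf-style format string, so a host-side consumer can turn binary firmware trace entries back into readable messages. A minimal sketch of such a lookup is shown below; the element struct and helper names are illustrative only and are not part of this patch.

    /* Hypothetical table element layout, for illustration only. */
    struct sfid_fmt_entry {
    	u32 id;          /* value produced by ROGUE_FW_LOG_CREATESFID() */
    	const char *fmt; /* printf-style format string for this event */
    };

    /* Linear lookup of the format string for a trace entry's SFID. */
    static const char *sfid_to_fmt(const struct sfid_fmt_entry *tbl,
    			       size_t count, u32 sfid)
    {
    	size_t i;

    	for (i = 0; i < count; i++) {
    		if (tbl[i].id == sfid)
    			return tbl[i].fmt;
    	}

    	return NULL; /* unknown SFID; caller reports the raw value instead */
    }
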
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
new file mode 100644
index 000000000000..6c09c15bf9bd
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SHARED_H
+#define PVR_ROGUE_FWIF_SHARED_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define ROGUE_FWIF_NUM_RTDATAS 2U
+#define ROGUE_FWIF_NUM_GEOMDATAS 1U
+#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U
+#define ROGUE_NUM_GEOM_CORES 1U
+
+#define ROGUE_NUM_GEOM_CORES_SIZE 2U
+
+/*
+ * Maximum number of UFOs in a CCB command.
+ * The number is based on having 32 sync prims (as originally), plus 32 sync
+ * checkpoints.
+ * Once the use of sync prims is no longer supported, the same total (64) will
+ * be retained, since the number of sync checkpoints backing a fence is not
+ * visible to the client driver and has to allow for the number of different
+ * timelines involved in fence merges.
+ */
+#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U)
+
+/*
+ * This is a generic limit imposed on any DM (GEOMETRY, FRAGMENT, CDM, TDM, 2D,
+ * TRANSFER) command passed through the bridge.
+ * On the server side of the bridge, any incoming kick command size is checked
+ * against this maximum limit.
+ * If the incoming command is larger than the specified limit, the bridge call
+ * is rejected with an error.
+ */
+#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)
+
+#define ROGUE_FWIF_PRBUFFER_START (0)
+#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0)
+#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1)
+#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2)
+
+struct rogue_fwif_dma_addr {
+ aligned_u64 dev_addr;
+ u32 fw_addr;
+ u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_ufo {
+ u32 addr;
+ u32 value;
+};
+
+#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1)
+
+struct rogue_fwif_sync_checkpoint {
+ u32 state;
+ u32 fw_ref_count;
+};
+
+struct rogue_fwif_cleanup_ctl {
+ /* Number of commands received by the FW */
+ u32 submitted_commands;
+ /* Number of commands executed by the FW */
+ u32 executed_commands;
+} __aligned(8);
+
+/*
+ * Used to share frame numbers across UM-KM-FW.
+ * The frame number is set in UM and is required both in KM (for HTB) and in
+ * FW (for the FW trace).
+ *
+ * May be used to house Kick flags in the future.
+ */
+struct rogue_fwif_cmd_common {
+ /* associated frame number */
+ u32 frame_num;
+};
+
+/*
+ * Geometry and fragment commands require a set of firmware addresses that are stored in the Kernel.
+ * Client has handle(s) to Kernel containers storing these addresses, instead of raw addresses. We
+ * have to patch/write these addresses in KM to prevent UM from controlling FW addresses directly.
+ * Typedefs for geometry and fragment commands are shared between Client and Firmware (both
+ * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use geometry|fragment
+ * CMD type definitions directly. Therefore we have a SHARED block that is shared between UM-KM-FW
+ * across all BVNC configurations.
+ */
+struct rogue_fwif_cmd_geom_frag_shared {
+ /* Common command attributes */
+ struct rogue_fwif_cmd_common cmn;
+
+ /*
+	 * RTData associated with this command. This is used for context
+	 * selection and for storing out the HW context when the TA is
+	 * switched out, so it can be continued later.
+ */
+ u32 hwrt_data_fw_addr;
+
+ /* Supported PR Buffers like Z/S/MSAA Scratch */
+ u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED];
+};
+
+/*
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ * Roff Doff Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ * < runnable commands >< !ready to run >
+ *
+ * Cmd 1 : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+struct rogue_fwif_cccb_ctl {
+ /* Host write offset into CCB. This must be aligned to 16 bytes. */
+ u32 write_offset;
+ /*
+ * Firmware read offset into CCB. Points to the command that is runnable
+ * on GPU, if R!=W
+ */
+ u32 read_offset;
+ /*
+ * Firmware fence dependency offset. Points to commands not ready, i.e.
+ * fence dependencies are not met.
+ */
+ u32 dep_offset;
+	/* Offset wrapping mask; total capacity of the CCB in bytes, minus 1. */
+ u32 wrap_mask;
+
+ /* Only used if SUPPORT_AGP is present. */
+ u32 read_offset2;
+
+ /* Only used if SUPPORT_AGP4 is present. */
+ u32 read_offset3;
+ /* Only used if SUPPORT_AGP4 is present. */
+ u32 read_offset4;
+
+ u32 padding;
+} __aligned(8);
+
+#define ROGUE_FW_LOCAL_FREELIST (0)
+#define ROGUE_FW_GLOBAL_FREELIST (1)
+#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST
+#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U)
+
+struct rogue_fwif_geom_registers_caswitch {
+ u64 geom_reg_vdm_context_state_base_addr;
+ u64 geom_reg_vdm_context_state_resume_addr;
+ u64 geom_reg_ta_context_state_base_addr;
+
+ struct {
+ u64 geom_reg_vdm_context_store_task0;
+ u64 geom_reg_vdm_context_store_task1;
+ u64 geom_reg_vdm_context_store_task2;
+
+ /* VDM resume state update controls */
+ u64 geom_reg_vdm_context_resume_task0;
+ u64 geom_reg_vdm_context_resume_task1;
+ u64 geom_reg_vdm_context_resume_task2;
+
+ u64 geom_reg_vdm_context_store_task3;
+ u64 geom_reg_vdm_context_store_task4;
+
+ u64 geom_reg_vdm_context_resume_task3;
+ u64 geom_reg_vdm_context_resume_task4;
+ } geom_state[2];
+};
+
+#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \
+ sizeof(struct rogue_fwif_geom_registers_caswitch)
+
+struct rogue_fwif_cdm_registers_cswitch {
+ u64 cdmreg_cdm_context_pds0;
+ u64 cdmreg_cdm_context_pds1;
+ u64 cdmreg_cdm_terminate_pds;
+ u64 cdmreg_cdm_terminate_pds1;
+
+ /* CDM resume controls */
+ u64 cdmreg_cdm_resume_pds0;
+ u64 cdmreg_cdm_context_pds0_b;
+ u64 cdmreg_cdm_resume_pds0_b;
+};
+
+struct rogue_fwif_static_rendercontext_state {
+ /* Geom registers for ctx switch */
+ struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE]
+ __aligned(8);
+};
+
+#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \
+ sizeof(struct rogue_fwif_static_rendercontext_state)
+
+struct rogue_fwif_static_computecontext_state {
+ /* CDM registers for ctx switch */
+ struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8);
+};
+
+#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \
+ sizeof(struct rogue_fwif_static_computecontext_state)
+
+enum rogue_fwif_prbuffer_state {
+ ROGUE_FWIF_PRBUFFER_UNBACKED = 0,
+ ROGUE_FWIF_PRBUFFER_BACKED,
+ ROGUE_FWIF_PRBUFFER_BACKING_PENDING,
+ ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING,
+};
+
+struct rogue_fwif_prbuffer {
+	/* Buffer ID. */
+ u32 buffer_id;
+ /* Needs On-demand Z/S/MSAA Buffer allocation */
+ bool on_demand __aligned(4);
+	/* Z/S/MSAA buffer state */
+ enum rogue_fwif_prbuffer_state state;
+ /* Cleanup state */
+ struct rogue_fwif_cleanup_ctl cleanup_sate;
+ /* Compatibility and other flags */
+ u32 prbuffer_flags;
+} __aligned(8);
+
+/* Last reset reason for a context. */
+enum rogue_context_reset_reason {
+ /* No reset reason recorded */
+ ROGUE_CONTEXT_RESET_REASON_NONE = 0,
+ /* Caused a reset due to locking up */
+ ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1,
+ /* Affected by another context locking up */
+ ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2,
+ /* Overran the global deadline */
+ ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3,
+ /* Affected by another context overrunning */
+ ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,
+ /* Forced reset to ensure scheduling requirements */
+ ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,
+ /* FW Safety watchdog triggered */
+ ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12,
+ /* FW page fault (no HWR) */
+ ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13,
+ /* FW execution error (GPU reset requested) */
+ ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14,
+ /* Host watchdog detected FW error */
+ ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15,
+ /* Geometry DM OOM event is not allowed */
+ ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16,
+};
+
+struct rogue_context_reset_reason_data {
+ enum rogue_context_reset_reason reset_reason;
+ u32 reset_ext_job_ref;
+};
+
+#include "pvr_rogue_fwif_shared_check.h"
+
+#endif /* PVR_ROGUE_FWIF_SHARED_H */
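To make the CCCB control layout above concrete, here is a minimal standalone sketch (not driver code; the helper name is made up) of how a byte offset such as write_offset would typically be advanced using wrap_mask, which struct rogue_fwif_cccb_ctl documents as the CCB capacity in bytes minus 1:

#include <stdint.h>

/*
 * Hypothetical helper illustrating the wrap_mask arithmetic documented for
 * struct rogue_fwif_cccb_ctl: the mask is the (power-of-two) CCB capacity in
 * bytes minus one, so adding a command size and masking wraps the offset in
 * place.
 */
static inline uint32_t cccb_advance_offset(uint32_t offset, uint32_t cmd_bytes,
					    uint32_t wrap_mask)
{
	return (offset + cmd_bytes) & wrap_mask;
}

Commands between read_offset and dep_offset then form the runnable window shown in the ASCII diagram above.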
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
new file mode 100644
index 000000000000..597ed54bbd3a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SHARED_CHECK_H
+#define PVR_ROGUE_FWIF_SHARED_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+ static_assert(offsetof(type, member) == (offset), \
+ "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+ static_assert(sizeof(type) == (size), #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_dma_addr, dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_dma_addr, fw_addr, 8);
+SIZE_CHECK(struct rogue_fwif_dma_addr, 16);
+
+OFFSET_CHECK(struct rogue_fwif_ufo, addr, 0);
+OFFSET_CHECK(struct rogue_fwif_ufo, value, 4);
+SIZE_CHECK(struct rogue_fwif_ufo, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, submitted_commands, 0);
+OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, executed_commands, 4);
+SIZE_CHECK(struct rogue_fwif_cleanup_ctl, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, write_offset, 0);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset, 4);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, dep_offset, 8);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, wrap_mask, 12);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset2, 16);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset3, 20);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset4, 24);
+SIZE_CHECK(struct rogue_fwif_cccb_ctl, 32);
+
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_base_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_resume_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_reg_ta_context_state_base_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task0, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task1, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task2, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task0, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task1, 56);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task2, 64);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task3, 72);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task4, 80);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task3, 88);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task4, 96);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task0, 104);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task1, 112);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task2, 120);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task0, 128);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task1, 136);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task2, 144);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task3, 152);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task4, 160);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task3, 168);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task4, 176);
+SIZE_CHECK(struct rogue_fwif_geom_registers_caswitch, 184);
+
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 0);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 8);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 16);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 24);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 32);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 40);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 48);
+SIZE_CHECK(struct rogue_fwif_cdm_registers_cswitch, 56);
+
+OFFSET_CHECK(struct rogue_fwif_static_rendercontext_state, ctxswitch_regs, 0);
+SIZE_CHECK(struct rogue_fwif_static_rendercontext_state, 368);
+
+OFFSET_CHECK(struct rogue_fwif_static_computecontext_state, ctxswitch_regs, 0);
+SIZE_CHECK(struct rogue_fwif_static_computecontext_state, 56);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_common, frame_num, 0);
+SIZE_CHECK(struct rogue_fwif_cmd_common, 4);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, cmn, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, hwrt_data_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, pr_buffer_fw_addr, 8);
+SIZE_CHECK(struct rogue_fwif_cmd_geom_frag_shared, 16);
+
+#endif /* PVR_ROGUE_FWIF_SHARED_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
new file mode 100644
index 000000000000..1c2c4ebedc25
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_STREAM_H
+#define PVR_ROGUE_FWIF_STREAM_H
+
+/**
+ * DOC: Streams
+ *
+ * Commands are submitted to the kernel driver in the form of streams.
+ *
+ * A command stream has the following layout :
+ * - A 64-bit header containing:
+ * * A u32 containing the length of the main stream inclusive of the length of the header.
+ * * A u32 for padding.
+ * - The main stream data.
+ * - The extension stream (optional), which is composed of:
+ * * One or more headers.
+ * * The extension stream data, corresponding to the extension headers.
+ *
+ * The main stream provides the base command data. This has a fixed layout based on the features
+ * supported by a given GPU.
+ *
+ * The extension stream provides the command parameters that are required for BRNs & ERNs for the
+ * current GPU. This stream is comprised of one or more headers, followed by data for each given
+ * BRN/ERN.
+ *
+ * Each header is a u32 containing a bitmask of quirks & enhancements in the extension stream, a
+ * "type" field determining the set of quirks & enhancements the bitmask represents, and a
+ * continuation bit determining whether any more headers are present. The headers are then followed
+ * by command data; this is specific to each quirk/enhancement. All unused / reserved bits in the
+ * header must be set to 0.
+ *
+ * All parameters and headers in the main and extension streams must be naturally aligned.
+ *
+ * If a parameter appears in both the main and extension streams, then the extension parameter is
+ * used.
+ */
+
+/*
+ * Stream extension header definition
+ */
+#define PVR_STREAM_EXTHDR_TYPE_SHIFT 29U
+#define PVR_STREAM_EXTHDR_TYPE_MASK (7U << PVR_STREAM_EXTHDR_TYPE_SHIFT)
+#define PVR_STREAM_EXTHDR_TYPE_MAX 8U
+#define PVR_STREAM_EXTHDR_CONTINUATION BIT(28U)
+
+#define PVR_STREAM_EXTHDR_DATA_MASK ~(PVR_STREAM_EXTHDR_TYPE_MASK | PVR_STREAM_EXTHDR_CONTINUATION)
+
+/*
+ * Stream extension header - Geometry 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_GEOM0 0U
+
+#define PVR_STREAM_EXTHDR_GEOM0_BRN49927 BIT(0U)
+
+#define PVR_STREAM_EXTHDR_GEOM0_VALID PVR_STREAM_EXTHDR_GEOM0_BRN49927
+
+/*
+ * Stream extension header - Fragment 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_FRAG0 0U
+
+#define PVR_STREAM_EXTHDR_FRAG0_BRN47217 BIT(0U)
+#define PVR_STREAM_EXTHDR_FRAG0_BRN49927 BIT(1U)
+
+#define PVR_STREAM_EXTHDR_FRAG0_VALID PVR_STREAM_EXTHDR_FRAG0_BRN49927
+
+/*
+ * Stream extension header - Compute 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_COMPUTE0 0U
+
+#define PVR_STREAM_EXTHDR_COMPUTE0_BRN49927 BIT(0U)
+
+#define PVR_STREAM_EXTHDR_COMPUTE0_VALID PVR_STREAM_EXTHDR_COMPUTE0_BRN49927
+
+#endif /* PVR_ROGUE_FWIF_STREAM_H */
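As a sketch of how the extension header fields described in the DOC block are unpacked (illustrative only, not the driver's parser; the relevant masks are repeated so the snippet compiles on its own):

#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1U << (n))			/* stand-in for <linux/bits.h> */

#define PVR_STREAM_EXTHDR_TYPE_SHIFT	29U
#define PVR_STREAM_EXTHDR_TYPE_MASK	(7U << PVR_STREAM_EXTHDR_TYPE_SHIFT)
#define PVR_STREAM_EXTHDR_CONTINUATION	BIT(28U)
#define PVR_STREAM_EXTHDR_DATA_MASK	~(PVR_STREAM_EXTHDR_TYPE_MASK | PVR_STREAM_EXTHDR_CONTINUATION)

/* Split one extension header word into its type, quirk/enhancement bitmask
 * and continuation flag, as laid out in the DOC comment above.
 */
static void pvr_exthdr_decode(uint32_t hdr, uint32_t *type, uint32_t *bits,
			      bool *more)
{
	*type = (hdr & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT;
	*bits = hdr & PVR_STREAM_EXTHDR_DATA_MASK;
	*more = (hdr & PVR_STREAM_EXTHDR_CONTINUATION) != 0;
}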
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
new file mode 100644
index 000000000000..684766006703
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_HEAP_CONFIG_H
+#define PVR_ROGUE_HEAP_CONFIG_H
+
+#include <linux/sizes.h>
+
+/*
+ * ROGUE Device Virtual Address Space Definitions
+ *
+ * This file defines the ROGUE virtual address heaps that are used in
+ * application memory contexts. It also shows where the Firmware memory heap
+ * fits into this, but the firmware heap is only ever created in the
+ * kernel driver and never exposed to userspace.
+ *
+ * ROGUE_PDSCODEDATA_HEAP_BASE and ROGUE_USCCODE_HEAP_BASE will be programmed,
+ * on a global basis, into ROGUE_CR_PDS_EXEC_BASE and ROGUE_CR_USC_CODE_BASE_*
+ * respectively. Therefore if client drivers use multiple configs they must
+ * still be consistent with their definitions for these heaps.
+ *
+ * Base addresses have to be a multiple of 4MiB.
+ * Heaps must not start at 0x0000000000, as this is reserved for internal
+ * use within the driver.
+ * Range comments (those starting in column 0 below) act as section headings
+ * and sit above the heaps in that range. Often this is the reserved
+ * size of the heap within the range.
+ */
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0000_0000 - 0x00_0040_0000 */
+/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED */
+
+/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
+/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : RESERVED **/
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
+/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
+#define ROGUE_GENERAL_HEAP_BASE 0x8000000000ull
+#define ROGUE_GENERAL_HEAP_SIZE SZ_128G
+
+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF */
+/* 640 GiB to 704 GiB, size of 64 GiB : FREE */
+
+/* 0xB0_0000_0000 - 0xB7_FFFF_FFFF */
+/* 704 GiB to 736 GiB, size of 32 GiB : FREE */
+
+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF */
+/* 736 GiB to 768 GiB, size of 32 GiB : RESERVED */
+
+/* 0xC0_0000_0000 ************************************************************/
+
+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF */
+/* 768 GiB to 872 GiB, size of 104 GiB : FREE */
+
+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF */
+/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP */
+#define ROGUE_PDSCODEDATA_HEAP_BASE 0xDA00000000ull
+#define ROGUE_PDSCODEDATA_HEAP_SIZE SZ_4G
+
+/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF */
+/* 876 GiB to 880 GiB, size of 256 MiB (reserved 4GiB) : BRN **/
+/*
+ * The BRN63142 quirk workaround requires Region Header memory to be at the top
+ * of a 16GiB aligned range. This is so when masked with 0x03FFFFFFFF the
+ * address will avoid aliasing PB addresses. Start at 879.75GiB. Size of 256MiB.
+ */
+#define ROGUE_RGNHDR_HEAP_BASE 0xDBF0000000ull
+#define ROGUE_RGNHDR_HEAP_SIZE SZ_256M
+
+/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF */
+/* 880 GiB to 896 GiB, size of 16 GiB : FREE */
+
+/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF */
+/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP */
+#define ROGUE_USCCODE_HEAP_BASE 0xE000000000ull
+#define ROGUE_USCCODE_HEAP_SIZE SZ_4G
+
+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF */
+/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED */
+
+/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF */
+/* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP */
+#define ROGUE_FW_HEAP_BASE 0xE1C0000000ull
+
+/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF */
+/* 904 GiB to 912 GiB, size of 8 GiB : FREE */
+
+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF */
+/* 912 GiB to 928 GiB, size of 16 GiB : TRANSFER_FRAG */
+#define ROGUE_TRANSFER_FRAG_HEAP_BASE 0xE400000000ull
+#define ROGUE_TRANSFER_FRAG_HEAP_SIZE SZ_16G
+
+/* 0xE8_0000_0000 - 0xF1_FFFF_FFFF */
+/* 928 GiB to 968 GiB, size of 40 GiB : RESERVED */
+
+/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/
+/* 968 GiB to 969 GiB, size of 2 MiB : VISTEST_HEAP */
+#define ROGUE_VISTEST_HEAP_BASE 0xF200000000ull
+#define ROGUE_VISTEST_HEAP_SIZE SZ_2M
+
+/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF */
+/* 969 GiB to 972 GiB, size of 3 GiB : FREE */
+
+/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF */
+/* 972 GiB to 1024 GiB, size of 52 GiB : FREE */
+
+/* 0xFF_FFFF_FFFF ************************************************************/
+
+#endif /* PVR_ROGUE_HEAP_CONFIG_H */
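A small standalone sketch (assumed helper, not driver code) of checking a device-virtual address against the general heap defined above; the SZ_128G stand-in mirrors the <linux/sizes.h> value:

#include <stdbool.h>
#include <stdint.h>

#define SZ_128G				(128ULL << 30)	/* stand-in for <linux/sizes.h> */
#define ROGUE_GENERAL_HEAP_BASE		0x8000000000ull
#define ROGUE_GENERAL_HEAP_SIZE		SZ_128G

/* Illustrative only: does a device-virtual address fall in the general heap? */
static bool addr_in_general_heap(uint64_t dev_addr)
{
	return dev_addr >= ROGUE_GENERAL_HEAP_BASE &&
	       dev_addr < ROGUE_GENERAL_HEAP_BASE + ROGUE_GENERAL_HEAP_SIZE;
}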
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_meta.h b/drivers/gpu/drm/imagination/pvr_rogue_meta.h
new file mode 100644
index 000000000000..3020e6582daa
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_meta.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_META_H
+#define PVR_ROGUE_META_H
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ ******************************************************************************
+ * META registers and MACROS
+ *****************************************************************************
+ */
+#define META_CR_CTRLREG_BASE(t) (0x04800000U + (0x1000U * (t)))
+
+#define META_CR_TXPRIVEXT (0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN BIT(7)
+
+#define META_CR_SYSC_JTAG_THREAD (0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004)
+
+#define META_CR_PERF_COUNT0 (0x0480FFE0)
+#define META_CR_PERF_COUNT1 (0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT (28)
+#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS \
+ (0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE (0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT (24)
+#define META_CR_PERF_COUNT_THR_MASK (0x0F000000)
+#define META_CR_PERF_COUNT_THR_0 (0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1 (0x2 << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT (0x04820500)
+#define META_CR_PERF_ICORE0 (0x0480FFD0)
+#define META_CR_PERF_ICORE1 (0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS (0x8)
+
+#define META_CR_PERF_COUNT(ctrl, thr) \
+ ((META_CR_PERF_COUNT_CTRL_##ctrl << META_CR_PERF_COUNT_CTRL_SHIFT) | \
+ ((thr) << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U)
+#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U)
+
+/* Poll for done. */
+#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U)
+/* Set for read. */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U)
+#define META_CR_TXUXXRXRQ_TX_S (12)
+#define META_CR_TXUXXRXRQ_RX_S (4)
+#define META_CR_TXUXXRXRQ_UXX_S (0)
+
+/* Internal ctrl regs. */
+#define META_CR_TXUIN_ID (0x0)
+/* Data unit regs. */
+#define META_CR_TXUD0_ID (0x1)
+/* Data unit regs. */
+#define META_CR_TXUD1_ID (0x2)
+/* Address unit regs. */
+#define META_CR_TXUA0_ID (0x3)
+/* Address unit regs. */
+#define META_CR_TXUA1_ID (0x4)
+/* PC registers. */
+#define META_CR_TXUPC_ID (0x5)
+
+/* Macros to calculate register access values. */
+#define META_CR_CORE_REG(thr, reg_num, unit) \
+ (((u32)(thr) << META_CR_TXUXXRXRQ_TX_S) | \
+ ((u32)(reg_num) << META_CR_TXUXXRXRQ_RX_S) | \
+ ((u32)(unit) << META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUPC_ID)
+
+#define META_CR_COREREG_ENABLE (0x0000000U)
+#define META_CR_COREREG_STATUS (0x0000010U)
+#define META_CR_COREREG_DEFR (0x00000A0U)
+#define META_CR_COREREG_PRIVEXT (0x00000E8U)
+
+#define META_CR_T0ENABLE_OFFSET \
+ (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE)
+#define META_CR_T0STATUS_OFFSET \
+ (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS)
+#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR)
+#define META_CR_T0PRIVEXT_OFFSET \
+ (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_T1ENABLE_OFFSET \
+ (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE)
+#define META_CR_T1STATUS_OFFSET \
+ (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS)
+#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR)
+#define META_CR_T1PRIVEXT_OFFSET \
+ (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */
+#define META_CR_TXSTATUS_PRIV (0x00020000U)
+#define META_CR_TXPRIVEXT_MINIM (0x00000080U)
+
+#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U)
+
+#define META_CR_TXCLKCTRL (0x048000B0)
+#define META_CR_TXCLKCTRL_ALL_ON (0x55111111)
+#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222)
+
+#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6)
+#define META_CR_SYSC_DCPART(n) (0x04830200 + (n) * 0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31)
+#define META_CR_SYSC_ICPART(n) (0x04830220 + (n) * 0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7)
+#define META_CR_MMCU_DCACHE_CTRL (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1)
+
+/*
+ ******************************************************************************
+ * META LDR Format
+ ******************************************************************************
+ */
+/* Block header structure. */
+struct rogue_meta_ldr_block_hdr {
+ u32 dev_id;
+ u32 sl_code;
+ u32 sl_data;
+ u16 pc_ctrl;
+ u16 crc;
+};
+
+/* High level data stream block structure. */
+struct rogue_meta_ldr_l1_data_blk {
+ u16 cmd;
+ u16 length;
+ u32 next;
+ u32 cmd_data[4];
+};
+
+/* High level data stream block structure. */
+struct rogue_meta_ldr_l2_data_blk {
+ u16 tag;
+ u16 length;
+ u32 block_data[4];
+};
+
+/* Config command structure. */
+struct rogue_meta_ldr_cfg_blk {
+ u32 type;
+ u32 block_data[4];
+};
+
+/* Block type definitions */
+#define ROGUE_META_LDR_COMMENT_TYPE_MASK (0x0010U)
+#define ROGUE_META_LDR_BLK_IS_COMMENT(x) (((x) & ROGUE_META_LDR_COMMENT_TYPE_MASK) != 0U)
+
+/*
+ * Command definitions
+ * Value Name Description
+ * 0 LoadMem Load memory with binary data.
+ * 1 LoadCore Load a set of core registers.
+ * 2 LoadMMReg Load a set of memory mapped registers.
+ * 3 StartThreads Set each thread PC and SP, then enable threads.
+ * 4 ZeroMem Zeros a memory region.
+ * 5 Config Perform a configuration command.
+ */
+#define ROGUE_META_LDR_CMD_MASK (0x000FU)
+
+#define ROGUE_META_LDR_CMD_LOADMEM (0x0000U)
+#define ROGUE_META_LDR_CMD_LOADCORE (0x0001U)
+#define ROGUE_META_LDR_CMD_LOADMMREG (0x0002U)
+#define ROGUE_META_LDR_CMD_START_THREADS (0x0003U)
+#define ROGUE_META_LDR_CMD_ZEROMEM (0x0004U)
+#define ROGUE_META_LDR_CMD_CONFIG (0x0005U)
+
+/*
+ * Config Command definitions
+ * Value Name Description
+ * 0 Pause Pause for x times 100 instructions
+ * 1 Read Read a value from register - No value return needed.
+ * Utilises effects of issuing reads to certain registers
+ * 2 Write Write to mem location
+ * 3 MemSet Set mem to value
+ * 4 MemCheck check mem for specific value.
+ */
+#define ROGUE_META_LDR_CFG_PAUSE (0x0000)
+#define ROGUE_META_LDR_CFG_READ (0x0001)
+#define ROGUE_META_LDR_CFG_WRITE (0x0002)
+#define ROGUE_META_LDR_CFG_MEMSET (0x0003)
+#define ROGUE_META_LDR_CFG_MEMCHECK (0x0004)
+
+/*
+ ******************************************************************************
+ * ROGUE FW segmented MMU definitions
+ ******************************************************************************
+ */
+/* All threads can access the segment. */
+#define ROGUE_FW_SEGMMU_ALLTHRS (0xf << 8U)
+/* Writable. */
+#define ROGUE_FW_SEGMMU_WRITEABLE (0x1U << 1U)
+/* All threads can access and writable. */
+#define ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE \
+ (ROGUE_FW_SEGMMU_ALLTHRS | ROGUE_FW_SEGMMU_WRITEABLE)
+
+/* Direct map region 10 used for mapping GPU memory - max 8MB. */
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ID (10U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U)
+
+/* Segment IDs. */
+#define ROGUE_FW_SEGMMU_DATA_ID (1U)
+#define ROGUE_FW_SEGMMU_BOOTLDR_ID (2U)
+#define ROGUE_FW_SEGMMU_TEXT_ID (ROGUE_FW_SEGMMU_BOOTLDR_ID)
+
+/*
+ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU.
+ * All the segments configured through the macro ROGUE_FW_SEGMMU_OUTADDR_TOP are
+ * CACHED in the SLC.
+ * The interface has been kept the same to simplify the code changes.
+ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) \
+ ((((u64)((pers) & 0x3)) << 52) | (((u64)((mmu_ctx) & 0xFF)) << 44) | \
+ (((u64)((slc_policy) & 0x1)) << 40))
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) \
+ ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3, 0x0, mmu_ctx)
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) \
+ ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0, 0x1, mmu_ctx)
+
+/*
+ * To configure the Page Catalog and BIF-DM fed into the BIF for Garten
+ * accesses through this segment.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) \
+ (((u64)((u64)(pc) & 0xFU) << 44U) | ((u64)((u64)(bifdm) & 0xFU) << 40U))
+
+#define ROGUE_FW_SEGMMU_META_BIFDM_ID (0x7U)
+
+/* META segments have 4kB minimum size. */
+#define ROGUE_FW_SEGMMU_ALIGN (0x1000U)
+
+/* Segmented MMU registers (n = segment id). */
+#define META_CR_MMCU_SEGMENT_N_BASE(n) (0x04850000U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_LIMIT(n) (0x04850004U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_OUTA0(n) (0x04850008U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_OUTA1(n) (0x0485000CU + ((n) * 0x10U))
+
+/*
+ * The following defines must be recalculated if the Meta MMU segments used
+ * to access Host-FW data are changed
+ * Current combinations are:
+ * - SLC uncached, META cached, FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached, META cached, FW base address 0x10000000
+ * - SLC cached, META uncached, FW base address 0x90000000
+ */
+#define ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U)
+#define ROGUE_FW_SEGMMU_DATA_META_CACHED (0x0U)
+#define ROGUE_FW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT)
+#define ROGUE_FW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT)
+/*
+ * For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in
+ * the PTEs for the FW data, not in the Meta Segment MMU, which means these
+ * defines have no real effect in those cases.
+ */
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U)
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U)
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U)
+
+/*
+ ******************************************************************************
+ * ROGUE FW Bootloader defaults
+ ******************************************************************************
+ */
+#define ROGUE_FW_BOOTLDR_META_ADDR (0x40000000U)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR_1 (0x000000E1)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR \
+ ((((u64)ROGUE_FW_BOOTLDR_DEVV_ADDR_1) << 32) | \
+ ROGUE_FW_BOOTLDR_DEVV_ADDR_0)
+#define ROGUE_FW_BOOTLDR_LIMIT (0x1FFFF000)
+#define ROGUE_FW_MAX_BOOTLDR_OFFSET (0x1000)
+
+/* Bootloader configuration offset is in dwords (512 bytes) */
+#define ROGUE_FW_BOOTLDR_CONF_OFFSET (0x80)
+
+/*
+ ******************************************************************************
+ * ROGUE META Stack
+ ******************************************************************************
+ */
+#define ROGUE_META_STACK_SIZE (0x1000U)
+
+/*
+ ******************************************************************************
+ * ROGUE META Core memory
+ ******************************************************************************
+ */
+/* Code and data both map to the same physical memory. */
+#define ROGUE_META_COREMEM_CODE_ADDR (0x80000000U)
+#define ROGUE_META_COREMEM_DATA_ADDR (0x82000000U)
+#define ROGUE_META_COREMEM_OFFSET_MASK (0x01ffffffU)
+
+#define ROGUE_META_IS_COREMEM_CODE(a, b) \
+ ({ \
+ u32 _a = (a), _b = (b); \
+ ((_a) >= ROGUE_META_COREMEM_CODE_ADDR) && \
+ ((_a) < (ROGUE_META_COREMEM_CODE_ADDR + (_b))); \
+ })
+#define ROGUE_META_IS_COREMEM_DATA(a, b) \
+ ({ \
+ u32 _a = (a), _b = (b); \
+ ((_a) >= ROGUE_META_COREMEM_DATA_ADDR) && \
+ ((_a) < (ROGUE_META_COREMEM_DATA_ADDR + (_b))); \
+ })
+/*
+ ******************************************************************************
+ * 2nd thread
+ ******************************************************************************
+ */
+#define ROGUE_FW_THR1_PC (0x18930000)
+#define ROGUE_FW_THR1_SP (0x78890000)
+
+/*
+ ******************************************************************************
+ * META compatibility
+ ******************************************************************************
+ */
+
+#define META_CR_CORE_ID (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT (16U)
+#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU)
+
+#define ROGUE_CR_META_MTP218_CORE_ID_VALUE 0x19
+#define ROGUE_CR_META_MTP219_CORE_ID_VALUE 0x1E
+#define ROGUE_CR_META_LTP218_CORE_ID_VALUE 0x1C
+#define ROGUE_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#define ROGUE_FW_PROCESSOR_META "META"
+
+#endif /* PVR_ROGUE_META_H */
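The TXUXXRXRQ/TXUXXRXDT defines above describe an indirect core-register interface: write a request word (built with META_CR_CORE_REG() plus the RDnWR bit for reads), poll the DREADY bit, then read the data register. The sketch below shows that sequence; it assumes the META_CR_* defines from this header are in scope, the gpu_read32()/gpu_write32() accessors are hypothetical stand-ins for the driver's MMIO helpers, and error/timeout handling is omitted.

#include <stdint.h>

/* Hypothetical MMIO accessors; a real driver would use its own register I/O. */
extern uint32_t gpu_read32(uint32_t offset);
extern void gpu_write32(uint32_t offset, uint32_t value);

/* Read thread 0's PC via the indirect core-register interface (sketch only). */
static uint32_t meta_read_thr0_pc(void)
{
	/* Request a read of the THR0 PC register. */
	gpu_write32(META_CR_TXUXXRXRQ_OFFSET,
		    META_CR_THR0_PC | META_CR_TXUXXRXRQ_RDnWR_BIT);

	/* Poll for done; a real implementation would bound this loop. */
	while (!(gpu_read32(META_CR_TXUXXRXRQ_OFFSET) &
		 META_CR_TXUXXRXRQ_DREADY_BIT))
		;

	/* The requested value is now available in the data register. */
	return gpu_read32(META_CR_TXUXXRXDT_OFFSET);
}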
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips.h b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
new file mode 100644
index 000000000000..41ed618fda3f
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_MIPS_H
+#define PVR_ROGUE_MIPS_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Utility defines for memory management. */
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K (12)
+#define ROGUE_MIPSFW_PAGE_SIZE_4K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_PAGE_MASK_4K (ROGUE_MIPSFW_PAGE_SIZE_4K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K (16)
+#define ROGUE_MIPSFW_PAGE_SIZE_64K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K)
+#define ROGUE_MIPSFW_PAGE_MASK_64K (ROGUE_MIPSFW_PAGE_SIZE_64K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K (18)
+#define ROGUE_MIPSFW_PAGE_SIZE_256K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K)
+#define ROGUE_MIPSFW_PAGE_MASK_256K (ROGUE_MIPSFW_PAGE_SIZE_256K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB (20)
+#define ROGUE_MIPSFW_PAGE_SIZE_1MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB)
+#define ROGUE_MIPSFW_PAGE_MASK_1MB (ROGUE_MIPSFW_PAGE_SIZE_1MB - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB (22)
+#define ROGUE_MIPSFW_PAGE_SIZE_4MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB)
+#define ROGUE_MIPSFW_PAGE_MASK_4MB (ROGUE_MIPSFW_PAGE_SIZE_4MB - 1)
+#define ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE (2)
+/* log2 page table sizes dependent on FW heap size and page size (for each OS). */
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K + \
+ ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_64K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K + \
+ ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+/* Maximum number of page table pages (both Host and MIPS pages). */
+#define ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES (4)
+/* Total number of TLB entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES (16)
+/* "Uncached" caching policy. */
+#define ROGUE_MIPSFW_UNCACHED_CACHE_POLICY (2)
+/* "Write-back write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY (3)
+/* "Write-through no write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY (1)
+/* Cached policy used by MIPS in case of physical bus on 32 bit. */
+#define ROGUE_MIPSFW_CACHED_POLICY (ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cached policy used by MIPS in case of physical bus on more than 32 bit. */
+#define ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT (ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES)
+
+/* MIPS EntryLo/PTE format. */
+
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000)
+
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000)
+
+/* Page Frame Number */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT (6)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12)
+/* Mask used for the MIPS Page Table in case of physical bus on 32 bit. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE (20)
+/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24)
+#define ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+ ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U)
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7)
+
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_SHIFT (2U)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_EN (0X00000004)
+
+#define ROGUE_MIPSFW_ENTRYLO_VALID_SHIFT (1U)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_EN (0X00000002)
+
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_SHIFT (0U)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN (0X00000001)
+
+#define ROGUE_MIPSFW_ENTRYLO_DVG (ROGUE_MIPSFW_ENTRYLO_DIRTY_EN | \
+ ROGUE_MIPSFW_ENTRYLO_VALID_EN | \
+ ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN)
+#define ROGUE_MIPSFW_ENTRYLO_UNCACHED (ROGUE_MIPSFW_UNCACHED_CACHE_POLICY << \
+ ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED (ROGUE_MIPSFW_ENTRYLO_DVG | \
+ ROGUE_MIPSFW_ENTRYLO_UNCACHED)
+
+/* Remap Range Config Addr Out. */
+/* These defines refer to the upper half of the Remap Range Config register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0)
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define ROGUE_MIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+ ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ * - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ * - ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ * - ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ * - (benign trampoline) : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper.
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2)
+#define ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES BIT(ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define ROGUE_MIPSFW_TRAMPOLINE_SIZE (ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES << \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES + \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+
+#define ROGUE_MIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define ROGUE_MIPSFW_TRAMPOLINE_OFFSET(a) ((a) - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define ROGUE_MIPSFW_SENSITIVE_ADDR(a) (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN == \
+ (~((1 << ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE) - 1) \
+ & (a)))
+
+/* Firmware virtual layout and remap configuration. */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address correspondent to the virtual base address,
+ * used as input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ * the bottom of the base input address that survive onto the output address
+ * (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region.
+ */
+
+/* Boot remap setup. */
+#define ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000)
+#define ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000)
+#define ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup. */
+#define ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000)
+#define ROGUE_MIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000)
+#define ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000)
+#define ROGUE_MIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup. */
+#define ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000)
+#define ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000)
+#define ROGUE_MIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_EXCEPTIONS_VIRTUAL_BASE (ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Permanent mappings setup. */
+#define ROGUE_MIPSFW_PT_VIRTUAL_BASE (0xCF000000)
+#define ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000)
+#define ROGUE_MIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
+
+/* Bootloader configuration data. */
+/*
+ * Bootloader configuration offset (where ROGUE_MIPSFW_BOOT_DATA lives)
+ * within the bootloader/NMI data page.
+ */
+#define ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET (0x0)
+
+/* NMI shared data. */
+/* Base address of the shared data within the bootloader/NMI data page. */
+#define ROGUE_MIPSFW_NMI_SHARED_DATA_BASE (0x100)
+/* Size used by Debug dump data. */
+#define ROGUE_MIPSFW_NMI_SHARED_SIZE (0x2B0)
+/* Offsets in the NMI shared area in 32-bit words. */
+#define ROGUE_MIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
+#define ROGUE_MIPSFW_NMI_STATE_OFFSET (0x1)
+#define ROGUE_MIPSFW_NMI_ERROR_STATE_SET (0x1)
+
+/* MIPS boot stage. */
+#define ROGUE_MIPSFW_BOOT_STAGE_OFFSET (0x400)
+
+/*
+ * MIPS private data in the bootloader data page.
+ * Memory below this offset is used by the FW only, no interface data allowed.
+ */
+#define ROGUE_MIPSFW_PRIVATE_DATA_OFFSET (0x800)
+
+struct rogue_mipsfw_boot_data {
+ u64 stack_phys_addr;
+ u64 reg_base;
+ u64 pt_phys_addr[ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES];
+ u32 pt_log2_page_size;
+ u32 pt_num_pages;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+#define ROGUE_MIPSFW_GET_OFFSET_IN_DWORDS(offset) ((offset) / sizeof(u32))
+#define ROGUE_MIPSFW_GET_OFFSET_IN_QWORDS(offset) ((offset) / sizeof(u64))
+
+/* Used for compatibility checks. */
+#define ROGUE_MIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU)
+#define ROGUE_MIPSFW_ARCHTYPE_VER_SHIFT (10U)
+#define ROGUE_MIPSFW_CORE_ID_VALUE (0x001U)
+#define ROGUE_FW_PROCESSOR_MIPS "MIPS"
+
+/* microAptivAP cache line size. */
+#define ROGUE_MIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U)
+
+/*
+ * The SOCIF transactions are identified with the top 16 bits of the physical address emitted by
+ * the MIPS.
+ */
+#define ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U)
+
+/* Values to put in the MIPS selectors for performance counters. */
+/* Icache accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U)
+/* Icache misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U)
+
+/* Dcache accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U)
+/* Dcache misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U)
+
+/* ITLB instruction accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U)
+/* JTLB instruction accesses misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U)
+
+ /* Instructions completed in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U)
+/* JTLB data misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U)
+
+/* Shift for the Event field in the MIPS perf ctrl registers. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U)
+
+/* Additional flags for performance counters. See MIPS manual for further reference. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U)
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U)
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U)
+
+#define ROGUE_MIPSFW_C0_NBHWIRQ 8
+
+/* Macros to decode C0_Cause register. */
+#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE(cause) (((cause) & 0x7c) >> 2)
+#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE_FWERROR 9
+/* Use only when Coprocessor Unusable exception. */
+#define ROGUE_MIPSFW_C0_CAUSE_UNUSABLE_UNIT(cause) (((cause) >> 28) & 0x3)
+#define ROGUE_MIPSFW_C0_CAUSE_PENDING_HWIRQ(cause) (((cause) & 0x3fc00) >> 10)
+#define ROGUE_MIPSFW_C0_CAUSE_FDCIPENDING BIT(21)
+#define ROGUE_MIPSFW_C0_CAUSE_IV BIT(23)
+#define ROGUE_MIPSFW_C0_CAUSE_IC BIT(25)
+#define ROGUE_MIPSFW_C0_CAUSE_PCIPENDING BIT(26)
+#define ROGUE_MIPSFW_C0_CAUSE_TIPENDING BIT(30)
+#define ROGUE_MIPSFW_C0_CAUSE_BRANCH_DELAY BIT(31)
+
+/* Macros to decode C0_Debug register. */
+#define ROGUE_MIPSFW_C0_DEBUG_EXCCODE(debug) (((debug) >> 10) & 0x1f)
+#define ROGUE_MIPSFW_C0_DEBUG_DSS BIT(0)
+#define ROGUE_MIPSFW_C0_DEBUG_DBP BIT(1)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBL BIT(2)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBS BIT(3)
+#define ROGUE_MIPSFW_C0_DEBUG_DIB BIT(4)
+#define ROGUE_MIPSFW_C0_DEBUG_DINT BIT(5)
+#define ROGUE_MIPSFW_C0_DEBUG_DIBIMPR BIT(6)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBLIMPR BIT(18)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBSIMPR BIT(19)
+#define ROGUE_MIPSFW_C0_DEBUG_IEXI BIT(20)
+#define ROGUE_MIPSFW_C0_DEBUG_DBUSEP BIT(21)
+#define ROGUE_MIPSFW_C0_DEBUG_CACHEEP BIT(22)
+#define ROGUE_MIPSFW_C0_DEBUG_MCHECKP BIT(23)
+#define ROGUE_MIPSFW_C0_DEBUG_IBUSEP BIT(24)
+#define ROGUE_MIPSFW_C0_DEBUG_DM BIT(30)
+#define ROGUE_MIPSFW_C0_DEBUG_DBD BIT(31)
+
+/* Macros to decode TLB entries. */
+#define ROGUE_MIPSFW_TLB_GET_MASK(page_mask) (((page_mask) >> 13) & 0XFFFFU)
+/* Page size in KB. */
+#define ROGUE_MIPSFW_TLB_GET_PAGE_SIZE(page_mask) ((((page_mask) | 0x1FFF) + 1) >> 11)
+/* Page size in KB. */
+#define ROGUE_MIPSFW_TLB_GET_PAGE_MASK(page_size) ((((page_size) << 11) - 1) & ~0x7FF)
+#define ROGUE_MIPSFW_TLB_GET_VPN2(entry_hi) ((entry_hi) >> 13)
+#define ROGUE_MIPSFW_TLB_GET_COHERENCY(entry_lo) (((entry_lo) >> 3) & 0x7U)
+#define ROGUE_MIPSFW_TLB_GET_PFN(entry_lo) (((entry_lo) >> 6) & 0XFFFFFU)
+/* GET_PA uses a non-standard PFN mask for 36 bit addresses. */
+#define ROGUE_MIPSFW_TLB_GET_PA(entry_lo) (((u64)(entry_lo) & \
+ ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6)
+#define ROGUE_MIPSFW_TLB_GET_INHIBIT(entry_lo) (((entry_lo) >> 30) & 0x3U)
+#define ROGUE_MIPSFW_TLB_GET_DGV(entry_lo) ((entry_lo) & 0x7U)
+#define ROGUE_MIPSFW_TLB_GLOBAL BIT(0)
+#define ROGUE_MIPSFW_TLB_VALID BIT(1)
+#define ROGUE_MIPSFW_TLB_DIRTY BIT(2)
+#define ROGUE_MIPSFW_TLB_XI BIT(30)
+#define ROGUE_MIPSFW_TLB_RI BIT(31)
+
+#define ROGUE_MIPSFW_REMAP_GET_REGION_SIZE(region_size_encoding) (1 << (((region_size_encoding) \
+ + 1) << 1))
+
+struct rogue_mips_tlb_entry {
+ u32 tlb_page_mask;
+ u32 tlb_hi;
+ u32 tlb_lo0;
+ u32 tlb_lo1;
+};
+
+struct rogue_mips_remap_entry {
+ u32 remap_addr_in; /* Always 4k aligned. */
+ u32 remap_addr_out; /* Always 4k aligned. */
+ u32 remap_region_size;
+};
+
+struct rogue_mips_state {
+ u32 error_state; /* This must come first in the structure. */
+ u32 error_epc;
+ u32 status_register;
+ u32 cause_register;
+ u32 bad_register;
+ u32 epc;
+ u32 sp;
+ u32 debug;
+ u32 depc;
+ u32 bad_instr;
+ u32 unmapped_address;
+ struct rogue_mips_tlb_entry tlb[ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES];
+ struct rogue_mips_remap_entry remap[ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES];
+};
+
+#include "pvr_rogue_mips_check.h"
+
+#endif /* PVR_ROGUE_MIPS_H */
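As an illustration of the EntryLo field defines above, this sketch builds an uncached, dirty/valid/global PTE for a 4K-aligned physical address. It assumes the ROGUE_MIPSFW_* defines from this header are in scope and is not the driver's actual page-table code.

#include <stdint.h>

/* Sketch: pack a 4K-aligned physical address into the EntryLo PFN field and
 * mark the entry dirty, valid, global and uncached. ADDR_TO_ENTRYLO_PFN_RSHIFT
 * moves the address from its 4K alignment (bit 12) down to the PFN field
 * position (bit 6).
 */
static uint32_t mipsfw_pte_uncached(uint64_t phys_addr)
{
	uint32_t pfn = (uint32_t)(phys_addr >> ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT) &
		       ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;

	return pfn | ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED;
}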
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
new file mode 100644
index 000000000000..824b4bf33ac1
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_MIPS_CHECK_H
+#define PVR_ROGUE_MIPS_CHECK_H
+
+#include <linux/build_bug.h>
+
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) == 0,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_hi) == 4,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_hi) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo0) == 8,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_lo0) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo1) == 12,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_lo1) incorrect");
+static_assert(sizeof(struct rogue_mips_tlb_entry) == 16,
+ "struct rogue_mips_tlb_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_in) == 0,
+ "offsetof(struct rogue_mips_remap_entry, remap_addr_in) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_out) == 4,
+ "offsetof(struct rogue_mips_remap_entry, remap_addr_out) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_region_size) == 8,
+ "offsetof(struct rogue_mips_remap_entry, remap_region_size) incorrect");
+static_assert(sizeof(struct rogue_mips_remap_entry) == 12,
+ "struct rogue_mips_remap_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_state, error_state) == 0,
+ "offsetof(struct rogue_mips_state, error_state) incorrect");
+static_assert(offsetof(struct rogue_mips_state, error_epc) == 4,
+ "offsetof(struct rogue_mips_state, error_epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, status_register) == 8,
+ "offsetof(struct rogue_mips_state, status_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, cause_register) == 12,
+ "offsetof(struct rogue_mips_state, cause_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_register) == 16,
+ "offsetof(struct rogue_mips_state, bad_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, epc) == 20,
+ "offsetof(struct rogue_mips_state, epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, sp) == 24,
+ "offsetof(struct rogue_mips_state, sp) incorrect");
+static_assert(offsetof(struct rogue_mips_state, debug) == 28,
+ "offsetof(struct rogue_mips_state, debug) incorrect");
+static_assert(offsetof(struct rogue_mips_state, depc) == 32,
+ "offsetof(struct rogue_mips_state, depc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_instr) == 36,
+ "offsetof(struct rogue_mips_state, bad_instr) incorrect");
+static_assert(offsetof(struct rogue_mips_state, unmapped_address) == 40,
+ "offsetof(struct rogue_mips_state, unmapped_address) incorrect");
+static_assert(offsetof(struct rogue_mips_state, tlb) == 44,
+ "offsetof(struct rogue_mips_state, tlb) incorrect");
+static_assert(offsetof(struct rogue_mips_state, remap) == 300,
+ "offsetof(struct rogue_mips_state, remap) incorrect");
+static_assert(sizeof(struct rogue_mips_state) == 684,
+ "struct rogue_mips_state is incorrect size");
+
+#endif /* PVR_ROGUE_MIPS_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
new file mode 100644
index 000000000000..f361ccdd5405
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+/* *** Autogenerated C -- do not edit *** */
+
+#ifndef PVR_ROGUE_MMU_DEFS_H
+#define PVR_ROGUE_MMU_DEFS_H
+
+#define ROGUE_MMU_DEFS_REVISION 0
+
+#define ROGUE_BIF_DM_ENCODING_VERTEX (0x00000000U)
+#define ROGUE_BIF_DM_ENCODING_PIXEL (0x00000001U)
+#define ROGUE_BIF_DM_ENCODING_COMPUTE (0x00000002U)
+#define ROGUE_BIF_DM_ENCODING_TLA (0x00000003U)
+#define ROGUE_BIF_DM_ENCODING_PB_VCE (0x00000004U)
+#define ROGUE_BIF_DM_ENCODING_PB_TE (0x00000005U)
+#define ROGUE_BIF_DM_ENCODING_META (0x00000007U)
+#define ROGUE_BIF_DM_ENCODING_HOST (0x00000008U)
+#define ROGUE_BIF_DM_ENCODING_PM_ALIST (0x00000009U)
+
+#define ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U)
+#define ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK (0xFFFFFF003FFFFFFFULL)
+#define ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U)
+#define ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK (0xFFFFFFFFC01FFFFFULL)
+#define ROGUE_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK (0xFFFFFFFFFFE00FFFULL)
+
+#define ROGUE_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U)
+#define ROGUE_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U)
+#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U)
+
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U)
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U)
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U)
+
+#define ROGUE_MMUCTRL_PAGE_SIZE_MASK (0x00000007U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_4KB (0x00000000U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_16KB (0x00000001U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_64KB (0x00000002U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_256KB (0x00000003U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_1MB (0x00000004U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_2MB (0x00000005U)
+
+#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U)
+#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (0xFFFFFF0000003FFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U)
+#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (0xFFFFFF000000FFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U)
+#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (0xFFFFFF000003FFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U)
+#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (0xFFFFFF00000FFFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U)
+#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (0xFFFFFF00001FFFFFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U)
+#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (0xFFFFFF00000003FFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U)
+#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (0xFFFFFF00000000FFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U)
+#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (0xFFFFFF000000003FULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (0xFFFFFF000000001FULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (0xFFFFFF000000001FULL)
+
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U)
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (0xBFFFFFFFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (0x4000000000000000ULL)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (0xC00000FFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PAGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PT_DATA_PAGE_CLRMSK (0xFFFFFF0000000FFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (0xFFFFFFFFFFFFF03FULL)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFFFFFFFFDFULL)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (0x0000000000000020ULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (0xFFFFFFFFFFFFFFEFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_EN (0x0000000000000010ULL)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (0xFFFFFFFFFFFFFFF7ULL)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (0x0000000000000008ULL)
+#define ROGUE_MMUCTRL_PT_DATA_CC_SHIFT (2U)
+#define ROGUE_MMUCTRL_PT_DATA_CC_CLRMSK (0xFFFFFFFFFFFFFFFBULL)
+#define ROGUE_MMUCTRL_PT_DATA_CC_EN (0x0000000000000004ULL)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (0xFFFFFFFFFFFFFFFDULL)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_EN (0x0000000000000002ULL)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_EN (0x0000000000000001ULL)
+
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U)
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFEFFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (0x0000010000000000ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (0xFFFFFF000000001FULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (0xFFFFFFFFFFFFFFF1ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (0x0000000000000000ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (0x0000000000000002ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (0x0000000000000004ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (0x0000000000000006ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (0x0000000000000008ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (0x000000000000000aULL)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_EN (0x0000000000000001ULL)
+
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_EN (0x00000001U)
+
+#endif /* PVR_ROGUE_MMU_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
new file mode 100644
index 000000000000..975336a4facf
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+
+#include <linux/align.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <uapi/drm/pvr_drm.h>
+
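+/*
+ * A stream entry is only consumed when its feature gate matches the device:
+ * PVR_FEATURE_NONE entries always apply, and PVR_FEATURE_NOT inverts the test
+ * so the entry applies only when the named feature is absent.
+ */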
+static __always_inline bool
+stream_def_is_supported(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def)
+{
+ if (stream_def->feature == PVR_FEATURE_NONE)
+ return true;
+
+ if (!(stream_def->feature & PVR_FEATURE_NOT) &&
+ pvr_device_has_feature(pvr_dev, stream_def->feature)) {
+ return true;
+ }
+
+ if ((stream_def->feature & PVR_FEATURE_NOT) &&
+ !pvr_device_has_feature(pvr_dev, stream_def->feature & ~PVR_FEATURE_NOT)) {
+ return true;
+ }
+
+ return false;
+}
+
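+/*
+ * Align the read cursor to @align_size, bounds-check the read against
+ * @stream_size, then copy @data_size bytes into @dest and advance the cursor.
+ */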
+static int
+pvr_stream_get_data(u8 *stream, u32 *stream_offset, u32 stream_size, u32 data_size, u32 align_size,
+ void *dest)
+{
+ *stream_offset = ALIGN(*stream_offset, align_size);
+
+ if ((*stream_offset + data_size) > stream_size)
+ return -EINVAL;
+
+ memcpy(dest, stream + *stream_offset, data_size);
+
+ (*stream_offset) += data_size;
+
+ return 0;
+}
+
+/**
+ * pvr_stream_process_1() - Process a single stream and fill destination structure
+ * @pvr_dev: Device pointer.
+ * @stream_def: Stream definition.
+ * @nr_entries: Number of entries in @stream_def.
+ * @stream: Pointer to stream.
+ * @stream_offset: Starting offset within stream.
+ * @stream_size: Size of input stream, in bytes.
+ * @dest: Pointer to destination structure.
+ * @dest_size: Size of destination structure.
+ * @stream_offset_out: Pointer to variable to write updated stream offset to. May be NULL.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL on malformed stream.
+ */
+static int
+pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def,
+ u32 nr_entries, u8 *stream, u32 stream_offset, u32 stream_size,
+ u8 *dest, u32 dest_size, u32 *stream_offset_out)
+{
+ int err = 0;
+ u32 i;
+
+ for (i = 0; i < nr_entries; i++) {
+ if (stream_def[i].offset >= dest_size) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (!stream_def_is_supported(pvr_dev, &stream_def[i]))
+ continue;
+
+ switch (stream_def[i].size) {
+ case PVR_STREAM_SIZE_8:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u8),
+ sizeof(u8), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_16:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u16),
+ sizeof(u16), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_32:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+ sizeof(u32), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_64:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u64),
+ sizeof(u64), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_ARRAY:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size,
+ stream_def[i].array_size, sizeof(u64),
+ dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (stream_offset_out)
+ *stream_offset_out = stream_offset;
+
+ return err;
+}
+
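+/*
+ * The extension stream is a sequence of u32 headers. Each header encodes a
+ * type, a data mask selecting which sub-streams follow it, and a continuation
+ * bit indicating whether another header follows those sub-streams.
+ *
+ * Purely illustrative layout (not a real capture):
+ *
+ *   [u32 header: FRAG0 type, BRN49927 bit set, no continuation]
+ *   [BRN49927 sub-stream entries]
+ */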
+static int
+pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
+ const struct pvr_stream_cmd_defs *cmd_defs, void *ext_stream,
+ u32 stream_offset, u32 ext_stream_size, void *dest)
+{
+ u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
+ u32 ext_header;
+ int err = 0;
+ u32 i;
+
+ /* Copy "must have" mask from device. We clear this as we process the stream. */
+ memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
+ sizeof(musthave_masks));
+
+ do {
+ const struct pvr_stream_ext_header *header;
+ u32 type;
+ u32 data;
+
+ err = pvr_stream_get_data(ext_stream, &stream_offset, ext_stream_size, sizeof(u32),
+ sizeof(ext_header), &ext_header);
+ if (err)
+ return err;
+
+ type = (ext_header & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT;
+ data = ext_header & PVR_STREAM_EXTHDR_DATA_MASK;
+
+ if (type >= cmd_defs->ext_nr_headers)
+ return -EINVAL;
+
+ header = &cmd_defs->ext_headers[type];
+ if (data & ~header->valid_mask)
+ return -EINVAL;
+
+ musthave_masks[type] &= ~data;
+
+ for (i = 0; i < header->ext_streams_num; i++) {
+ const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
+
+ if (!(ext_header & ext_def->header_mask))
+ continue;
+
+ if (!pvr_device_has_uapi_quirk(pvr_dev, ext_def->quirk))
+ return -EINVAL;
+
+ err = pvr_stream_process_1(pvr_dev, ext_def->stream, ext_def->stream_len,
+ ext_stream, stream_offset,
+ ext_stream_size, dest,
+ cmd_defs->dest_size, &stream_offset);
+ if (err)
+ return err;
+ }
+ } while (ext_header & PVR_STREAM_EXTHDR_CONTINUATION);
+
+ /*
+ * Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
+ * for this command was not present.
+ */
+ for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ if (musthave_masks[i])
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pvr_stream_process() - Build FW structure from stream
+ * @pvr_dev: Device pointer.
+ * @cmd_defs: Stream definition.
+ * @stream: Pointer to command stream.
+ * @stream_size: Size of command stream, in bytes.
+ * @dest_out: Pointer to destination buffer.
+ *
+ * Caller is responsible for freeing the output structure.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%ENOMEM on out of memory, or
+ * * -%EINVAL on malformed stream.
+ */
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ void *stream, u32 stream_size, void *dest_out)
+{
+ u32 stream_offset = 0;
+ u32 main_stream_len;
+ u32 padding;
+ int err;
+
+ if (!stream || !stream_size)
+ return -EINVAL;
+
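+	/*
+	 * Stream layout: a u32 main stream length, a u32 of (currently zero)
+	 * padding for u64 alignment, the main stream itself, then an optional
+	 * extension stream.
+	 */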
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+ sizeof(u32), &main_stream_len);
+ if (err)
+ return err;
+
+ /*
+ * u32 after stream length is padding to ensure u64 alignment, but may be used for expansion
+ * in the future. Verify it's zero.
+ */
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+ sizeof(u32), &padding);
+ if (err)
+ return err;
+
+ if (main_stream_len < stream_offset || main_stream_len > stream_size || padding)
+ return -EINVAL;
+
+ err = pvr_stream_process_1(pvr_dev, cmd_defs->main_stream, cmd_defs->main_stream_len,
+ stream, stream_offset, main_stream_len, dest_out,
+ cmd_defs->dest_size, &stream_offset);
+ if (err)
+ return err;
+
+ if (stream_offset < stream_size) {
+ err = pvr_stream_process_ext_stream(pvr_dev, cmd_defs, stream, stream_offset,
+ stream_size, dest_out);
+ if (err)
+ return err;
+ } else {
+ u32 i;
+
+ /*
+ * If we don't have an extension stream then there must not be any "must have"
+ * quirks for this command.
+ */
+ for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pvr_stream_create_musthave_masks() - Create "must have" masks for streams based on current device
+ * quirks
+ * @pvr_dev: Device pointer.
+ */
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev)
+{
+ memset(pvr_dev->stream_musthave_quirks, 0, sizeof(pvr_dev->stream_musthave_quirks));
+
+ if (pvr_device_has_uapi_quirk(pvr_dev, 47217))
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+ PVR_STREAM_EXTHDR_FRAG0_BRN47217;
+
+ if (pvr_device_has_uapi_quirk(pvr_dev, 49927)) {
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_GEOM][0] |=
+ PVR_STREAM_EXTHDR_GEOM0_BRN49927;
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+ PVR_STREAM_EXTHDR_FRAG0_BRN49927;
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_COMPUTE][0] |=
+ PVR_STREAM_EXTHDR_COMPUTE0_BRN49927;
+ }
+}
diff --git a/drivers/gpu/drm/imagination/pvr_stream.h b/drivers/gpu/drm/imagination/pvr_stream.h
new file mode 100644
index 000000000000..d92acb3a61d7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_STREAM_H
+#define PVR_STREAM_H
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct pvr_device;
+
+struct pvr_job;
+
+enum pvr_stream_type {
+ PVR_STREAM_TYPE_GEOM = 0,
+ PVR_STREAM_TYPE_FRAG,
+ PVR_STREAM_TYPE_COMPUTE,
+ PVR_STREAM_TYPE_TRANSFER,
+ PVR_STREAM_TYPE_STATIC_RENDER_CONTEXT,
+ PVR_STREAM_TYPE_STATIC_COMPUTE_CONTEXT,
+
+ PVR_STREAM_TYPE_MAX
+};
+
+enum pvr_stream_size {
+ PVR_STREAM_SIZE_8 = 0,
+ PVR_STREAM_SIZE_16,
+ PVR_STREAM_SIZE_32,
+ PVR_STREAM_SIZE_64,
+ PVR_STREAM_SIZE_ARRAY,
+};
+
+#define PVR_FEATURE_NOT BIT(31)
+#define PVR_FEATURE_NONE U32_MAX
+
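+/*
+ * One stream entry: @offset is the byte offset of the destination field in
+ * the FW structure, @size its width (PVR_STREAM_SIZE_ARRAY consumes
+ * @array_size bytes), and @feature an optional gate, possibly inverted with
+ * PVR_FEATURE_NOT or disabled with PVR_FEATURE_NONE.
+ */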
+struct pvr_stream_def {
+ u32 offset;
+ enum pvr_stream_size size;
+ u32 array_size;
+ u32 feature;
+};
+
+struct pvr_stream_ext_def {
+ const struct pvr_stream_def *stream;
+ u32 stream_len;
+ u32 header_mask;
+ u32 quirk;
+};
+
+struct pvr_stream_ext_header {
+ const struct pvr_stream_ext_def *ext_streams;
+ u32 ext_streams_num;
+ u32 valid_mask;
+};
+
+struct pvr_stream_cmd_defs {
+ enum pvr_stream_type type;
+
+ const struct pvr_stream_def *main_stream;
+ u32 main_stream_len;
+
+ u32 ext_nr_headers;
+ const struct pvr_stream_ext_header *ext_headers;
+
+ size_t dest_size;
+};
+
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ void *stream, u32 stream_size, void *dest_out);
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev);
+
+#endif /* PVR_STREAM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.c b/drivers/gpu/drm/imagination/pvr_stream_defs.c
new file mode 100644
index 000000000000..f8bd1a8c01db
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device_info.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+
+#include <linux/stddef.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_STREAM_DEF_SET(owner, member, _size, _array_size, _feature) \
+ { .offset = offsetof(struct owner, member), \
+ .size = (_size), \
+ .array_size = (_array_size), \
+ .feature = (_feature) }
+
+#define PVR_STREAM_DEF(owner, member, member_size) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_FEATURE(owner, member, member_size, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, feature)
+
+#define PVR_STREAM_DEF_NOT_FEATURE(owner, member, member_size, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, \
+ (feature) | PVR_FEATURE_NOT)
+
+#define PVR_STREAM_DEF_ARRAY(owner, member) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+ sizeof(((struct owner *)0)->member), PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_ARRAY_FEATURE(owner, member, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+ sizeof(((struct owner *)0)->member), feature)
+
+#define PVR_STREAM_DEF_ARRAY_NOT_FEATURE(owner, member, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+ sizeof(((struct owner *)0)->member), (feature) | PVR_FEATURE_NOT)
+
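+/*
+ * For illustration, PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.ppp_ctrl, 32)
+ * expands to { .offset = offsetof(struct rogue_fwif_cmd_geom, regs.ppp_ctrl),
+ * .size = PVR_STREAM_SIZE_32, .array_size = 0, .feature = PVR_FEATURE_NONE }.
+ */
+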
+/*
+ * When adding new parameters to the stream definition, the new parameters must go after the
+ * existing parameters, to preserve order. As parameters are naturally aligned, care must be taken
+ * with respect to implicit padding in the stream; padding should be minimised as much as possible.
+ */
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_ctrl_stream_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu_border_colour_table, 64),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect0, 64,
+ PVR_FEATURE_VDM_DRAWINDIRECT),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect1, 32,
+ PVR_FEATURE_VDM_DRAWINDIRECT),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.ppp_ctrl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.te_psg, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_context_resume_task0_size, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_context_resume_task3_size, 32,
+ PVR_FEATURE_VDM_OBJECT_LEVEL_LLS),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.view_idx, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.pds_coeff_free_prog, 32,
+ PVR_FEATURE_TESSELLATION),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream_brn49927[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_geom_ext_streams_0[] = {
+ {
+ .stream = rogue_fwif_cmd_geom_stream_brn49927,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream_brn49927),
+ .header_mask = PVR_STREAM_EXTHDR_GEOM0_BRN49927,
+ .quirk = 49927,
+ },
+};
+
+static const struct pvr_stream_ext_header cmd_geom_ext_headers[] = {
+ {
+ .ext_streams = cmd_geom_ext_streams_0,
+ .ext_streams_num = ARRAY_SIZE(cmd_geom_ext_streams_0),
+ .valid_mask = PVR_STREAM_EXTHDR_GEOM0_VALID,
+ },
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_geom_stream = {
+ .type = PVR_STREAM_TYPE_GEOM,
+
+ .main_stream = rogue_fwif_cmd_geom_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream),
+
+ .ext_nr_headers = ARRAY_SIZE(cmd_geom_ext_headers),
+ .ext_headers = cmd_geom_ext_headers,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_geom),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_scissor_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_dbias_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zlsctl, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zload_store_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_stencil_load_store_base, 64),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.fb_cdc_zls, 64,
+ PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pbe_word),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu_border_colour_table, 64),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_bgnd),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_pr_bgnd),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.usc_clear_register),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.usc_pixel_output_ctrl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjdepth, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjvals, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_aa, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_xtp_pipe_enable, 32,
+ PVR_FEATURE_S7_TOP_INFRASTRUCTURE),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_ctl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_info, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.pixel_phantom, 32,
+ PVR_FEATURE_CLUSTER_GROUPING),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.view_idx, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_data, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_zls_pixels, 32,
+ PVR_FEATURE_ZLS_SUBTILE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.rgx_cr_blackpearl_fix, 32,
+ PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, zls_stride, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, sls_stride, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, execute_count, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn47217[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn49927[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_frag_ext_streams_0[] = {
+ {
+ .stream = rogue_fwif_cmd_frag_stream_brn47217,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn47217),
+ .header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN47217,
+ .quirk = 47217,
+ },
+ {
+ .stream = rogue_fwif_cmd_frag_stream_brn49927,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn49927),
+ .header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN49927,
+ .quirk = 49927,
+ },
+};
+
+static const struct pvr_stream_ext_header cmd_frag_ext_headers[] = {
+ {
+ .ext_streams = cmd_frag_ext_streams_0,
+ .ext_streams_num = ARRAY_SIZE(cmd_frag_ext_streams_0),
+ .valid_mask = PVR_STREAM_EXTHDR_FRAG0_VALID,
+ },
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_frag_stream = {
+ .type = PVR_STREAM_TYPE_FRAG,
+
+ .main_stream = rogue_fwif_cmd_frag_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream),
+
+ .ext_nr_headers = ARRAY_SIZE(cmd_frag_ext_headers),
+ .ext_headers = cmd_frag_ext_headers,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_frag),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_compute_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu_border_colour_table, 64),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_queue, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_base, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_NOT_FEATURE(rogue_fwif_cmd_compute, regs.cdm_ctrl_stream_base, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_context_state_base_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_resume_pds1, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_item, 32,
+ PVR_FEATURE_COMPUTE_MORTON_CAPABLE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.compute_cluster, 32,
+ PVR_FEATURE_CLUSTER_GROUPING),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.tpu_tag_cdm_ctrl, 32,
+ PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, stream_start_offset, 32,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, execute_count, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_compute_stream_brn49927[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_compute_ext_streams_0[] = {
+ {
+ .stream = rogue_fwif_cmd_compute_stream_brn49927,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream_brn49927),
+ .header_mask = PVR_STREAM_EXTHDR_COMPUTE0_BRN49927,
+ .quirk = 49927,
+ },
+};
+
+static const struct pvr_stream_ext_header cmd_compute_ext_headers[] = {
+ {
+ .ext_streams = cmd_compute_ext_streams_0,
+ .ext_streams_num = ARRAY_SIZE(cmd_compute_ext_streams_0),
+ .valid_mask = PVR_STREAM_EXTHDR_COMPUTE0_VALID,
+ },
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_compute_stream = {
+ .type = PVR_STREAM_TYPE_COMPUTE,
+
+ .main_stream = rogue_fwif_cmd_compute_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream),
+
+ .ext_nr_headers = ARRAY_SIZE(cmd_compute_ext_headers),
+ .ext_headers = cmd_compute_ext_headers,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_compute),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_transfer_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd0_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd1_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd3_sizeinfo, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_base, 64),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_transfer, regs.pbe_wordx_mrty),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_bgobjvals, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_pixel_output_ctrl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register0, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register1, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register2, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register3, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_size, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render_origin, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_ctl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_aa, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_info, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_code, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_data, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_rgn, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.isp_xtp_pipe_enable, 32,
+ PVR_FEATURE_S7_TOP_INFRASTRUCTURE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.frag_screen, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream = {
+ .type = PVR_STREAM_TYPE_TRANSFER,
+
+ .main_stream = rogue_fwif_cmd_transfer_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_transfer_stream),
+
+ .ext_nr_headers = 0,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_transfer),
+};
+
+static const struct pvr_stream_def rogue_fwif_static_render_context_state_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_base_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_resume_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_reg_ta_context_state_base_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task4, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task4, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task4, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task4, 64),
+};
+
+const struct pvr_stream_cmd_defs pvr_static_render_context_state_stream = {
+ .type = PVR_STREAM_TYPE_STATIC_RENDER_CONTEXT,
+
+ .main_stream = rogue_fwif_static_render_context_state_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_static_render_context_state_stream),
+
+ .ext_nr_headers = 0,
+
+ .dest_size = sizeof(struct rogue_fwif_geom_registers_caswitch),
+};
+
+static const struct pvr_stream_def rogue_fwif_static_compute_context_state_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 64),
+};
+
+const struct pvr_stream_cmd_defs pvr_static_compute_context_state_stream = {
+ .type = PVR_STREAM_TYPE_STATIC_COMPUTE_CONTEXT,
+
+ .main_stream = rogue_fwif_static_compute_context_state_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_static_compute_context_state_stream),
+
+ .ext_nr_headers = 0,
+
+ .dest_size = sizeof(struct rogue_fwif_cdm_registers_cswitch),
+};
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.h b/drivers/gpu/drm/imagination/pvr_stream_defs.h
new file mode 100644
index 000000000000..f33b82165833
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_STREAM_DEFS_H
+#define PVR_STREAM_DEFS_H
+
+#include "pvr_stream.h"
+
+extern const struct pvr_stream_cmd_defs pvr_cmd_geom_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_frag_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_compute_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream;
+extern const struct pvr_stream_cmd_defs pvr_static_render_context_state_stream;
+extern const struct pvr_stream_cmd_defs pvr_static_compute_context_state_stream;
+
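+/*
+ * Illustrative use only (ucmd/ucmd_len are stand-ins for a caller's copied-in
+ * user stream, not names defined by this driver):
+ *
+ *	struct rogue_fwif_cmd_geom fw_cmd = {};
+ *	int err = pvr_stream_process(pvr_dev, &pvr_cmd_geom_stream,
+ *				     ucmd, ucmd_len, &fw_cmd);
+ */
+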
+#endif /* PVR_STREAM_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_sync.c b/drivers/gpu/drm/imagination/pvr_sync.c
new file mode 100644
index 000000000000..129f646d14ba
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_sync.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_syncobj.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include "pvr_device.h"
+#include "pvr_queue.h"
+#include "pvr_sync.h"
+
+static int
+pvr_check_sync_op(const struct drm_pvr_sync_op *sync_op)
+{
+ u8 handle_type;
+
+ if (sync_op->flags & ~DRM_PVR_SYNC_OP_FLAGS_MASK)
+ return -EINVAL;
+
+ handle_type = sync_op->flags & DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK;
+ if (handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
+ handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ)
+ return -EINVAL;
+
+ if (handle_type == DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
+ sync_op->value != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+pvr_sync_signal_free(struct pvr_sync_signal *sig_sync)
+{
+ if (!sig_sync)
+ return;
+
+ drm_syncobj_put(sig_sync->syncobj);
+ dma_fence_chain_free(sig_sync->chain);
+ dma_fence_put(sig_sync->fence);
+ kfree(sig_sync);
+}
+
+void
+pvr_sync_signal_array_cleanup(struct xarray *array)
+{
+ struct pvr_sync_signal *sig_sync;
+ unsigned long i;
+
+ xa_for_each(array, i, sig_sync)
+ pvr_sync_signal_free(sig_sync);
+
+ xa_destroy(array);
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_add(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
+{
+ struct pvr_sync_signal *sig_sync;
+ struct dma_fence *cur_fence;
+ int err;
+ u32 id;
+
+ sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
+ if (!sig_sync)
+ return ERR_PTR(-ENOMEM);
+
+ sig_sync->handle = handle;
+ sig_sync->point = point;
+
+ if (point > 0) {
+ sig_sync->chain = dma_fence_chain_alloc();
+ if (!sig_sync->chain) {
+ err = -ENOMEM;
+ goto err_free_sig_sync;
+ }
+ }
+
+ sig_sync->syncobj = drm_syncobj_find(file, handle);
+ if (!sig_sync->syncobj) {
+ err = -EINVAL;
+ goto err_free_sig_sync;
+ }
+
+ /* Retrieve the current fence attached to that point. It's
+ * perfectly fine to get a NULL fence here, it just means there's
+ * no fence attached to that point yet.
+ */
+ if (!drm_syncobj_find_fence(file, handle, point, 0, &cur_fence))
+ sig_sync->fence = cur_fence;
+
+ err = xa_alloc(array, &id, sig_sync, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_free_sig_sync;
+
+ return sig_sync;
+
+err_free_sig_sync:
+ pvr_sync_signal_free(sig_sync);
+ return ERR_PTR(err);
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_search(struct xarray *array, u32 handle, u64 point)
+{
+ struct pvr_sync_signal *sig_sync;
+ unsigned long i;
+
+ xa_for_each(array, i, sig_sync) {
+ if (handle == sig_sync->handle && point == sig_sync->point)
+ return sig_sync;
+ }
+
+ return NULL;
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_get(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
+{
+ struct pvr_sync_signal *sig_sync;
+
+ sig_sync = pvr_sync_signal_array_search(array, handle, point);
+ if (sig_sync)
+ return sig_sync;
+
+ return pvr_sync_signal_array_add(array, file, handle, point);
+}
+
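+/*
+ * Register every SIGNAL op from @sync_ops in @array, reusing an existing
+ * entry when the same <handle, point> pair is signalled more than once, so
+ * that later wait ops in the same submission can resolve against the fence
+ * that will eventually be attached to that point.
+ */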
+int
+pvr_sync_signal_array_collect_ops(struct xarray *array,
+ struct drm_file *file,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops)
+{
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct pvr_sync_signal *sig_sync;
+ int ret;
+
+ if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
+ continue;
+
+ ret = pvr_check_sync_op(&sync_ops[i]);
+ if (ret)
+ return ret;
+
+ sig_sync = pvr_sync_signal_array_get(array, file,
+ sync_ops[i].handle,
+ sync_ops[i].value);
+ if (IS_ERR(sig_sync))
+ return PTR_ERR(sig_sync);
+ }
+
+ return 0;
+}
+
+int
+pvr_sync_signal_array_update_fences(struct xarray *array,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct dma_fence *done_fence)
+{
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct dma_fence *old_fence;
+ struct pvr_sync_signal *sig_sync;
+
+ if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
+ continue;
+
+ sig_sync = pvr_sync_signal_array_search(array, sync_ops[i].handle,
+ sync_ops[i].value);
+ if (WARN_ON(!sig_sync))
+ return -EINVAL;
+
+ old_fence = sig_sync->fence;
+ sig_sync->fence = dma_fence_get(done_fence);
+ dma_fence_put(old_fence);
+
+ if (WARN_ON(!sig_sync->fence))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+pvr_sync_signal_array_push_fences(struct xarray *array)
+{
+ struct pvr_sync_signal *sig_sync;
+ unsigned long i;
+
+ xa_for_each(array, i, sig_sync) {
+ if (sig_sync->chain) {
+ drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
+ sig_sync->fence, sig_sync->point);
+ sig_sync->chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
+ }
+ }
+}
+
+static int
+pvr_sync_add_dep_to_job(struct drm_sched_job *job, struct dma_fence *f)
+{
+ struct dma_fence_unwrap iter;
+ u32 native_fence_count = 0;
+ struct dma_fence *uf;
+ int err = 0;
+
+ dma_fence_unwrap_for_each(uf, &iter, f) {
+ if (pvr_queue_fence_is_ufo_backed(uf))
+ native_fence_count++;
+ }
+
+ /* No need to unwrap the fence if it's fully non-native. */
+ if (!native_fence_count)
+ return drm_sched_job_add_dependency(job, f);
+
+ dma_fence_unwrap_for_each(uf, &iter, f) {
+ /* There's no dma_fence_unwrap_stop() helper cleaning up the refs
+ * owned by dma_fence_unwrap(), so let's just iterate over all
+ * entries without doing anything when something failed.
+ */
+ if (err)
+ continue;
+
+ if (pvr_queue_fence_is_ufo_backed(uf)) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(uf);
+
+ /* If this is a native dependency, we wait for the scheduled fence,
+ * and we will let pvr_queue_run_job() issue FW waits.
+ */
+ err = drm_sched_job_add_dependency(job,
+ dma_fence_get(&s_fence->scheduled));
+ } else {
+ err = drm_sched_job_add_dependency(job, dma_fence_get(uf));
+ }
+ }
+
+ dma_fence_put(f);
+ return err;
+}
+
+int
+pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct xarray *signal_array)
+{
+ int err = 0;
+
+ if (!sync_op_count)
+ return 0;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct pvr_sync_signal *sig_sync;
+ struct dma_fence *fence;
+
+ if (sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+ continue;
+
+ err = pvr_check_sync_op(&sync_ops[i]);
+ if (err)
+ return err;
+
+ sig_sync = pvr_sync_signal_array_search(signal_array, sync_ops[i].handle,
+ sync_ops[i].value);
+ if (sig_sync) {
+ if (WARN_ON(!sig_sync->fence))
+ return -EINVAL;
+
+ fence = dma_fence_get(sig_sync->fence);
+ } else {
+ err = drm_syncobj_find_fence(from_pvr_file(pvr_file), sync_ops[i].handle,
+ sync_ops[i].value, 0, &fence);
+ if (err)
+ return err;
+ }
+
+ err = pvr_sync_add_dep_to_job(job, fence);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_sync.h b/drivers/gpu/drm/imagination/pvr_sync.h
new file mode 100644
index 000000000000..db6ccfda104a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_sync.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_SYNC_H
+#define PVR_SYNC_H
+
+#include <uapi/drm/pvr_drm.h>
+
+/* Forward declaration from <linux/xarray.h>. */
+struct xarray;
+
+/* Forward declaration from <drm/drm_file.h>. */
+struct drm_file;
+
+/* Forward declaration from <drm/gpu_scheduler.h>. */
+struct drm_sched_job;
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_file;
+
+/**
+ * struct pvr_sync_signal - Object encoding a syncobj signal operation
+ *
+ * The job submission logic collects all signal operations in an array of
+ * pvr_sync_signal objects. This array also serves as a cache to get the
+ * latest dma_fence when multiple jobs are submitted at once, and one job
+ * signals a syncobj point that's later waited on by a subsequent job.
+ */
+struct pvr_sync_signal {
+ /** @handle: Handle of the syncobj to signal. */
+ u32 handle;
+
+ /**
+ * @point: Point to signal in the syncobj.
+ *
+ * Only relevant for timeline syncobjs.
+ */
+ u64 point;
+
+ /** @syncobj: Syncobj retrieved from the handle. */
+ struct drm_syncobj *syncobj;
+
+ /**
+ * @chain: Chain object used to link the new fence with the
+ * existing timeline syncobj.
+ *
+ * Should be zero when manipulating a regular syncobj.
+ */
+ struct dma_fence_chain *chain;
+
+ /**
+ * @fence: New fence object to attach to the syncobj.
+ *
+ * This pointer starts with the current fence bound to
+ * the <handle,point> pair.
+ */
+ struct dma_fence *fence;
+};
+
+void
+pvr_sync_signal_array_cleanup(struct xarray *array);
+
+int
+pvr_sync_signal_array_collect_ops(struct xarray *array,
+ struct drm_file *file,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops);
+
+int
+pvr_sync_signal_array_update_fences(struct xarray *array,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct dma_fence *done_fence);
+
+void
+pvr_sync_signal_array_push_fences(struct xarray *array);
+
+int
+pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct xarray *signal_array);
+
+#endif /* PVR_SYNC_H */
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
new file mode 100644
index 000000000000..f42345fbe4bf
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_vm.h"
+
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_mmu.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_heap_config.h"
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
+
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/stddef.h>
+
+/**
+ * DOC: Memory context
+ *
+ * This is the "top level" datatype in the VM code. It's exposed in the public
+ * API as an opaque handle.
+ */
+
+/**
+ * struct pvr_vm_context - Context type used to represent a single VM.
+ */
+struct pvr_vm_context {
+ /**
+ * @pvr_dev: The PowerVR device to which this context is bound.
+ * This binding is immutable for the life of the context.
+ */
+ struct pvr_device *pvr_dev;
+
+ /** @mmu_ctx: The context for binding to physical memory. */
+ struct pvr_mmu_context *mmu_ctx;
+
+ /** @gpuvm_mgr: GPUVM object associated with this context. */
+ struct drm_gpuvm gpuvm_mgr;
+
+ /** @lock: Global lock on this VM. */
+ struct mutex lock;
+
+ /**
+ * @fw_mem_ctx_obj: Firmware object representing firmware memory
+ * context.
+ */
+ struct pvr_fw_object *fw_mem_ctx_obj;
+
+ /** @ref_count: Reference count of object. */
+ struct kref ref_count;
+
+ /**
+ * @dummy_gem: GEM object to enable VM reservation. All private BOs
+ * should use the @dummy_gem.resv and not their own _resv field.
+ */
+ struct drm_gem_object dummy_gem;
+};
+
+static inline
+struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
+{
+ return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
+}
+
+struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
+{
+ if (vm_ctx)
+ kref_get(&vm_ctx->ref_count);
+
+ return vm_ctx;
+}
+
+/**
+ * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
+ * page table structure behind a VM context.
+ * @vm_ctx: Target VM context.
+ */
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
+{
+ return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
+}
+
+/**
+ * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This is used to allow private BOs to share a dma_resv for faster fence
+ * updates.
+ *
+ * Returns: The dma_resv pointer.
+ */
+struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
+{
+ return vm_ctx->dummy_gem.resv;
+}
+
+/**
+ * DOC: Memory mappings
+ */
+
+/**
+ * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
+ */
+struct pvr_vm_gpuva {
+ /** @base: The wrapped drm_gpuva object. */
+ struct drm_gpuva base;
+};
+
+enum pvr_vm_bind_type {
+ PVR_VM_BIND_TYPE_MAP,
+ PVR_VM_BIND_TYPE_UNMAP,
+};
+
+/**
+ * struct pvr_vm_bind_op - Context of a map/unmap operation.
+ */
+struct pvr_vm_bind_op {
+ /** @type: Map or unmap. */
+ enum pvr_vm_bind_type type;
+
+ /** @pvr_obj: Object associated with mapping (map only). */
+ struct pvr_gem_object *pvr_obj;
+
+ /**
+ * @vm_ctx: VM context where the mapping will be created or destroyed.
+ */
+ struct pvr_vm_context *vm_ctx;
+
+ /** @mmu_op_ctx: MMU op context. */
+ struct pvr_mmu_op_context *mmu_op_ctx;
+
+ /** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
+ struct drm_gpuvm_bo *gpuvm_bo;
+
+ /**
+ * @new_va: Prealloced VA mapping object (init in callback).
+ * Used when creating a mapping.
+ */
+ struct pvr_vm_gpuva *new_va;
+
+ /**
+ * @prev_va: Prealloced VA mapping object (init in callback).
+ * Used when a mapping or unmapping operation overlaps an existing
+ * mapping and splits away the beginning into a new mapping.
+ */
+ struct pvr_vm_gpuva *prev_va;
+
+ /**
+ * @next_va: Prealloced VA mapping object (init in callback).
+ * Used when a mapping or unmapping operation overlaps an existing
+ * mapping and splits away the end into a new mapping.
+ */
+ struct pvr_vm_gpuva *next_va;
+
+ /** @offset: Offset into @pvr_obj to begin mapping from. */
+ u64 offset;
+
+ /** @device_addr: Device-virtual address at the start of the mapping. */
+ u64 device_addr;
+
+ /** @size: Size of the desired mapping. */
+ u64 size;
+};
+
+/**
+ * pvr_vm_bind_op_exec() - Execute a single bind op.
+ * @bind_op: Bind op context.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or
+ * a callback function.
+ */
+static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
+{
+ switch (bind_op->type) {
+ case PVR_VM_BIND_TYPE_MAP:
+ return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
+ bind_op, bind_op->device_addr,
+ bind_op->size,
+ gem_from_pvr_gem(bind_op->pvr_obj),
+ bind_op->offset);
+
+ case PVR_VM_BIND_TYPE_UNMAP:
+ return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
+ bind_op, bind_op->device_addr,
+ bind_op->size);
+ }
+
+ /*
+ * This shouldn't happen unless something went wrong
+ * in drm_sched.
+ */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
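+/*
+ * Release everything a bind op may hold: the gpuvm_bo reference, the
+ * preallocated VA nodes, the object reference and the MMU op context. Safe
+ * to call on a partially initialised bind op; the init error paths rely on
+ * that.
+ */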
+static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
+{
+ drm_gpuvm_bo_put(bind_op->gpuvm_bo);
+
+ kfree(bind_op->new_va);
+ kfree(bind_op->prev_va);
+ kfree(bind_op->next_va);
+
+ if (bind_op->pvr_obj)
+ pvr_gem_object_put(bind_op->pvr_obj);
+
+ if (bind_op->mmu_op_ctx)
+ pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
+}
+
+static int
+pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 offset,
+ u64 device_addr, u64 size)
+{
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
+ const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
+ struct sg_table *sgt;
+ u64 offset_plus_size;
+ int err;
+
+ if (check_add_overflow(offset, size, &offset_plus_size))
+ return -EINVAL;
+
+ if (is_user &&
+ !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
+ return -EINVAL;
+ }
+
+ if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
+ offset & ~PAGE_MASK || size & ~PAGE_MASK ||
+ offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
+ return -EINVAL;
+
+ bind_op->type = PVR_VM_BIND_TYPE_MAP;
+
+ dma_resv_lock(obj->resv, NULL);
+ bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(bind_op->gpuvm_bo))
+ return PTR_ERR(bind_op->gpuvm_bo);
+
+ bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
+ bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
+ bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
+ if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
+ err = -ENOMEM;
+ goto err_bind_op_fini;
+ }
+
+ /* Pin pages so they're ready for use. */
+ sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
+ err = PTR_ERR_OR_ZERO(sgt);
+ if (err)
+ goto err_bind_op_fini;
+
+ bind_op->mmu_op_ctx =
+ pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
+ err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
+ if (err) {
+ bind_op->mmu_op_ctx = NULL;
+ goto err_bind_op_fini;
+ }
+
+ bind_op->pvr_obj = pvr_obj;
+ bind_op->vm_ctx = vm_ctx;
+ bind_op->device_addr = device_addr;
+ bind_op->size = size;
+ bind_op->offset = offset;
+
+ return 0;
+
+err_bind_op_fini:
+ pvr_vm_bind_op_fini(bind_op);
+
+ return err;
+}
+
+static int
+pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
+ struct pvr_vm_context *vm_ctx, u64 device_addr,
+ u64 size)
+{
+ int err;
+
+ if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
+ return -EINVAL;
+
+ bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
+
+ bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
+ bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
+ if (!bind_op->prev_va || !bind_op->next_va) {
+ err = -ENOMEM;
+ goto err_bind_op_fini;
+ }
+
+ bind_op->mmu_op_ctx =
+ pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
+ err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
+ if (err) {
+ bind_op->mmu_op_ctx = NULL;
+ goto err_bind_op_fini;
+ }
+
+ bind_op->vm_ctx = vm_ctx;
+ bind_op->device_addr = device_addr;
+ bind_op->size = size;
+
+ return 0;
+
+err_bind_op_fini:
+ pvr_vm_bind_op_fini(bind_op);
+
+ return err;
+}
+
+/**
+ * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
+ * @op: gpuva op containing the remap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by drm_gpuvm_sm_map following a successful mapping while
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_map().
+ */
+static int
+pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
+{
+ struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
+ struct pvr_vm_bind_op *ctx = op_ctx;
+ int err;
+
+ if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
+ return -EINVAL;
+
+ err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
+ op->map.va.addr);
+ if (err)
+ return err;
+
+ drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
+ drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
+ ctx->new_va = NULL;
+
+ return 0;
+}
+
+/**
+ * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
+ * @op: gpuva op containing the unmap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_unmap().
+ */
+static int
+pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
+{
+ struct pvr_vm_bind_op *ctx = op_ctx;
+
+ int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
+ op->unmap.va->va.range);
+
+ if (err)
+ return err;
+
+ drm_gpuva_unmap(&op->unmap);
+ drm_gpuva_unlink(op->unmap.va);
+
+ return 0;
+}
+
+/**
+ * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
+ * @op: gpuva op containing the remap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
+ * mapping or unmapping operation causes a region to be split. The
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_unmap().
+ */
+static int
+pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
+{
+ struct pvr_vm_bind_op *ctx = op_ctx;
+ u64 va_start = 0, va_range = 0;
+ int err;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
+ err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
+ if (err)
+ return err;
+
+ /* No actual remap required: the page table tree depth is fixed to 3,
+ * and we use 4k page table entries only for now.
+ */
+ drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
+
+ if (op->remap.prev) {
+ pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
+ drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
+ ctx->prev_va = NULL;
+ }
+
+ if (op->remap.next) {
+ pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
+ drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
+ ctx->next_va = NULL;
+ }
+
+ drm_gpuva_unlink(op->remap.unmap->va);
+
+ return 0;
+}
+
+/*
+ * Public API
+ *
+ * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
+ */
+
+/**
+ * pvr_device_addr_is_valid() - Tests whether a device-virtual address
+ * is valid.
+ * @device_addr: Virtual device address to test.
+ *
+ * Return:
+ * * %true if @device_addr is within the valid range for a device page
+ * table and is aligned to the device page size, or
+ * * %false otherwise.
+ */
+bool
+pvr_device_addr_is_valid(u64 device_addr)
+{
+ return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
+ (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
+}
+
+/**
+ * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
+ * address and associated size are both valid.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address to test.
+ * @size: Size of the range based at @device_addr to test.
+ *
+ * Calling pvr_device_addr_is_valid() twice (once on @size, and again on
+ * @device_addr + @size) to verify a device-virtual address range initially
+ * seems intuitive, but it produces a false-negative when the address range
+ * is right at the end of device-virtual address space.
+ *
+ * This function catches that corner case, as well as checking that
+ * @size is non-zero.
+ *
+ * Return:
+ * * %true if @device_addr is device page aligned; @size is device page
+ * aligned; the range specified by @device_addr and @size is within the
+ * bounds of the device-virtual address space, and @size is non-zero, or
+ * * %false otherwise.
+ */
+bool
+pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+ u64 device_addr, u64 size)
+{
+ return pvr_device_addr_is_valid(device_addr) &&
+ drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
+ size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
+ (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
+}
+
+static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
+{
+ kfree(to_pvr_vm_context(gpuvm));
+}
+
+static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
+ .vm_free = pvr_gpuvm_free,
+ .sm_step_map = pvr_vm_gpuva_map,
+ .sm_step_remap = pvr_vm_gpuva_remap,
+ .sm_step_unmap = pvr_vm_gpuva_unmap,
+};
+
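+/*
+ * Init callback for the FW memory context object created for userspace VM
+ * contexts: points the firmware at this VM's page table root.
+ */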
+static void
+fw_mem_context_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
+ struct pvr_vm_context *vm_ctx = priv;
+
+ fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
+ fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
+}
+
+/**
+ * pvr_vm_create_context() - Create a new VM context.
+ * @pvr_dev: Target PowerVR device.
+ * @is_userspace_context: %true if this context is for userspace. This will
+ * create a firmware memory context for the VM context
+ * and disable warnings when tearing down mappings.
+ *
+ * Return:
+ * * A handle to the newly-minted VM context on success,
+ * * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
+ * missing or has an unsupported value,
+ * * -%ENOMEM if allocation of the structure behind the opaque handle fails,
+ * or
+ * * Any error encountered while setting up internal structures.
+ */
+struct pvr_vm_context *
+pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ struct pvr_vm_context *vm_ctx;
+ u16 device_addr_bits;
+
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
+ &device_addr_bits);
+ if (err) {
+ drm_err(drm_dev,
+ "Failed to get device virtual address space bits\n");
+ return ERR_PTR(err);
+ }
+
+ if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
+ drm_err(drm_dev,
+ "Device has unsupported virtual address space size\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
+ if (!vm_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
+
+ vm_ctx->pvr_dev = pvr_dev;
+ kref_init(&vm_ctx->ref_count);
+ mutex_init(&vm_ctx->lock);
+
+ drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
+ is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
+ 0, &pvr_dev->base, &vm_ctx->dummy_gem,
+ 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
+
+ vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
+	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
+ if (err) {
+ vm_ctx->mmu_ctx = NULL;
+ goto err_put_ctx;
+ }
+
+ if (is_userspace_context) {
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
+
+ if (err)
+ goto err_page_table_destroy;
+ }
+
+ return vm_ctx;
+
+err_page_table_destroy:
+ pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
+
+err_put_ctx:
+ pvr_vm_context_put(vm_ctx);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+static void
+pvr_vm_context_release(struct kref *ref_count)
+{
+ struct pvr_vm_context *vm_ctx =
+ container_of(ref_count, struct pvr_vm_context, ref_count);
+
+ if (vm_ctx->fw_mem_ctx_obj)
+ pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
+
+ WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range));
+
+ pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
+ drm_gem_private_object_fini(&vm_ctx->dummy_gem);
+ mutex_destroy(&vm_ctx->lock);
+
+ drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
+}
+
+/**
+ * pvr_vm_context_lookup() - Look up VM context from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on VM context object. Call pvr_vm_context_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a VM context)
+ */
+struct pvr_vm_context *
+pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_vm_context *vm_ctx;
+
+ xa_lock(&pvr_file->vm_ctx_handles);
+ vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
+ if (vm_ctx)
+ kref_get(&vm_ctx->ref_count);
+
+ xa_unlock(&pvr_file->vm_ctx_handles);
+
+ return vm_ctx;
+}
+
+/**
+ * pvr_vm_context_put() - Release a reference on a VM context
+ * @vm_ctx: Target VM context.
+ *
+ * Returns:
+ * * %true if the VM context was destroyed, or
+ * * %false if there are any references still remaining.
+ */
+bool
+pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
+{
+ if (vm_ctx)
+ return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
+
+ return true;
+}
+
+/**
+ * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated with the
+ * given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all vm_contexts associated with @pvr_file from the device VM context
+ * list and drops initial references. vm_contexts will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_vm_context *vm_ctx;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
+ /* vm_ctx is not used here because that would create a race with xa_erase */
+ pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
+ }
+}
+
+static int
+pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
+{
+ struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
+ struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
+
+ /* Unmap operations don't have an object to lock. */
+ if (!pvr_obj)
+ return 0;
+
+ /* Acquire lock on the GEM being mapped. */
+ return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
+}
+
+/**
+ * pvr_vm_map() - Map a section of physical memory into a section of
+ * device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @pvr_obj_offset: Offset into @pvr_obj to map from.
+ * @device_addr: Virtual device address at the start of the requested mapping.
+ * @size: Size of the requested mapping.
+ *
+ * No handle is returned to represent the mapping. Instead, callers should
+ * remember @device_addr and use that as a handle.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ * address; the region specified by @pvr_obj_offset and @size does not fall
+ * entirely within @pvr_obj, or any part of the specified region of @pvr_obj
+ * is not device-virtual page-aligned,
+ * * Any error encountered while performing internal operations required to
+ * create the mapping (returned from pvr_vm_gpuva_map or
+ * pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 pvr_obj_offset, u64 device_addr, u64 size)
+{
+ struct pvr_vm_bind_op bind_op = {0};
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm_ctx->gpuvm_mgr,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES,
+ .extra = {
+ .fn = pvr_vm_lock_extra,
+ .priv = &bind_op,
+ },
+ };
+
+ int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
+ pvr_obj_offset, device_addr,
+ size);
+
+ if (err)
+ return err;
+
+ pvr_gem_object_get(pvr_obj);
+
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err)
+ goto err_cleanup;
+
+ err = pvr_vm_bind_op_exec(&bind_op);
+
+ drm_gpuvm_exec_unlock(&vm_exec);
+
+err_cleanup:
+ pvr_vm_bind_op_fini(&bind_op);
+
+ return err;
+}
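
A hedged sketch of the pairing described in the kernel-doc above: pvr_vm_map() returns no handle, so the caller keeps @device_addr and later passes it back to pvr_vm_unmap(). The wrapper name is illustrative.

static int example_map_then_unmap(struct pvr_vm_context *vm_ctx,
				  struct pvr_gem_object *pvr_obj,
				  u64 device_addr, u64 size)
{
	int err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);

	if (err)
		return err;

	/* ... use the mapping; device_addr doubles as its handle ... */

	return pvr_vm_unmap(vm_ctx, device_addr, size);
}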
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ * address,
+ * * Any error encountered while performing internal operations required to
+ * destroy the mapping (returned from pvr_vm_gpuva_unmap or
+ * pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_vm_bind_op bind_op = {0};
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm_ctx->gpuvm_mgr,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES,
+ .extra = {
+ .fn = pvr_vm_lock_extra,
+ .priv = &bind_op,
+ },
+ };
+
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
+ size);
+ if (err)
+ return err;
+
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err)
+ goto err_cleanup;
+
+ err = pvr_vm_bind_op_exec(&bind_op);
+
+ drm_gpuvm_exec_unlock(&vm_exec);
+
+err_cleanup:
+ pvr_vm_bind_op_fini(&bind_op);
+
+ return err;
+}
+
+/* Static data areas are determined by firmware. */
+static const struct drm_pvr_static_data_area static_data_areas[] = {
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
+ .location_heap_id = DRM_PVR_HEAP_GENERAL,
+ .offset = 0,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+ .location_heap_id = DRM_PVR_HEAP_GENERAL,
+ .offset = 128,
+ .size = 1024,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+ .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+ .offset = 0,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
+ .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+ .offset = 128,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+ .location_heap_id = DRM_PVR_HEAP_USC_CODE,
+ .offset = 0,
+ .size = 128,
+ },
+};
+
+#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
+
+/*
+ * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
+ * static data area for each heap.
+ */
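
A brief worked example, assuming a 4 KiB PAGE_SIZE: the general heap's last static data area sits at offset 128 with size 1024, so GET_RESERVED_SIZE(128, 1024) = round_up(1152, PAGE_SIZE) = 4096 bytes reserved.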
+static const struct drm_pvr_heap pvr_heaps[] = {
+ [DRM_PVR_HEAP_GENERAL] = {
+ .base = ROGUE_GENERAL_HEAP_BASE,
+ .size = ROGUE_GENERAL_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_PDS_CODE_DATA] = {
+ .base = ROGUE_PDSCODEDATA_HEAP_BASE,
+ .size = ROGUE_PDSCODEDATA_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_USC_CODE] = {
+ .base = ROGUE_USCCODE_HEAP_BASE,
+ .size = ROGUE_USCCODE_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_RGNHDR] = {
+ .base = ROGUE_RGNHDR_HEAP_BASE,
+ .size = ROGUE_RGNHDR_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_VIS_TEST] = {
+ .base = ROGUE_VISTEST_HEAP_BASE,
+ .size = ROGUE_VISTEST_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_TRANSFER_FRAG] = {
+ .base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
+ .size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+};
+
+int
+pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_static_data_areas query = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ if (err < 0)
+ return err;
+
+ if (!query.static_data_areas.array) {
+ query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+ query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
+ goto copy_out;
+ }
+
+ if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
+ query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+
+ err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
+ if (err < 0)
+ return err;
+
+copy_out:
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+int
+pvr_heap_info_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_heap_info query = {0};
+ u64 dest;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_heap_info);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ if (err < 0)
+ return err;
+
+ if (!query.heaps.array) {
+ query.heaps.count = ARRAY_SIZE(pvr_heaps);
+ query.heaps.stride = sizeof(struct drm_pvr_heap);
+ goto copy_out;
+ }
+
+ if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
+ query.heaps.count = ARRAY_SIZE(pvr_heaps);
+
+ /* Region header heap is only present if BRN63142 is present. */
+ dest = query.heaps.array;
+ for (size_t i = 0; i < query.heaps.count; i++) {
+ struct drm_pvr_heap heap = pvr_heaps[i];
+
+ if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
+ heap.size = 0;
+
+ err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
+ if (err < 0)
+ return err;
+
+ dest += query.heaps.stride;
+ }
+
+copy_out:
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
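
Both query helpers above follow the same two-phase convention: a NULL array pointer returns only the element count and stride, and a second call fills a caller-provided array. A hedged userspace-side sketch; the ioctl macro and query type names are assumptions, the struct fields mirror the kernel code, and error handling is omitted.

/* Needs <stdint.h>, <stdlib.h>, <sys/ioctl.h> and the pvr_drm.h uAPI header. */
static int example_query_heaps(int fd)
{
	struct drm_pvr_dev_query_heap_info heap_query = { 0 };
	struct drm_pvr_ioctl_dev_query_args args = {
		.type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,	/* assumed name */
		.pointer = (uintptr_t)&heap_query,
		.size = sizeof(heap_query),
	};

	/* First pass: heaps.array is NULL, so only count/stride come back. */
	ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);		/* assumed macro */

	heap_query.heaps.array =
		(uintptr_t)calloc(heap_query.heaps.count, heap_query.heaps.stride);

	/* Second pass: the kernel fills the array, clamping count if needed. */
	return ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
}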
+
+/**
+ * pvr_heap_contains_range() - Determine if a given heap contains the specified
+ * device-virtual address range.
+ * @pvr_heap: Target heap.
+ * @start: Inclusive start of the target range.
+ * @end: Inclusive end of the target range.
+ *
+ * It is an error to call this function with values of @start and @end that do
+ * not satisfy the condition @start <= @end.
+ */
+static __always_inline bool
+pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
+{
+ return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
+}
+
+/**
+ * pvr_find_heap_containing() - Find a heap which contains the specified
+ * device-virtual address range.
+ * @pvr_dev: Target PowerVR device.
+ * @start: Start of the target range.
+ * @size: Size of the target range.
+ *
+ * Return:
+ * * A pointer to a constant instance of struct drm_pvr_heap representing the
+ * heap containing the entire range specified by @start and @size on
+ * success, or
+ * * %NULL if no such heap exists.
+ */
+const struct drm_pvr_heap *
+pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
+{
+ u64 end;
+
+ if (check_add_overflow(start, size - 1, &end))
+ return NULL;
+
+ /*
+ * There are no guarantees about the order of address ranges in
+ * &pvr_heaps, so iterate over the entire array for a heap whose
+ * range completely encompasses the given range.
+ */
+ for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
+		/* Skip heaps that are only present when an associated quirk is set. */
+ if (heap_id == DRM_PVR_HEAP_RGNHDR &&
+ !PVR_HAS_QUIRK(pvr_dev, 63142)) {
+ continue;
+ }
+
+ if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
+ return &pvr_heaps[heap_id];
+ }
+
+ return NULL;
+}
+
+/**
+ * pvr_vm_find_gem_object() - Look up a buffer object from a given
+ * device-virtual address.
+ * @vm_ctx: [IN] Target VM context.
+ * @device_addr: [IN] Virtual device address at the start of the required
+ * object.
+ * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
+ * of the mapped region within the buffer object. May be
+ * %NULL if this information is not required.
+ * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
+ * region. May be %NULL if this information is not required.
+ *
+ * If successful, a reference will be taken on the buffer object. The caller
+ * must drop the reference with pvr_gem_object_put().
+ *
+ * Return:
+ * * The PowerVR buffer object mapped at @device_addr if one exists, or
+ * * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
+ u64 *mapped_offset_out, u64 *mapped_size_out)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
+ if (!va)
+ goto err_unlock;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ pvr_gem_object_get(pvr_obj);
+
+ if (mapped_offset_out)
+ *mapped_offset_out = va->gem.offset;
+ if (mapped_size_out)
+ *mapped_size_out = va->va.range;
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return pvr_obj;
+
+err_unlock:
+ mutex_unlock(&vm_ctx->lock);
+
+ return NULL;
+}
+
+/**
+ * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
+ * @vm_ctx: Target VM context.
+ *
+ * Returns:
+ * * FW object representing firmware memory context, or
+ * * %NULL if this VM context does not have a firmware memory context.
+ */
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
+{
+ return vm_ctx->fw_mem_ctx_obj;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
new file mode 100644
index 000000000000..f2a6463f2b05
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_VM_H
+#define PVR_VM_H
+
+#include "pvr_rogue_mmu_defs.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_gem.h" */
+struct pvr_gem_object;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <uapi/drm/pvr_drm.h> */
+struct drm_pvr_ioctl_get_heap_info_args;
+
+/* Forward declaration from <drm/drm_exec.h> */
+struct drm_exec;
+
+/* Functions defined in pvr_vm.c */
+
+bool pvr_device_addr_is_valid(u64 device_addr);
+bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+ u64 device_addr, u64 size);
+
+struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
+ bool is_userspace_context);
+
+int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+ u64 device_addr, u64 size);
+int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
+struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
+
+int pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args);
+int pvr_heap_info_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args);
+const struct drm_pvr_heap *pvr_find_heap_containing(struct pvr_device *pvr_dev,
+ u64 addr, u64 size);
+
+struct pvr_gem_object *pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx,
+ u64 device_addr,
+ u64 *mapped_offset_out,
+ u64 *mapped_size_out);
+
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx);
+
+struct pvr_vm_context *pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle);
+struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx);
+bool pvr_vm_context_put(struct pvr_vm_context *vm_ctx);
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file);
+
+#endif /* PVR_VM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
new file mode 100644
index 000000000000..2bc7181a4c3e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_mmu.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm.h"
+#include "pvr_vm_mips.h"
+
+#include <drm/drm_managed.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/**
+ * pvr_vm_mips_init() - Initialise MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL if the page table size or physical bus width is invalid, or
+ * * -%ENOMEM on page allocation, DMA mapping or vmap() failure.
+ */
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev)
+{
+ u32 pt_size = 1 << ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev);
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+ struct pvr_fw_mips_data *mips_data;
+ u32 phys_bus_width;
+ int page_nr;
+ int err;
+
+ /* Page table size must be at most ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * 4k pages. */
+ if (pt_size > ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * SZ_4K)
+ return -EINVAL;
+
+ if (PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width))
+ return -EINVAL;
+
+ mips_data = drmm_kzalloc(from_pvr_device(pvr_dev), sizeof(*mips_data), GFP_KERNEL);
+ if (!mips_data)
+ return -ENOMEM;
+
+ for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
+ mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!mips_data->pt_pages[page_nr]) {
+ err = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ mips_data->pt_dma_addr[page_nr] = dma_map_page(dev, mips_data->pt_pages[page_nr], 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) {
+ err = -ENOMEM;
+ __free_page(mips_data->pt_pages[page_nr]);
+ goto err_free_pages;
+ }
+ }
+
+ mips_data->pt = vmap(mips_data->pt_pages, pt_size >> PAGE_SHIFT, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!mips_data->pt) {
+ err = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ mips_data->pfn_mask = (phys_bus_width > 32) ? ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT :
+ ROGUE_MIPSFW_ENTRYLO_PFN_MASK;
+
+ mips_data->cache_policy = (phys_bus_width > 32) ? ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT :
+ ROGUE_MIPSFW_CACHED_POLICY;
+
+ pvr_dev->fw_dev.processor_data.mips_data = mips_data;
+
+ return 0;
+
+err_free_pages:
+ while (--page_nr >= 0) {
+ dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+ mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+
+ __free_page(mips_data->pt_pages[page_nr]);
+ }
+
+ return err;
+}
+
+/**
+ * pvr_vm_mips_fini() - Release MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ int page_nr;
+
+ vunmap(mips_data->pt);
+ for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
+ dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+ mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+
+ __free_page(mips_data->pt_pages[page_nr]);
+ }
+
+ fw_dev->processor_data.mips_data = NULL;
+}
+
+static u32
+get_mips_pte_flags(bool read, bool write, u32 cache_policy)
+{
+ u32 flags = 0;
+
+ if (read && write) /* Read/write. */
+ flags |= ROGUE_MIPSFW_ENTRYLO_DIRTY_EN;
+ else if (write) /* Write only. */
+ flags |= ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN;
+ else
+ WARN_ON(!read);
+
+ flags |= cache_policy << ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+
+ flags |= ROGUE_MIPSFW_ENTRYLO_VALID_EN | ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN;
+
+ return flags;
+}
+
+/**
+ * pvr_vm_mips_map() - Map a FW object into MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL if object does not reside within FW address space, or
+ * * Any error returned by pvr_fw_object_get_dma_addr().
+ */
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ const u64 start = fw_obj->fw_mm_node.start;
+ const u64 size = fw_obj->fw_mm_node.size;
+ u64 end;
+ u32 cache_policy;
+ u32 pte_flags;
+ u32 start_pfn;
+ u32 end_pfn;
+ s32 pfn;
+ int err;
+
+ if (check_add_overflow(start, size - 1, &end))
+ return -EINVAL;
+
+ if (start < ROGUE_FW_HEAP_BASE ||
+ start >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+ end < ROGUE_FW_HEAP_BASE ||
+ end >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+ (start & ROGUE_MIPSFW_PAGE_MASK_4K) ||
+ ((end + 1) & ROGUE_MIPSFW_PAGE_MASK_4K))
+ return -EINVAL;
+
+ start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+ end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+
+ if (pvr_obj->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ cache_policy = ROGUE_MIPSFW_UNCACHED_CACHE_POLICY;
+ else
+ cache_policy = mips_data->cache_policy;
+
+ pte_flags = get_mips_pte_flags(true, true, cache_policy);
+
+ for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+ dma_addr_t dma_addr;
+ u32 pte;
+
+ err = pvr_fw_object_get_dma_addr(fw_obj,
+ (pfn - start_pfn) <<
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K,
+ &dma_addr);
+ if (err)
+ goto err_unmap_pages;
+
+ pte = ((dma_addr >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+ << ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT) & mips_data->pfn_mask;
+ pte |= pte_flags;
+
+ WRITE_ONCE(mips_data->pt[pfn], pte);
+ }
+
+ pvr_mmu_flush_request_all(pvr_dev);
+
+ return 0;
+
+err_unmap_pages:
+ for (; pfn >= start_pfn; pfn--)
+ WRITE_ONCE(mips_data->pt[pfn], 0);
+
+ pvr_mmu_flush_request_all(pvr_dev);
+ WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));
+
+ return err;
+}
+
+/**
+ * pvr_vm_mips_unmap() - Unmap a FW object from MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to unmap.
+ */
+void
+pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ const u64 start = fw_obj->fw_mm_node.start;
+ const u64 size = fw_obj->fw_mm_node.size;
+ const u64 end = start + size;
+
+ const u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >>
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+ const u32 end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >>
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+
+ for (u32 pfn = start_pfn; pfn < end_pfn; pfn++)
+ WRITE_ONCE(mips_data->pt[pfn], 0);
+
+ pvr_mmu_flush_request_all(pvr_dev);
+ WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.h b/drivers/gpu/drm/imagination/pvr_vm_mips.h
new file mode 100644
index 000000000000..0fd59f68fb5b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_VM_MIPS_H
+#define PVR_VM_MIPS_H
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev);
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev);
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+void
+pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+#endif /* PVR_VM_MIPS_H */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index b61cec0cc79d..ad5f29ea8f6a 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -80,7 +80,7 @@ err:
return err;
}
-static int dcss_drv_platform_remove(struct platform_device *pdev)
+static void dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
@@ -88,8 +88,6 @@ static int dcss_drv_platform_remove(struct platform_device *pdev)
dcss_dev_destroy(mdrv->dcss);
kfree(mdrv);
-
- return 0;
}
static void dcss_drv_platform_shutdown(struct platform_device *pdev)
@@ -120,7 +118,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match);
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
- .remove = dcss_drv_platform_remove,
+ .remove_new = dcss_drv_platform_remove,
.shutdown = dcss_drv_platform_shutdown,
.driver = {
.name = "imx-dcss",
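
Several hunks below repeat the same mechanical conversion: the platform remove callback drops its int return and is registered via .remove_new. A hedged generic sketch with illustrative names:

static void example_platform_remove(struct platform_device *pdev)
{
	struct example_drv *drv = platform_get_drvdata(pdev);	/* hypothetical type */

	example_teardown(drv);	/* hypothetical cleanup helper */
	/* No return value: there is nothing useful a caller could do with an error here. */
}

static struct platform_driver example_platform_driver = {
	.probe      = example_platform_probe,	/* hypothetical */
	.remove_new = example_platform_remove,
	.driver     = {
		.name = "example",
	},
};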
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 989eca32d325..53840ab054c7 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -12,8 +12,10 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
@@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
struct imx_ldb *imx_ldb;
int dual;
@@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
imx_ldb->dev = dev;
-
- if (of_id)
- imx_ldb->lvds_mux = of_id->data;
+ imx_ldb->lvds_mux = device_get_match_data(dev);
dual = of_property_read_bool(np, "fsl,dual-channel");
if (dual)
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 22b65f4a0e30..43ddf3a9810b 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers =
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
-static void imx_lcdc_release(struct drm_device *drm)
-{
- struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);
-
- drm_kms_helper_poll_fini(drm);
- kfree(lcdc);
-}
-
DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);
static struct drm_driver imx_lcdc_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &imx_lcdc_drm_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
- .release = imx_lcdc_release,
.name = "imx-lcdc",
.desc = "i.MX LCDC driver",
.date = "20200716",
@@ -515,14 +506,12 @@ static int imx_lcdc_probe(struct platform_device *pdev)
return 0;
}
-static int imx_lcdc_remove(struct platform_device *pdev)
+static void imx_lcdc_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void imx_lcdc_shutdown(struct platform_device *pdev)
@@ -536,7 +525,7 @@ static struct platform_driver imx_lcdc_driver = {
.of_match_table = imx_lcdc_of_dev_id,
},
.probe = imx_lcdc_probe,
- .remove = imx_lcdc_remove,
+ .remove_new = imx_lcdc_remove,
.shutdown = imx_lcdc_shutdown,
};
module_platform_driver(imx_lcdc_driver);
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 24035b53441c..169b83987ce2 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -448,7 +448,7 @@ static const struct drm_driver kmb_driver = {
.minor = DRIVER_MINOR,
};
-static int kmb_remove(struct platform_device *pdev)
+static void kmb_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct drm_device *drm = dev_get_drvdata(dev);
@@ -473,7 +473,6 @@ static int kmb_remove(struct platform_device *pdev)
/* Unregister DSI host */
kmb_dsi_host_unregister(kmb->kmb_dsi);
drm_atomic_helper_shutdown(drm);
- return 0;
}
static int kmb_probe(struct platform_device *pdev)
@@ -621,7 +620,7 @@ static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume);
static struct platform_driver kmb_platform_driver = {
.probe = kmb_probe,
- .remove = kmb_remove,
+ .remove_new = kmb_remove,
.driver = {
.name = "kmb-drm",
.pm = &kmb_pm_ops,
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 02cef0cea657..0bf7105c8748 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -514,7 +514,7 @@ int lima_device_suspend(struct device *dev)
/* check any task running */
for (i = 0; i < lima_pipe_num; i++) {
- if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+ if (atomic_read(&ldev->pipe[i].base.credit_count))
return -EBUSY;
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 295f0353a02e..c3bf8cda8498 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -123,7 +123,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->base.base);
- err = drm_sched_job_init(&task->base, &context->base, vm);
+ err = drm_sched_job_init(&task->base, &context->base, 1, vm);
if (err) {
kfree(task->bos);
return err;
@@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
- return drm_sched_init(&pipe->base, &lima_sched_ops,
+ return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
1,
lima_job_hang_limit,
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index df6946d505fa..8e59753e532d 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -3,6 +3,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
depends on DRM && PCI && MMU
+ depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_TTM
select I2C
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index 0d5094633222..d227a2c1dcf1 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "lsdc_drv.h"
#include "lsdc_regs.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 6bf6367853fb..3fdef3ad4ffd 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -531,16 +531,15 @@ static int mtk_disp_ovl_adaptor_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_ovl_adaptor_remove(struct platform_device *pdev)
+static void mtk_disp_ovl_adaptor_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &mtk_disp_ovl_adaptor_master_ops);
pm_runtime_disable(&pdev->dev);
- return 0;
}
struct platform_driver mtk_disp_ovl_adaptor_driver = {
.probe = mtk_disp_ovl_adaptor_probe,
- .remove = mtk_disp_ovl_adaptor_remove,
+ .remove_new = mtk_disp_ovl_adaptor_remove,
.driver = {
.name = "mediatek-disp-ovl-adaptor",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index db7ac666ec5e..6a5d0c345aab 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -346,10 +346,9 @@ static int mtk_ethdr_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_ethdr_remove(struct platform_device *pdev)
+static void mtk_ethdr_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_ethdr_component_ops);
- return 0;
}
static const struct of_device_id mtk_ethdr_driver_dt_match[] = {
@@ -361,7 +360,7 @@ MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match);
struct platform_driver mtk_ethdr_driver = {
.probe = mtk_ethdr_probe,
- .remove = mtk_ethdr_remove,
+ .remove_new = mtk_ethdr_remove,
.driver = {
.name = "mediatek-disp-ethdr",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index e5fe4e994f43..a6bc1bdb3d0d 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -323,13 +323,11 @@ static int meson_dw_mipi_dsi_probe(struct platform_device *pdev)
return 0;
}
-static int meson_dw_mipi_dsi_remove(struct platform_device *pdev)
+static void meson_dw_mipi_dsi_remove(struct platform_device *pdev)
{
struct meson_dw_mipi_dsi *mipi_dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(mipi_dsi->dmd);
-
- return 0;
}
static const struct of_device_id meson_dw_mipi_dsi_of_table[] = {
@@ -340,7 +338,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table);
static struct platform_driver meson_dw_mipi_dsi_platform_driver = {
.probe = meson_dw_mipi_dsi_probe,
- .remove = meson_dw_mipi_dsi_remove,
+ .remove_new = meson_dw_mipi_dsi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_mipi_dsi_of_table,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 41b13dec9bef..f62ab5257e66 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -841,7 +841,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
- kthread_park(sched->thread);
+
+ drm_sched_wqueue_stop(sched);
}
}
@@ -851,7 +852,8 @@ static void resume_scheduler(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
- kthread_unpark(sched->thread);
+
+ drm_sched_wqueue_start(sched);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 8b5c5031e2d9..0d143e390eca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -405,6 +405,7 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
.min_llcc_ib = 0,
.min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc8180x_qos_linear),
.entries = sc8180x_qos_linear
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 48f447f4e183..0827634664ae 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -825,8 +825,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0;
fail:
- if (mdp5_kms)
- mdp5_destroy(mdp5_kms);
+ mdp5_destroy(mdp5_kms);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index f8a9e8403c26..0405ff08d762 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -337,9 +337,11 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
/* reset video pattern flag on disconnect */
if (!hpd) {
dp->panel->video_test = false;
- drm_dp_set_subconnector_property(dp->dp_display.connector,
- connector_status_disconnected,
- dp->panel->dpcd, dp->panel->downstream_ports);
+ if (!dp->dp_display.is_edp)
+ drm_dp_set_subconnector_property(dp->dp_display.connector,
+ connector_status_disconnected,
+ dp->panel->dpcd,
+ dp->panel->downstream_ports);
}
dp->dp_display.link_ready = hpd;
@@ -368,8 +370,11 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp_link_process_request(dp->link);
- drm_dp_set_subconnector_property(dp->dp_display.connector, connector_status_connected,
- dp->panel->dpcd, dp->panel->downstream_ports);
+ if (!dp->dp_display.is_edp)
+ drm_dp_set_subconnector_property(dp->dp_display.connector,
+ connector_status_connected,
+ dp->panel->dpcd,
+ dp->panel->downstream_ports);
edid = dp->panel->edid;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 0efb501490cc..46e6889037e8 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -361,6 +361,9 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct dr
if (IS_ERR(connector))
return connector;
+ if (!dp_display->is_edp)
+ drm_connector_attach_dp_subconnector_property(connector);
+
drm_connector_attach_encoder(connector, encoder);
return connector;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index c66193f2dc0d..82d015aa2d63 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -918,7 +918,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45;
- vreg_ctrl_1 = 0x45;
+ vreg_ctrl_1 = 0x41;
glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x00;
} else {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 75d60aab5780..50b65ffc24b1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -289,8 +289,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- drm_kms_helper_poll_init(ddev);
-
if (priv->kms_init) {
drm_kms_helper_poll_init(ddev);
msm_fbdev_setup(ddev);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 30d72191cee6..7aa5e14ab9aa 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -54,7 +54,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- ret = drm_sched_job_init(&submit->base, queue->entity, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4252e3839fbc..2bfcb222e353 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
* DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
* cases, so we don't use it (no need for kernel generated jobs).
*/
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
* struct msm_file_private - per-drm_file context
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 95257ab0185d..4968568e3b54 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -94,7 +94,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
/* currently managing hangcheck ourselves: */
sched_timeout = MAX_SCHEDULE_TIMEOUT;
- ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+ ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
num_hw_submissions, 0, sched_timeout,
NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 625c1bfc4173..b483ef48216a 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -11,9 +11,10 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
@@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
static int mxsfb_probe(struct platform_device *pdev)
{
struct drm_device *drm;
- const struct of_device_id *of_id =
- of_match_device(mxsfb_dt_ids, &pdev->dev);
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = mxsfb_load(drm, of_id->data);
+ ret = mxsfb_load(drm, device_get_match_data(&pdev->dev));
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 7840b6428afb..11fe75b68e95 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -38,7 +38,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -945,7 +947,8 @@ nv50_msto_prepare(struct drm_atomic_state *state,
if (ret == 0) {
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
payload->vc_start_slot, payload->time_slots,
- payload->pbn, payload->time_slots * mst_state->pbn_div);
+ payload->pbn,
+ payload->time_slots * dfixed_trunc(mst_state->pbn_div));
} else {
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
}
@@ -982,15 +985,14 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
const int clock = crtc_state->adjusted_mode.clock;
asyh->or.bpc = connector->display_info.bpc;
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
- false);
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3 << 4);
}
mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- if (!mst_state->pbn_div) {
+ if (!mst_state->pbn_div.full) {
struct nouveau_encoder *outp = mstc->mstm->outp;
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
index 82b267c11147..460459af272d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
@@ -14,7 +14,7 @@ struct nvkm_event {
int index_nr;
spinlock_t refs_lock;
- spinlock_t list_lock;
+ rwlock_t list_lock;
int *refs;
struct list_head ntfy;
@@ -38,7 +38,7 @@ nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
int types_nr, int index_nr, struct nvkm_event *event)
{
spin_lock_init(&event->refs_lock);
- spin_lock_init(&event->list_lock);
+ rwlock_init(&event->list_lock);
return __nvkm_event_init(func, subdev, types_nr, index_nr, event);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2edd7bb13fae..a04156ca8390 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -127,21 +127,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
- /* When a client exits without waiting for it's queued up jobs to
- * finish it might happen that we fault the channel. This is due to
- * drm_file_free() calling drm_gem_release() before the postclose()
- * callback. Hence, we can't tear down this scheduler entity before
- * uvmm mappings are unmapped. Currently, we can't detect this case.
- *
- * However, this should be rare and harmless, since the channel isn't
- * needed anymore.
- */
- nouveau_sched_entity_fini(&chan->sched_entity);
+ /* Cancel all jobs from the entity's queue. */
+ drm_sched_entity_fini(&chan->sched.entity);
- /* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
+ nouveau_sched_fini(&chan->sched);
+
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -344,8 +337,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
- drm->sched_wq);
+ ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
+ chan->chan->dma.ib_max);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 9f538486c10e..1f5e243c0c75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0f3bd187ede6..b7dda486a7ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
*/
- if (bo->base.dev)
+ if (bo->base.dev) {
+ /* Gem objects not being shared with other VMs get their
+ * dma_resv from a root GEM object.
+ */
+ if (nvbo->no_share)
+ drm_gem_object_put(nvbo->r_obj);
+
drm_gem_object_release(&bo->base);
- else
+ } else {
dma_resv_fini(&bo->base._resv);
+ }
kfree(nvbo);
}
@@ -1054,17 +1061,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_gem_object *obj = &bo->base;
struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
-
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
if (ret)
return ret;
}
+ drm_gpuvm_bo_gem_evict(obj, evict);
nouveau_bo_move_ntfy(bo, new_reg);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
@@ -1129,6 +1137,7 @@ out:
out_ntfy:
if (ret) {
nouveau_bo_move_ntfy(bo, bo->resource);
+ drm_gpuvm_bo_gem_evict(obj, !evict);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 07f671cf895e..70c551921a9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,11 @@ struct nouveau_bo {
struct list_head entry;
int pbbo_index;
bool validate_mapped;
+
+ /* Root GEM object we derive the dma_resv of in case this BO is not
+ * shared between VMs.
+ */
+ struct drm_gem_object *r_obj;
bool no_share;
/* GPU address space is independent of CPU word size */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d8c92521226d..f28f9a857458 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -726,6 +726,11 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2) {
ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0, &disp->disp);
+ /* no display hw */
+ if (ret == -ENODEV) {
+ ret = 0;
+ goto disp_create_err;
+ }
if (!ret && (disp->disp.outp_mask || drm->vbios.dcb.entries)) {
nouveau_display_create_properties(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 50589f982d1a..6f6c31a9937b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -190,6 +190,8 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm_locked(cli);
+
/* All our channels are dead now, which means all the fences they
* own are signalled, and all callback functions have been called.
*
@@ -199,8 +201,9 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
- nouveau_uvmm_fini(&cli->uvmm);
- nouveau_sched_entity_fini(&cli->sched_entity);
+ nouveau_sched_fini(&cli->sched);
+ if (uvmm)
+ nouveau_uvmm_fini(uvmm);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@@ -307,8 +310,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
- ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
- drm->sched_wq);
+ /* Don't pass in the (shared) sched_wq in order to let
+ * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+ *
+ * This is required to ensure that for VM_BIND jobs free_job() work and
+ * run_job() work can always run concurrently and hence, free_job() work
+ * can never stall run_job() work. For EXEC jobs we don't have this
+	 * requirement, since an EXEC job's free_job() does not need to take any
+	 * locks that are directly or indirectly held for allocations
+	 * elsewhere.
+ */
+ ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
if (ret)
goto done;
@@ -579,13 +591,16 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
- ret = nouveau_sched_init(drm);
- if (ret)
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq) {
+ ret = -ENOMEM;
goto fail_alloc;
+ }
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
- goto fail_sched;
+ goto fail_wq;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
@@ -655,8 +670,8 @@ fail_ttm:
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
-fail_sched:
- nouveau_sched_fini(drm);
+fail_wq:
+ destroy_workqueue(drm->sched_wq);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@@ -708,10 +723,9 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
- nouveau_sched_fini(drm);
-
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
+ destroy_workqueue(drm->sched_wq);
nvif_parent_dtor(&drm->parent);
mutex_destroy(&drm->clients_lock);
kfree(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e73a233c6572..8a6d94c8b163 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -93,9 +93,12 @@ struct nouveau_cli {
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
struct nouveau_vmm svm;
- struct nouveau_uvmm uvmm;
+ struct {
+ struct nouveau_uvmm *ptr;
+ bool disabled;
+ } uvmm;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched sched;
const struct nvif_mclass *mem;
@@ -121,10 +124,7 @@ struct nouveau_cli_work {
static inline struct nouveau_uvmm *
nouveau_cli_uvmm(struct nouveau_cli *cli)
{
- if (!cli || !cli->uvmm.vmm.cli)
- return NULL;
-
- return &cli->uvmm;
+ return cli ? cli->uvmm.ptr : NULL;
}
static inline struct nouveau_uvmm *
@@ -258,6 +258,9 @@ struct nouveau_drm {
u64 context_base;
} *runl;
+ /* Workqueue used for channel schedulers. */
+ struct workqueue_struct *sched_wq;
+
/* context for accelerated drm-internal operations */
struct nouveau_channel *cechan;
struct nouveau_channel *channel;
@@ -298,10 +301,6 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
-
- struct drm_gpu_scheduler sched;
- struct workqueue_struct *sched_wq;
-
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 9a5ef574744b..bc5d71b79ab2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_exec.h>
-
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
@@ -86,14 +84,12 @@
*/
static int
-nouveau_exec_job_submit(struct nouveau_job *job)
+nouveau_exec_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_cli *cli = job->cli;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
int ret;
/* Create a new fence, but do not emit yet. */
@@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job)
return ret;
nouveau_uvmm_lock(uvmm);
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
- drm_exec_until_all_locked(exec) {
- struct drm_gpuva *va;
-
- drm_gpuvm_for_each_va(va, &uvmm->base) {
- if (unlikely(va == &uvmm->base.kernel_alloc_node))
- continue;
-
- ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- goto err_uvmm_unlock;
- }
+ ret = drm_gpuvm_exec_lock(vme);
+ if (ret) {
+ nouveau_uvmm_unlock(uvmm);
+ return ret;
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_for_each_locked_object(exec, index, obj) {
- struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
- ret = nouveau_bo_validate(nvbo, true, false);
- if (ret)
- goto err_exec_fini;
+ ret = drm_gpuvm_exec_validate(vme);
+ if (ret) {
+ drm_gpuvm_exec_unlock(vme);
+ return ret;
}
return 0;
-
-err_uvmm_unlock:
- nouveau_uvmm_unlock(uvmm);
-err_exec_fini:
- drm_exec_fini(exec);
- return ret;
-
}
static void
-nouveau_exec_job_armed_submit(struct nouveau_job *job)
+nouveau_exec_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -192,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ nouveau_job_done(job);
nouveau_job_free(job);
kfree(exec_job->fence);
@@ -211,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- nouveau_sched_entity_fini(job->entity);
-
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -259,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
}
}
+ args.file_priv = __args->file_priv;
job->chan = __args->chan;
- args.sched_entity = __args->sched_entity;
- args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ /* Plus one to account for the HW fence. */
+ args.credits = job->push.count + 1;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -415,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched_entity = &chan16->sched_entity;
+ args.sched = &chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
index 5488d337bcc0..9b3b151facfd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.h
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -3,16 +3,12 @@
#ifndef __NOUVEAU_EXEC_H__
#define __NOUVEAU_EXEC_H__
-#include <drm/drm_exec.h>
-
#include "nouveau_drv.h"
#include "nouveau_sched.h"
struct nouveau_exec_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
-
- struct drm_exec exec;
+ struct nouveau_sched *sched;
struct nouveau_channel *chan;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0d303e5ce3d..49c2bcbef129 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
- if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+ if (nvbo->no_share && uvmm &&
+ drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
return -EPERM;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (unlikely(!uvmm))
return -EINVAL;
- resv = &uvmm->resv;
+ resv = drm_gpuvm_resv(&uvmm->base);
}
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
@@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
+ if (nvbo->no_share) {
+ nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
+ drm_gem_object_get(nvbo->r_obj);
+ }
+
*pnvbo = nvbo;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 23cd43a7fd19..bf2dc7567ea4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -43,11 +43,10 @@ static int nouveau_platform_probe(struct platform_device *pdev)
return 0;
}
-static int nouveau_platform_remove(struct platform_device *pdev)
+static void nouveau_platform_remove(struct platform_device *pdev)
{
struct drm_device *dev = platform_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
- return 0;
}
#if IS_ENABLED(CONFIG_OF)
@@ -93,5 +92,5 @@ struct platform_driver nouveau_platform_driver = {
.of_match_table = of_match_ptr(nouveau_platform_match),
},
.probe = nouveau_platform_probe,
- .remove = nouveau_platform_remove,
+ .remove_new = nouveau_platform_remove,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 7c376c4ccdcf..dd98f6910f9c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -12,30 +12,28 @@
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
-/* FIXME
- *
- * We want to make sure that jobs currently executing can't be deferred by
- * other jobs competing for the hardware. Otherwise we might end up with job
- * timeouts just because of too many clients submitting too many jobs. We don't
- * want jobs to time out because of system load, but because of the job being
- * too bulky.
- *
- * For now allow for up to 16 concurrent jobs in flight until we know how many
- * rings the hardware can process in parallel.
- */
-#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
+/* Starts at 0, since the DRM scheduler interprets these values as (initial)
+ * indices into the run-queue array.
+ */
+enum nouveau_sched_priority {
+ NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+};
+
int
nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args)
{
- struct nouveau_sched_entity *entity = args->sched_entity;
+ struct nouveau_sched *sched = args->sched;
int ret;
+ INIT_LIST_HEAD(&job->entry);
+
job->file_priv = args->file_priv;
job->cli = nouveau_cli(args->file_priv);
- job->entity = entity;
+ job->sched = sched;
job->sync = args->sync;
job->resv_usage = args->resv_usage;
@@ -86,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job,
ret = -ENOMEM;
goto err_free_objs;
}
-
}
- ret = drm_sched_job_init(&job->base, &entity->base, NULL);
+ ret = drm_sched_job_init(&job->base, &sched->entity,
+ args->credits, NULL);
if (ret)
goto err_free_chains;
@@ -109,6 +107,27 @@ return ret;
}
void
+nouveau_job_fini(struct nouveau_job *job)
+{
+ dma_fence_put(job->done_fence);
+ drm_sched_job_cleanup(&job->base);
+
+ job->ops->free(job);
+}
+
+void
+nouveau_job_done(struct nouveau_job *job)
+{
+ struct nouveau_sched *sched = job->sched;
+
+ spin_lock(&sched->job.list.lock);
+ list_del(&job->entry);
+ spin_unlock(&sched->job.list.lock);
+
+ wake_up(&sched->job.wq);
+}
+
+void
nouveau_job_free(struct nouveau_job *job)
{
kfree(job->in_sync.data);
@@ -117,13 +136,6 @@ nouveau_job_free(struct nouveau_job *job)
kfree(job->out_sync.chains);
}
-void nouveau_job_fini(struct nouveau_job *job)
-{
- dma_fence_put(job->done_fence);
- drm_sched_job_cleanup(&job->base);
- job->ops->free(job);
-}
-
static int
sync_find_fence(struct nouveau_job *job,
struct drm_nouveau_sync *sync,
@@ -261,8 +273,13 @@ nouveau_job_fence_attach(struct nouveau_job *job)
int
nouveau_job_submit(struct nouveau_job *job)
{
- struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+ struct nouveau_sched *sched = job->sched;
struct dma_fence *done_fence = NULL;
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &nouveau_cli_uvmm(job->cli)->base,
+ .flags = DRM_EXEC_IGNORE_DUPLICATES,
+ .num_fences = 1,
+ };
int ret;
ret = nouveau_job_add_deps(job);
@@ -276,46 +293,29 @@ nouveau_job_submit(struct nouveau_job *job)
/* Make sure the job appears on the sched_entity's queue in the same
* order as it was submitted.
*/
- mutex_lock(&entity->mutex);
+ mutex_lock(&sched->mutex);
/* Guarantee we won't fail after the submit() callback returned
* successfully.
*/
if (job->ops->submit) {
- ret = job->ops->submit(job);
+ ret = job->ops->submit(job, &vm_exec);
if (ret)
goto err_cleanup;
}
+ /* Submit was successful; add the job to the scheduler's job list. */
+ spin_lock(&sched->job.list.lock);
+ list_add(&job->entry, &sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
- /* If a sched job depends on a dma-fence from a job from the same GPU
- * scheduler instance, but a different scheduler entity, the GPU
- * scheduler does only wait for the particular job to be scheduled,
- * rather than for the job to fully complete. This is due to the GPU
- * scheduler assuming that there is a scheduler instance per ring.
- * However, the current implementation, in order to avoid arbitrary
- * amounts of kthreads, has a single scheduler instance while scheduler
- * entities represent rings.
- *
- * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
- * out-fences in order to force the scheduler to wait for full job
- * completion for dependent jobs from different entities and same
- * scheduler instance.
- *
- * There is some work in progress [1] to address the issues of firmware
- * schedulers; once it is in-tree the scheduler topology in Nouveau
- * should be re-worked accordingly.
- *
- * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
- */
- set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
-
if (job->ops->armed_submit)
- job->ops->armed_submit(job);
+ job->ops->armed_submit(job, &vm_exec);
nouveau_job_fence_attach(job);
@@ -326,7 +326,7 @@ nouveau_job_submit(struct nouveau_job *job)
drm_sched_entity_push_job(&job->base);
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
if (done_fence) {
dma_fence_wait(done_fence, true);
@@ -336,20 +336,13 @@ nouveau_job_submit(struct nouveau_job *job)
return 0;
err_cleanup:
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
nouveau_job_fence_attach_cleanup(job);
err:
job->state = NOUVEAU_JOB_SUBMIT_FAILED;
return ret;
}
-bool
-nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work)
-{
- return queue_work(entity->sched_wq, work);
-}
-
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
@@ -399,50 +392,82 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq)
-{
- mutex_init(&entity->mutex);
- spin_lock_init(&entity->job.list.lock);
- INIT_LIST_HEAD(&entity->job.list.head);
- init_waitqueue_head(&entity->job.wq);
-
- entity->sched_wq = sched_wq;
- return drm_sched_entity_init(&entity->base,
- DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, NULL);
-}
-
-void
-nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
-{
- drm_sched_entity_destroy(&entity->base);
-}
-
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
};
-int nouveau_sched_init(struct nouveau_drm *drm)
+int
+nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
{
- struct drm_gpu_scheduler *sched = &drm->sched;
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ int ret;
- drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
- if (!drm->sched_wq)
- return -ENOMEM;
+ if (!wq) {
+ wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
+ current->pid);
+ if (!wq)
+ return -ENOMEM;
+
+ sched->wq = wq;
+ }
- return drm_sched_init(sched, &nouveau_sched_ops,
- DRM_SCHED_PRIORITY_COUNT,
- NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
- NULL, NULL, "nouveau_sched", drm->dev->dev);
+ ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+ credit_limit, 0, job_hang_limit,
+ NULL, NULL, "nouveau_sched", drm->dev->dev);
+ if (ret)
+ goto fail_wq;
+
+ /* Use DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to use
+ * when we want a single run-queue only.
+ *
+ * It's not documented, but using any other priority here leads to
+ * faults, because the scheduler uses the priority as an array index.
+ *
+ * NOUVEAU_SCHED_PRIORITY_SINGLE can't be used either, because it
+ * doesn't match the enum type expected by drm_sched_entity_init().
+ */
+ ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL,
+ &drm_sched, 1, NULL);
+ if (ret)
+ goto fail_sched;
+
+ mutex_init(&sched->mutex);
+ spin_lock_init(&sched->job.list.lock);
+ INIT_LIST_HEAD(&sched->job.list.head);
+ init_waitqueue_head(&sched->job.wq);
+
+ return 0;
+
+fail_sched:
+ drm_sched_fini(drm_sched);
+fail_wq:
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
+ return ret;
}
-void nouveau_sched_fini(struct nouveau_drm *drm)
+void
+nouveau_sched_fini(struct nouveau_sched *sched)
{
- destroy_workqueue(drm->sched_wq);
- drm_sched_fini(&drm->sched);
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
+
+ rmb(); /* for list_empty to work without lock */
+ wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+
+ drm_sched_entity_fini(entity);
+ drm_sched_fini(drm_sched);
+
+ /* Destroy the workqueue after scheduler teardown, otherwise it might
+ * still be in use.
+ */
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 27ac19792597..a6528f5981e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -5,7 +5,7 @@
#include <linux/types.h>
-#include <drm/drm_exec.h>
+#include <drm/drm_gpuvm.h>
#include <drm/gpu_scheduler.h>
#include "nouveau_drv.h"
@@ -26,7 +26,8 @@ enum nouveau_job_state {
struct nouveau_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
+ u32 credits;
enum dma_resv_usage resv_usage;
bool sync;
@@ -49,12 +50,12 @@ struct nouveau_job {
enum nouveau_job_state state;
- struct nouveau_sched_entity *entity;
+ struct nouveau_sched *sched;
+ struct list_head entry;
struct drm_file *file_priv;
struct nouveau_cli *cli;
- struct drm_exec exec;
enum dma_resv_usage resv_usage;
struct dma_fence *done_fence;
@@ -76,8 +77,8 @@ struct nouveau_job {
/* If .submit() returns without any error, it is guaranteed that
* armed_submit() is called.
*/
- int (*submit)(struct nouveau_job *);
- void (*armed_submit)(struct nouveau_job *);
+ int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
+ void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
struct dma_fence *(*run)(struct nouveau_job *);
void (*free)(struct nouveau_job *);
enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
@@ -90,20 +91,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
int nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args);
-void nouveau_job_free(struct nouveau_job *job);
-
-int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_done(struct nouveau_job *job);
+void nouveau_job_free(struct nouveau_job *job);
-#define to_nouveau_sched_entity(entity) \
- container_of((entity), struct nouveau_sched_entity, base)
-
-struct nouveau_sched_entity {
- struct drm_sched_entity base;
+struct nouveau_sched {
+ struct drm_gpu_scheduler base;
+ struct drm_sched_entity entity;
+ struct workqueue_struct *wq;
struct mutex mutex;
- struct workqueue_struct *sched_wq;
-
struct {
struct {
struct list_head head;
@@ -113,15 +111,8 @@ struct nouveau_sched_entity {
} job;
};
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq);
-void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
-
-bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work);
-
-int nouveau_sched_init(struct nouveau_drm *drm);
-void nouveau_sched_fini(struct nouveau_drm *drm);
+int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_fini(struct nouveau_sched *sched);
#endif
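(Illustration only, not part of the patch: a minimal sketch of how a caller might drive the reworked per-instance interface above. The embedding structure, function names and the credit limit of 1 are hypothetical; only the nouveau_sched_init()/nouveau_sched_fini() signatures come from this series.)

	#include "nouveau_sched.h"

	/* Hypothetical per-client setup: passing a NULL wq makes
	 * nouveau_sched_init() allocate its own workqueue; the last argument
	 * is forwarded to drm_sched_init() as the scheduler's credit limit.
	 */
	static int example_client_sched_init(struct nouveau_sched *sched,
					     struct nouveau_drm *drm)
	{
		return nouveau_sched_init(sched, drm, NULL, 1);
	}

	/* Hypothetical teardown: nouveau_sched_fini() waits for the job list
	 * to drain, then destroys the entity, the scheduler and (if it owns
	 * one) the workqueue.
	 */
	static void example_client_sched_fini(struct nouveau_sched *sched)
	{
		nouveau_sched_fini(sched);
	}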
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 5cf892c50f43..dae3baf707a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -62,6 +62,8 @@ struct bind_job_op {
enum vm_bind_op op;
u32 flags;
+ struct drm_gpuvm_bo *vm_bo;
+
struct {
u64 addr;
u64 range;
@@ -436,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
static void
op_map_prepare_unwind(struct nouveau_uvma *uvma)
{
+ struct drm_gpuva *va = &uvma->va;
nouveau_uvma_gem_put(uvma);
- drm_gpuva_remove(&uvma->va);
+ drm_gpuva_remove(va);
nouveau_uvma_free(uvma);
}
@@ -466,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
break;
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
if (r->next)
op_map_prepare_unwind(new->next);
@@ -473,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (r->prev)
op_map_prepare_unwind(new->prev);
- op_unmap_prepare_unwind(r->unmap->va);
+ op_unmap_prepare_unwind(va);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -604,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u)
drm_gpuva_unmap(u);
}
+/*
+ * Note: @args should not be NULL when calling for a map operation.
+ */
static int
nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
@@ -624,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
if (ret)
goto unwind;
- if (args && vmm_get_range) {
+ if (vmm_get_range) {
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
if (ret) {
@@ -632,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
goto unwind;
}
}
+
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -929,25 +937,13 @@ nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
static int
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
- u64 end = addr + range;
- u64 kernel_managed_end = uvmm->kernel_managed_addr +
- uvmm->kernel_managed_size;
-
if (addr & ~PAGE_MASK)
return -EINVAL;
if (range & ~PAGE_MASK)
return -EINVAL;
- if (end <= addr)
- return -EINVAL;
-
- if (addr < NOUVEAU_VA_SPACE_START ||
- end > NOUVEAU_VA_SPACE_END)
- return -EINVAL;
-
- if (addr < kernel_managed_end &&
- end > uvmm->kernel_managed_addr)
+ if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
return -EINVAL;
return 0;
@@ -970,6 +966,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref)
{
struct nouveau_uvmm_bind_job *job =
container_of(kref, struct nouveau_uvmm_bind_job, kref);
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, &job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
nouveau_job_free(&job->base);
kfree(job);
@@ -1011,14 +1013,16 @@ bind_validate_op(struct nouveau_job *job,
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{
- struct nouveau_uvmm_bind_job *bind_job;
- struct nouveau_sched_entity *entity = job->entity;
+ struct nouveau_sched *sched = job->sched;
+ struct nouveau_job *__job;
struct bind_job_op *op;
u64 end = addr + range;
again:
- spin_lock(&entity->job.list.lock);
- list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ spin_lock(&sched->job.list.lock);
+ list_for_each_entry(__job, &sched->job.list.head, entry) {
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
+
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_UNMAP) {
u64 op_addr = op->va.addr;
@@ -1026,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1034,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
}
static int
@@ -1113,22 +1117,28 @@ bind_validate_region(struct nouveau_job *job)
}
static void
-bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+bind_link_gpuvas(struct bind_job_op *bop)
{
+ struct nouveau_uvma_prealloc *new = &bop->new;
+ struct drm_gpuvm_bo *vm_bo = bop->vm_bo;
+ struct drm_gpuva_ops *ops = bop->ops;
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
- drm_gpuva_link(&new->map->va);
+ drm_gpuva_link(&new->map->va, vm_bo);
break;
- case DRM_GPUVA_OP_REMAP:
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva *va = op->remap.unmap->va;
+
if (op->remap.prev)
- drm_gpuva_link(&new->prev->va);
+ drm_gpuva_link(&new->prev->va, va->vm_bo);
if (op->remap.next)
- drm_gpuva_link(&new->next->va);
- drm_gpuva_unlink(op->remap.unmap->va);
+ drm_gpuva_link(&new->next->va, va->vm_bo);
+ drm_gpuva_unlink(va);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
drm_gpuva_unlink(op->unmap.va);
break;
@@ -1139,21 +1149,70 @@ bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
}
static int
-nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (!op->ops)
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, obj, num_fences);
+ if (ret)
+ return ret;
+
+ /* Don't validate GEMs backing mappings we're about to
+ * unmap; it's not worth the effort.
+ */
+ if (va_op->op == DRM_GPUVA_OP_UNMAP)
+ continue;
+
+ ret = nouveau_bo_validate(nouveau_gem_object(obj),
+ true, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
- struct drm_exec *exec = &job->exec;
+ struct drm_exec *exec = &vme->exec;
struct bind_job_op *op;
int ret;
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_MAP) {
- op->gem.obj = drm_gem_object_lookup(job->file_priv,
- op->gem.handle);
- if (!op->gem.obj)
+ struct drm_gem_object *obj = op->gem.obj =
+ drm_gem_object_lookup(job->file_priv,
+ op->gem.handle);
+ if (!obj)
return -ENOENT;
+
+ dma_resv_lock(obj->resv, NULL);
+ op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(op->vm_bo))
+ return PTR_ERR(op->vm_bo);
+
+ drm_gpuvm_bo_extobj_add(op->vm_bo);
}
ret = bind_validate_op(job, op);
@@ -1176,6 +1235,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
* unwind all GPU VA space changes on failure.
*/
nouveau_uvmm_lock(uvmm);
+
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
@@ -1287,55 +1347,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
}
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(exec, vme->flags);
drm_exec_until_all_locked(exec) {
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- ret = drm_exec_prepare_obj(exec, obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
- }
- }
- }
-
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- /* Don't validate GEMs backing mappings we're about to
- * unmap, it's not worth the effort.
- */
- if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
- continue;
-
- ret = nouveau_bo_validate(nouveau_gem_object(obj),
- true, false);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
+ ret = bind_lock_validate(job, exec, vme->num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
}
}
@@ -1364,7 +1382,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
case OP_UNMAP_SPARSE:
case OP_MAP:
case OP_UNMAP:
- bind_link_gpuvas(op->ops, &op->new);
+ bind_link_gpuvas(op);
break;
default:
break;
@@ -1372,10 +1390,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
nouveau_uvmm_unlock(uvmm);
- spin_lock(&entity->job.list.lock);
- list_add(&bind_job->entry, &entity->job.list.head);
- spin_unlock(&entity->job.list.lock);
-
return 0;
unwind_continue:
@@ -1410,21 +1424,17 @@ unwind:
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_fini(exec);
+ drm_gpuvm_exec_unlock(vme);
return ret;
}
static void
-nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -1462,14 +1472,11 @@ out:
}
static void
-nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
{
- struct nouveau_uvmm_bind_job *bind_job =
- container_of(work, struct nouveau_uvmm_bind_job, work);
- struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
- struct nouveau_sched_entity *entity = job->entity;
- struct bind_job_op *op, *next;
+ struct bind_job_op *op;
list_for_each_op(op, &bind_job->ops) {
struct drm_gem_object *obj = op->gem.obj;
@@ -1511,42 +1518,27 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
if (!IS_ERR_OR_NULL(op->ops))
drm_gpuva_ops_free(&uvmm->base, op->ops);
+ if (!IS_ERR_OR_NULL(op->vm_bo)) {
+ dma_resv_lock(obj->resv, NULL);
+ drm_gpuvm_bo_put(op->vm_bo);
+ dma_resv_unlock(obj->resv);
+ }
+
if (obj)
drm_gem_object_put(obj);
}
- spin_lock(&entity->job.list.lock);
- list_del(&bind_job->entry);
- spin_unlock(&entity->job.list.lock);
-
+ nouveau_job_done(job);
complete_all(&bind_job->complete);
- wake_up(&entity->job.wq);
-
- /* Remove and free ops after removing the bind job from the job list to
- * avoid races against bind_validate_map_sparse().
- */
- list_for_each_op_safe(op, next, &bind_job->ops) {
- list_del(&op->entry);
- kfree(op);
- }
nouveau_uvmm_bind_job_put(bind_job);
}
-static void
-nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
-{
- struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
-
- nouveau_sched_entity_qwork(entity, &bind_job->work);
-}
-
static struct nouveau_job_ops nouveau_bind_job_ops = {
.submit = nouveau_uvmm_bind_job_submit,
.armed_submit = nouveau_uvmm_bind_job_armed_submit,
.run = nouveau_uvmm_bind_job_run,
- .free = nouveau_uvmm_bind_job_free_qwork,
+ .free = nouveau_uvmm_bind_job_cleanup,
};
static int
@@ -1607,7 +1599,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
return ret;
INIT_LIST_HEAD(&job->ops);
- INIT_LIST_HEAD(&job->entry);
for (i = 0; i < __args->op.count; i++) {
ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
@@ -1618,11 +1609,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
}
init_completion(&job->complete);
- INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
- args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ args.credits = 1;
+
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -1648,18 +1640,6 @@ err_free:
return ret;
}
-int
-nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct drm_nouveau_vm_init *init = data;
-
- return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
- init->kernel_managed_size);
-}
-
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{
@@ -1760,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched_entity = &cli->sched_entity;
+ args.sched = &cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
@@ -1776,15 +1756,18 @@ void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_map(uvma, mem);
- drm_gpuva_invalidate(va, false);
+ nouveau_uvma_map(uvma, mem);
+ drm_gpuva_invalidate(va, false);
+ }
}
}
@@ -1792,29 +1775,62 @@ void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_unmap(uvma);
- drm_gpuva_invalidate(va, true);
+ nouveau_uvma_unmap(uvma);
+ drm_gpuva_invalidate(va, true);
+ }
}
}
+static void
+nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
+{
+ struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
+
+ kfree(uvmm);
+}
+
+static int
+nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
+ return nouveau_bo_validate(nvbo, true, false);
+}
+
+static const struct drm_gpuvm_ops gpuvm_ops = {
+ .vm_free = nouveau_uvmm_free,
+ .vm_bo_validate = nouveau_uvmm_bo_validate,
+};
+
int
-nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
- u64 kernel_managed_addr, u64 kernel_managed_size)
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
{
+ struct nouveau_uvmm *uvmm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_device *drm = cli->drm->dev;
+ struct drm_gem_object *r_obj;
+ struct drm_nouveau_vm_init *init = data;
+ u64 kernel_managed_end;
int ret;
- u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
- mutex_init(&uvmm->mutex);
- dma_resv_init(&uvmm->resv);
- mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
- mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+ if (check_add_overflow(init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &kernel_managed_end))
+ return -EINVAL;
+
+ if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
+ return -EINVAL;
mutex_lock(&cli->mutex);
@@ -1823,39 +1839,48 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
goto out_unlock;
}
- if (kernel_managed_end <= kernel_managed_addr) {
- ret = -EINVAL;
+ uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
+ if (!uvmm) {
+ ret = -ENOMEM;
goto out_unlock;
}
- if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
- ret = -EINVAL;
+ r_obj = drm_gpuvm_resv_object_alloc(drm);
+ if (!r_obj) {
+ kfree(uvmm);
+ ret = -ENOMEM;
goto out_unlock;
}
- uvmm->kernel_managed_addr = kernel_managed_addr;
- uvmm->kernel_managed_size = kernel_managed_size;
+ mutex_init(&uvmm->mutex);
+ mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
- drm_gpuvm_init(&uvmm->base, cli->name,
+ drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
NOUVEAU_VA_SPACE_START,
NOUVEAU_VA_SPACE_END,
- kernel_managed_addr, kernel_managed_size,
- NULL);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &gpuvm_ops);
+ /* GPUVM holds its own reference to r_obj from here on. */
+ drm_gem_object_put(r_obj);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
- kernel_managed_addr, kernel_managed_size,
- NULL, 0, &cli->uvmm.vmm.vmm);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ NULL, 0, &uvmm->vmm.vmm);
if (ret)
- goto out_free_gpuva_mgr;
+ goto out_gpuvm_fini;
- cli->uvmm.vmm.cli = cli;
+ uvmm->vmm.cli = cli;
+ cli->uvmm.ptr = uvmm;
mutex_unlock(&cli->mutex);
return 0;
-out_free_gpuva_mgr:
- drm_gpuvm_destroy(&uvmm->base);
+out_gpuvm_fini:
+ drm_gpuvm_put(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
@@ -1867,15 +1892,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
struct nouveau_cli *cli = uvmm->vmm.cli;
- struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
- if (!cli)
- return;
-
- rmb(); /* for list_empty to work without lock */
- wait_event(entity->job.wq, list_empty(&entity->job.list.head));
-
nouveau_uvmm_lock(uvmm);
drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
struct nouveau_uvma *uvma = uvma_from_va(va);
@@ -1910,8 +1928,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuvm_destroy(&uvmm->base);
+ drm_gpuvm_put(&uvmm->base);
mutex_unlock(&cli->mutex);
-
- dma_resv_fini(&uvmm->resv);
}
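(Illustration only, not part of the patch: a minimal sketch of the overflow-safe range check that the new nouveau_uvmm_ioctl_vm_init() above performs; the helper name is hypothetical and NOUVEAU_VA_SPACE_END comes from the driver's headers.)

	#include <linux/overflow.h>

	/* Hypothetical helper mirroring the ioctl's validation: reject a
	 * kernel-managed range whose end wraps around or exceeds the
	 * supported VA space.
	 */
	static int example_validate_kernel_range(u64 addr, u64 size)
	{
		u64 end;

		if (check_add_overflow(addr, size, &end))
			return -EINVAL;

		if (end > NOUVEAU_VA_SPACE_END)
			return -EINVAL;

		return 0;
	}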
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index a308c59760a5..9d3c348581eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -12,12 +12,6 @@ struct nouveau_uvmm {
struct nouveau_vmm vmm;
struct maple_tree region_mt;
struct mutex mutex;
- struct dma_resv resv;
-
- u64 kernel_managed_addr;
- u64 kernel_managed_size;
-
- bool disabled;
};
struct nouveau_uvma_region {
@@ -50,8 +44,6 @@ struct nouveau_uvmm_bind_job {
struct nouveau_job base;
struct kref kref;
- struct list_head entry;
- struct work_struct work;
struct completion complete;
/* struct bind_job_op */
@@ -60,7 +52,7 @@ struct nouveau_uvmm_bind_job {
struct nouveau_uvmm_bind_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
unsigned int flags;
@@ -82,8 +74,6 @@ struct nouveau_uvmm_bind_job_args {
#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
-int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
- u64 kernel_managed_addr, u64 kernel_managed_size);
void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 5b71a5a5cd85..cdbc75e3d1f6 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -39,7 +39,7 @@ struct nv04_fence_priv {
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = fence->channel->chan.push;
+ struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c
index a6c877135598..61fed7792e41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/event.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c
@@ -81,17 +81,17 @@ nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy)
static void
nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy)
{
- spin_lock_irq(&ntfy->event->list_lock);
+ write_lock_irq(&ntfy->event->list_lock);
list_del_init(&ntfy->head);
- spin_unlock_irq(&ntfy->event->list_lock);
+ write_unlock_irq(&ntfy->event->list_lock);
}
static void
nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy)
{
- spin_lock_irq(&ntfy->event->list_lock);
+ write_lock_irq(&ntfy->event->list_lock);
list_add_tail(&ntfy->head, &ntfy->event->ntfy);
- spin_unlock_irq(&ntfy->event->list_lock);
+ write_unlock_irq(&ntfy->event->list_lock);
}
static void
@@ -176,7 +176,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
return;
nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id);
- spin_lock_irqsave(&event->list_lock, flags);
+ read_lock_irqsave(&event->list_lock, flags);
list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) {
if (ntfy->id == id && ntfy->bits & bits) {
@@ -185,7 +185,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
}
}
- spin_unlock_irqrestore(&event->list_lock, flags);
+ read_unlock_irqrestore(&event->list_lock, flags);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 87a62d4ff4bd..7d4716dcd512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -24,7 +24,6 @@
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
-#include "chid.h"
#include "runl.h"
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
index 3adbb05ff587..d088e636edc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
@@ -539,7 +539,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
struct nvkm_runl *runl;
struct nvkm_engn *engn;
u32 cgids = 2048;
- u32 chids = 2048 / CHID_PER_USERD;
+ u32 chids = 2048;
int ret;
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index e31f9641114b..dc44f5c7833f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -689,8 +689,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
struct nvfw_gsp_rpc *rpc;
rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
- if (!rpc)
- return NULL;
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
rpc->header_version = 0x03000000;
rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
@@ -1159,7 +1159,7 @@ static void
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
MUX_METHOD_DATA_ELEMENT *part)
{
- acpi_handle iter = NULL, handle_mux;
+ acpi_handle iter = NULL, handle_mux = NULL;
acpi_status status;
unsigned long long value;
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c26aab4939fa..993691b3cc7e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -22,11 +22,11 @@
#include <linux/hardirq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
@@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
if (soc)
dispc->feat = soc->data;
else
- dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
+ dispc->feat = device_get_match_data(&pdev->dev);
r = dispc_errata_i734_wa_init(dispc);
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 02955f976845..988888e164d7 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -22,12 +22,13 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
@@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev)
if (soc)
dss->feat = soc->data;
else
- dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
+ dss->feat = device_get_match_data(&pdev->dev);
/* Map I/O registers, get and setup clocks. */
dss->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index b2835b3ea6f5..6598c9c08ba1 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- bool fence_cookie = dma_fence_begin_signalling();
dispc_runtime_get(priv->dispc);
@@ -92,6 +91,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
omap_atomic_wait_for_completion(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_hw_done(old_state);
} else {
/*
* OMAP3 DSS seems to have issues with the work-around above,
@@ -101,11 +102,9 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- }
- drm_atomic_helper_commit_hw_done(old_state);
-
- dma_fence_end_signalling(fence_cookie);
+ drm_atomic_helper_commit_hw_done(old_state);
+ }
/*
* Wait for completion of the page flips to ensure that old buffers
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index c48fa531ca32..3421e8389222 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -48,7 +48,7 @@ struct omap_gem_object {
* OMAP_BO_MEM_DMA_API flag set)
*
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
- * if they are physically contiguous (when sgt->orig_nents == 1)
+ * if they are physically contiguous
*
* - buffers mapped through the TILER when pin_cnt is not zero, in which
* case the DMA address points to the TILER aperture
@@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
+static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
+{
+ return !(drm_prime_get_contiguous_size(sgt) < size);
+}
+
static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
- if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+ if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
+ omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
return true;
return false;
@@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
union omap_gem_size gsize;
/* Without a DMM only physically contiguous buffers can be supported. */
- if (sgt->orig_nents != 1 && !priv->has_dmm)
+ if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
return ERR_PTR(-EINVAL);
gsize.bytes = PAGE_ALIGN(size);
@@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj->sgt = sgt;
- if (sgt->orig_nents == 1) {
+ if (omap_gem_sgt_is_contiguous(sgt, size)) {
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 9323e7b9e384..be8f48e3c1db 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1709,6 +1709,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = auo_b101uan08_3_init_cmd,
+ .lp11_before_reset = true,
};
static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
@@ -1766,11 +1767,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
- .clock = 161600,
+ .clock = 162850,
.hdisplay = 1200,
- .hsync_start = 1200 + 40,
- .hsync_end = 1200 + 40 + 20,
- .htotal = 1200 + 40 + 20 + 40,
+ .hsync_start = 1200 + 50,
+ .hsync_end = 1200 + 50 + 20,
+ .htotal = 1200 + 50 + 20 + 50,
.vdisplay = 1920,
.vsync_start = 1920 + 116,
.vsync_end = 1920 + 116 + 8,
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 95c8472d878a..a0b6f69b916f 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -203,6 +203,9 @@ struct edp_panel_entry {
/** @name: Name of this panel (for printing to logs). */
const char *name;
+
+ /** @override_edid_mode: Override the mode obtained from the EDID. */
+ const struct drm_display_mode *override_edid_mode;
};
struct panel_edp {
@@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
return num;
}
+static int panel_edp_override_edid_mode(struct panel_edp *panel,
+ struct drm_connector *connector,
+ const struct drm_display_mode *override_mode)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, override_mode);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add additional mode\n");
+ return 0;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
struct drm_connector *connector)
{
@@ -568,6 +589,10 @@ static int panel_edp_get_modes(struct drm_panel *panel,
{
struct panel_edp *p = to_panel_edp(panel);
int num = 0;
+ bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes;
+ bool has_override_edid_mode = p->detected_panel &&
+ p->detected_panel != ERR_PTR(-EINVAL) &&
+ p->detected_panel->override_edid_mode;
/* probe EDID if a DDC bus is available */
if (p->ddc) {
@@ -575,20 +600,28 @@ static int panel_edp_get_modes(struct drm_panel *panel,
if (!p->edid)
p->edid = drm_get_edid(connector, p->ddc);
-
- if (p->edid)
- num += drm_add_edid_modes(connector, p->edid);
+ /*
+ * If both EDID and hard-coded modes exist, skip the EDID modes to
+ * avoid multiple preferred modes.
+ */
+ if (p->edid && !has_hard_coded_modes) {
+ if (has_override_edid_mode) {
+ /*
+ * An override_edid_mode is specified; use it
+ * instead of the modes from the EDID.
+ */
+ num += panel_edp_override_edid_mode(p, connector,
+ p->detected_panel->override_edid_mode);
+ } else {
+ num += drm_add_edid_modes(connector, p->edid);
+ }
+ }
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
}
- /*
- * Add hard-coded panel modes. Don't call this if there are no timings
- * and no modes (the generic edp-panel case) because it will clobber
- * the display_info that was already set by drm_add_edid_modes().
- */
- if (p->desc->num_timings || p->desc->num_modes)
+ if (has_hard_coded_modes)
num += panel_edp_get_non_edid_modes(p, connector);
else if (!num)
dev_warn(p->base.dev, "No display modes\n");
@@ -950,6 +983,19 @@ static const struct panel_desc auo_b101ean01 = {
},
};
+static const struct drm_display_mode auo_b116xa3_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -973,6 +1019,8 @@ static const struct panel_desc auo_b116xak01 = {
},
.delay = {
.hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
},
};
@@ -1801,6 +1849,12 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 80,
+};
+
static const struct panel_delay delay_200_500_e80_d50 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1820,6 +1874,19 @@ static const struct panel_delay delay_200_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200_d10 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 10,
+};
+
+static const struct panel_delay delay_200_150_e200 = {
+ .hpd_absent = 200,
+ .unprepare = 150,
+ .enable = 200,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1828,6 +1895,15 @@ static const struct panel_delay delay_200_500_e200 = {
.delay = _delay \
}
+#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
+{ \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .delay = _delay, \
+ .override_edid_mode = _mode \
+}
+
/*
* This table is used to figure out power sequencing delays for panels that
* are detected by EDID. Entries here may point to entries in the
@@ -1840,36 +1916,76 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
+ &auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1",
+ &auo_b116xa3_mode),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0715, &delay_200_150_e200, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0731, &delay_200_500_e80, "NT116WHM-N42"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0741, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f6, &delay_200_500_e200, "NT140FHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x08b2, &delay_200_500_e200, "NT140WHM-N49"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1138, &innolux_n116bca_ea1.delay, "N116BCA-EA1-RC4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1145, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+
+ EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
+
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index e7be15b68102..00791ea81e90 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Elida kd35t133 5.5" MIPI-DSI panel driver
+ * Elida kd35t133 3.5" MIPI-DSI panel driver
* Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
*
* based on
@@ -43,7 +43,6 @@ struct kd35t133 {
struct regulator *vdd;
struct regulator *iovcc;
enum drm_panel_orientation orientation;
- bool prepared;
};
static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
@@ -91,9 +90,6 @@ static int kd35t133_unprepare(struct drm_panel *panel)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0)
dev_err(ctx->dev, "failed to set display off: %d\n", ret);
@@ -104,11 +100,11 @@ static int kd35t133_unprepare(struct drm_panel *panel)
return ret;
}
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vdd);
- ctx->prepared = false;
-
return 0;
}
@@ -118,9 +114,6 @@ static int kd35t133_prepare(struct drm_panel *panel)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (ctx->prepared)
- return 0;
-
dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vdd);
if (ret < 0) {
@@ -164,8 +157,6 @@ static int kd35t133_prepare(struct drm_panel *panel)
msleep(50);
- ctx->prepared = true;
-
return 0;
disable_iovcc:
@@ -209,11 +200,6 @@ static int kd35t133_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
- /*
- * TODO: Remove once all drm drivers call
- * drm_connector_set_orientation_from_panel()
- */
- drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
@@ -299,27 +285,11 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
-{
- struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
-
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
-}
-
static void kd35t133_remove(struct mipi_dsi_device *dsi)
{
struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- kd35t133_shutdown(dsi);
-
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
@@ -340,7 +310,6 @@ static struct mipi_dsi_driver kd35t133_driver = {
},
.probe = kd35t133_probe,
.remove = kd35t133_remove,
- .shutdown = kd35t133_shutdown,
};
module_mipi_dsi_driver(kd35t133_driver);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index c73243d85de7..ff0dc08b9829 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -38,6 +38,7 @@
#define HX8394_CMD_SETMIPI 0xba
#define HX8394_CMD_SETOTP 0xbb
#define HX8394_CMD_SETREGBANK 0xbd
+#define HX8394_CMD_UNKNOWN5 0xbf
#define HX8394_CMD_UNKNOWN1 0xc0
#define HX8394_CMD_SETDGCLUT 0xc1
#define HX8394_CMD_SETID 0xc3
@@ -52,6 +53,7 @@
#define HX8394_CMD_SETGIP1 0xd5
#define HX8394_CMD_SETGIP2 0xd6
#define HX8394_CMD_SETGPO 0xd6
+#define HX8394_CMD_UNKNOWN4 0xd8
#define HX8394_CMD_SETSCALING 0xdd
#define HX8394_CMD_SETIDLE 0xdf
#define HX8394_CMD_SETGAMMA 0xe0
@@ -68,7 +70,7 @@ struct hx8394 {
struct gpio_desc *reset_gpio;
struct regulator *vcc;
struct regulator *iovcc;
- bool prepared;
+ enum drm_panel_orientation orientation;
const struct hx8394_panel_desc *desc;
};
@@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
+static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode powkiddy_x55_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 44,
+ .hsync_end = 720 + 44 + 20,
+ .htotal = 720 + 44 + 20 + 20,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 10,
+ .vtotal = 1280 + 12 + 10 + 10,
+ .clock = 63290,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 67,
+ .height_mm = 121,
+};
+
+static const struct hx8394_panel_desc powkiddy_x55_desc = {
+ .mode = &powkiddy_x55_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = powkiddy_x55_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -262,16 +398,11 @@ static int hx8394_unprepare(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
- if (!ctx->prepared)
- return 0;
-
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vcc);
- ctx->prepared = false;
-
return 0;
}
@@ -280,9 +411,6 @@ static int hx8394_prepare(struct drm_panel *panel)
struct hx8394 *ctx = panel_to_hx8394(panel);
int ret;
- if (ctx->prepared)
- return 0;
-
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
ret = regulator_enable(ctx->vcc);
@@ -301,8 +429,6 @@ static int hx8394_prepare(struct drm_panel *panel)
msleep(180);
- ctx->prepared = true;
-
return 0;
disable_vcc:
@@ -335,12 +461,20 @@ static int hx8394_get_modes(struct drm_panel *panel,
return 1;
}
+static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+
+ return ctx->orientation;
+}
+
static const struct drm_panel_funcs hx8394_drm_funcs = {
.disable = hx8394_disable,
.unprepare = hx8394_unprepare,
.prepare = hx8394_prepare,
.enable = hx8394_enable,
.get_modes = hx8394_get_modes,
+ .get_orientation = hx8394_get_orientation,
};
static int hx8394_probe(struct mipi_dsi_device *dsi)
@@ -358,6 +492,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset gpio\n");
+ ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
@@ -401,27 +541,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static void hx8394_shutdown(struct mipi_dsi_device *dsi)
-{
- struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
-
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
-}
-
static void hx8394_remove(struct mipi_dsi_device *dsi)
{
struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- hx8394_shutdown(dsi);
-
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
@@ -431,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);
@@ -438,7 +563,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match);
static struct mipi_dsi_driver hx8394_driver = {
.probe = hx8394_probe,
.remove = hx8394_remove,
- .shutdown = hx8394_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = hx8394_of_match,
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 7838947a1bf3..2ffe5f68a890 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -830,6 +830,203 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(0),
};
+static const struct ili9881c_instr am8001280g_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0xD3),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x07, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x10, 0x01),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x03),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x03),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x00),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x00),
+ ILI9881C_COMMAND_INSTR(0x61, 0x01),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x66, 0x06),
+ ILI9881C_COMMAND_INSTR(0x67, 0x07),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x08),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x00),
+ ILI9881C_COMMAND_INSTR(0x77, 0x01),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x08),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x30),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x15),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0B),
+ ILI9881C_COMMAND_INSTR(0x50, 0xa5),
+ ILI9881C_COMMAND_INSTR(0x51, 0xa0),
+ ILI9881C_COMMAND_INSTR(0x53, 0x70),
+ ILI9881C_COMMAND_INSTR(0x55, 0x7A),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+
+ ILI9881C_COMMAND_INSTR(0xA0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA8, 0xD4),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x2B),
+ ILI9881C_COMMAND_INSTR(0xAB, 0xB5),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x19),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x53),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x6A),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x31),
+
+ ILI9881C_COMMAND_INSTR(0xC0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xC8, 0xD4),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x2B),
+ ILI9881C_COMMAND_INSTR(0xCB, 0xB5),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x19),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x53),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x6A),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x31),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+ ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c),
+ ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1014,6 +1211,23 @@ static const struct drm_display_mode w552946aba_default_mode = {
.height_mm = 121,
};
+static const struct drm_display_mode am8001280g_default_mode = {
+ .clock = 67911,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 32,
+ .htotal = 800 + 20 + 32 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 8,
+ .vtotal = 1280 + 6 + 8 + 4,
+
+ .width_mm = 94,
+ .height_mm = 151,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1094,6 +1308,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ ctx->panel.prepare_prev_first = true;
+
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -1145,11 +1361,20 @@ static const struct ili9881c_desc w552946aba_desc = {
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
+static const struct ili9881c_desc am8001280g_desc = {
+ .init = am8001280g_init,
+ .init_length = ARRAY_SIZE(am8001280g_init),
+ .mode = &am8001280g_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
+ { .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 79de6c886292..94d89ffd596b 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -28,6 +28,7 @@ struct nv3051d_panel_info {
unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_flags;
+ u32 mode_flags;
};
struct panel_nv3051d {
@@ -261,6 +262,8 @@ static int panel_nv3051d_unprepare(struct drm_panel *panel)
usleep_range(10000, 15000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
regulator_disable(ctx->vdd);
return 0;
@@ -385,15 +388,7 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
-
- /*
- * The panel in the RG351V is identical to the 353P, except it
- * requires MIPI_DSI_CLOCK_NON_CONTINUOUS to operate correctly.
- */
- if (of_device_is_compatible(dev->of_node, "anbernic,rg351v-panel"))
- dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->mode_flags = ctx->panel_info->mode_flags;
drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -481,16 +476,56 @@ static const struct drm_display_mode nv3051d_rgxx3_modes[] = {
},
};
-static const struct nv3051d_panel_info nv3051d_rgxx3_info = {
+static const struct drm_display_mode nv3051d_rk2023_modes[] = {
+ {
+ .hdisplay = 640,
+ .hsync_start = 640 + 40,
+ .hsync_end = 640 + 40 + 2,
+ .htotal = 640 + 40 + 2 + 80,
+ .vdisplay = 480,
+ .vsync_start = 480 + 18,
+ .vsync_end = 480 + 18 + 2,
+ .vtotal = 480 + 18 + 2 + 4,
+ .clock = 24150,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct nv3051d_panel_info nv3051d_rg351v_info = {
.display_modes = nv3051d_rgxx3_modes,
.num_modes = ARRAY_SIZE(nv3051d_rgxx3_modes),
.width_mm = 70,
.height_mm = 57,
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS,
+};
+
+static const struct nv3051d_panel_info nv3051d_rg353p_info = {
+ .display_modes = nv3051d_rgxx3_modes,
+ .num_modes = ARRAY_SIZE(nv3051d_rgxx3_modes),
+ .width_mm = 70,
+ .height_mm = 57,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+};
+
+static const struct nv3051d_panel_info nv3051d_rk2023_info = {
+ .display_modes = nv3051d_rk2023_modes,
+ .num_modes = ARRAY_SIZE(nv3051d_rk2023_modes),
+ .width_mm = 70,
+ .height_mm = 57,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
static const struct of_device_id newvision_nv3051d_of_match[] = {
- { .compatible = "newvision,nv3051d", .data = &nv3051d_rgxx3_info },
+ { .compatible = "anbernic,rg351v-panel", .data = &nv3051d_rg351v_info },
+ { .compatible = "anbernic,rg353p-panel", .data = &nv3051d_rg353p_info },
+ { .compatible = "powkiddy,rk2023-panel", .data = &nv3051d_rk2023_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, newvision_nv3051d_of_match);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index d6dceb858008..83a9cf53d269 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1023,7 +1023,7 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.hdisplay = 480,
.hsync_start = 480 + 2, /* HFP = 2 */
.hsync_end = 480 + 2 + 0, /* HSync = 0 */
- .htotal = 480 + 2 + 0 + 5, /* HFP = 5 */
+ .htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
.vdisplay = 800,
.vsync_start = 800 + 2, /* VFP = 2 */
.vsync_end = 800 + 2 + 0, /* VSync = 0 */
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 6cd32b909087..8017ad33cf18 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1324,6 +1324,35 @@ static const struct panel_desc bananapi_s070wv20_ct16 = {
},
};
+static const struct drm_display_mode boe_bp101wx1_100_mode = {
+ .clock = 78945,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 0,
+ .hsync_end = 1280 + 0 + 2,
+ .htotal = 1280 + 62 + 0 + 2,
+ .vdisplay = 800,
+ .vsync_start = 800 + 8,
+ .vsync_end = 800 + 8 + 2,
+ .vtotal = 800 + 6 + 8 + 2,
+};
+
+static const struct panel_desc boe_bp101wx1_100 = {
+ .modes = &boe_bp101wx1_100_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing boe_ev121wxm_n10_1850_timing = {
.pixelclock = { 69922000, 71000000, 72293000 },
.hactive = { 1280, 1280, 1280 },
@@ -1973,6 +2002,33 @@ static const struct panel_desc eink_vb3300_kca = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing evervision_vgg644804_timing = {
+ .pixelclock = { 25175000, 25175000, 25175000 },
+ .hactive = { 640, 640, 640 },
+ .hfront_porch = { 16, 16, 16 },
+ .hback_porch = { 82, 114, 170 },
+ .hsync_len = { 5, 30, 30 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 30, 32, 34 },
+ .vsync_len = { 1, 3, 5 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc evervision_vgg644804 = {
+ .timings = &evervision_vgg644804_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 115,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
static const struct display_timing evervision_vgg804821_timing = {
.pixelclock = { 27600000, 33300000, 50000000 },
.hactive = { 800, 800, 800 },
@@ -2379,13 +2435,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
- .hfront_porch = { 41, 80, 100 },
- .hback_porch = { 40, 79, 99 },
- .hsync_len = { 1, 1, 1 },
+ .hfront_porch = { 30, 60, 70 },
+ .hback_porch = { 30, 60, 70 },
+ .hsync_len = { 22, 40, 60 },
.vactive = { 800, 800, 800 },
- .vfront_porch = { 5, 11, 14 },
- .vback_porch = { 4, 11, 14 },
- .vsync_len = { 1, 1, 1 },
+ .vfront_porch = { 3, 8, 14 },
+ .vback_porch = { 3, 8, 14 },
+ .vsync_len = { 4, 7, 12 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@@ -2402,6 +2458,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -4253,6 +4310,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,bp101wx1-100",
+ .data = &boe_bp101wx1_100,
+ }, {
.compatible = "boe,ev121wxm-n10-1850",
.data = &boe_ev121wxm_n10_1850,
}, {
@@ -4334,6 +4394,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "eink,vb3300-kca",
.data = &eink_vb3300_kca,
}, {
+ .compatible = "evervision,vgg644804",
+ .data = &evervision_vgg644804,
+ }, {
.compatible = "evervision,vgg804821",
.data = &evervision_vgg804821,
}, {
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 28f7046e1b1a..a45e4addcc19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -403,7 +403,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
panfrost_job_enable_interrupts(pfdev);
}
-static int panfrost_device_resume(struct device *dev)
+static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -413,7 +413,7 @@ static int panfrost_device_resume(struct device *dev)
return 0;
}
-static int panfrost_device_suspend(struct device *dev)
+static int panfrost_device_runtime_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -421,10 +421,83 @@ static int panfrost_device_suspend(struct device *dev)
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
+ panfrost_job_suspend_irq(pfdev);
+ panfrost_mmu_suspend_irq(pfdev);
+ panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
return 0;
}
-EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend,
- panfrost_device_resume, NULL);
+static int panfrost_device_resume(struct device *dev)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) {
+ unsigned long freq = pfdev->pfdevfreq.fast_rate;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_set_opp(dev, opp);
+ dev_pm_opp_put(opp);
+ }
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto err_resume;
+
+ return 0;
+
+err_resume:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS) && pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF))
+ dev_pm_opp_set_opp(dev, NULL);
+ return ret;
+}
+
+static int panfrost_device_suspend(struct device *dev)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ }
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF))
+ dev_pm_opp_set_opp(dev, NULL);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(panfrost_pm_ops) = {
+ RUNTIME_PM_OPS(panfrost_device_runtime_suspend, panfrost_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(panfrost_device_suspend, panfrost_device_resume)
+};
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 1ef38f60d5dc..62f7e3527385 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -25,6 +25,23 @@ struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
+enum panfrost_drv_comp_bits {
+ PANFROST_COMP_BIT_GPU,
+ PANFROST_COMP_BIT_JOB,
+ PANFROST_COMP_BIT_MMU,
+ PANFROST_COMP_BIT_MAX
+};
+
+/**
+ * enum panfrost_gpu_pm - Supported kernel power management features
+ * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
+ * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ */
+enum panfrost_gpu_pm {
+ GPU_PM_CLK_DIS,
+ GPU_PM_VREG_OFF,
+};
+
struct panfrost_features {
u16 id;
u16 revision;
@@ -75,12 +92,17 @@ struct panfrost_compatible {
/* Vendor implementation quirks callback */
void (*vendor_quirk)(struct panfrost_device *pfdev);
+
+ /* Allowed PM features */
+ u8 pm_features;
};
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
+ int gpu_irq;
+ int mmu_irq;
void __iomem *iomem;
struct clk *clock;
@@ -94,6 +116,7 @@ struct panfrost_device {
struct panfrost_features features;
const struct panfrost_compatible *comp;
+ DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);
spinlock_t as_lock;
unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 7cabf4e3d1f2..a926d71e8131 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -274,7 +274,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- NULL);
+ 1, NULL);
if (ret)
goto out_put_job;
@@ -734,6 +734,7 @@ static const struct panfrost_compatible mediatek_mt8183_b_data = {
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
@@ -742,6 +743,7 @@ static const struct panfrost_compatible mediatek_mt8186_data = {
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
.pm_domain_names = mediatek_mt8186_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -752,6 +754,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.supply_names = mediatek_mt8192_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const struct of_device_id dt_match[] = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index e7942ac449c6..47751302f1bc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -220,16 +220,8 @@ void panfrost_core_dump(struct panfrost_job *job)
iter.hdr->bomap.data[0] = bomap - bomap_start;
- for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
- struct page *page = sg_page_iter_page(&page_iter);
-
- if (!IS_ERR(page)) {
- *bomap++ = page_to_phys(page);
- } else {
- dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
- *bomap++ = 0;
- }
- }
+ for_each_sgtable_page(bo->base.sgt, &page_iter, 0)
+ *bomap++ = page_to_phys(sg_page_iter_page(&page_iter));
iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f0be7e19b13e..9063ce254642 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -22,9 +22,13 @@
static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 state = gpu_read(pfdev, GPU_INT_STAT);
- u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+ u32 fault_status, state;
+ if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended))
+ return IRQ_NONE;
+
+ fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+ state = gpu_read(pfdev, GPU_INT_STAT);
if (!state)
return IRQ_NONE;
@@ -60,18 +64,32 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
gpu_write(pfdev, GPU_INT_MASK, 0);
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
- gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
+ clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
- val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+ val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu soft reset timed out\n");
- return ret;
+ dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");
+
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
+ val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+ if (ret) {
+ dev_err(pfdev->dev, "gpu hard reset timed out\n");
+ return ret;
+ }
}
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
- gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+ /* Only enable the interrupts we care about */
+ gpu_write(pfdev, GPU_INT_MASK,
+ GPU_IRQ_MASK_ERROR |
+ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
+ GPU_IRQ_CLEAN_CACHES_COMPLETED);
/*
* All in-flight jobs should have released their cycle
@@ -362,32 +380,42 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev)
return ((u64)hi << 32) | lo;
}
+static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
+{
+ u64 core_mask;
+
+ if (pfdev->features.l2_present == 1)
+ return U64_MAX;
+
+ /*
+ * Only support one core group now.
+ * ~(l2_present - 1) unsets all bits in l2_present except
+ * the bottom bit. (l2_present - 2) has all the bits in
+ * the first core group set. AND them together to generate
+ * a mask of cores in the first core group.
+ */
+ core_mask = ~(pfdev->features.l2_present - 1) &
+ (pfdev->features.l2_present - 2);
+ dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+ hweight64(core_mask),
+ hweight64(pfdev->features.shader_present));
+
+ return core_mask;
+}
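The bit trick in panfrost_get_core_mask() above is easier to see with a concrete value. The following standalone sketch is illustrative only (the l2_present value 0x11 is hypothetical and not taken from any real GPU); it shows how the expression isolates the first core group:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical topology: two L2 slices, i.e. two core groups,
	 * with the second group starting at bit 4. */
	uint64_t l2_present = 0x11;
	uint64_t core_mask;

	/* Same expression as panfrost_get_core_mask():
	 * ~(l2_present - 1) clears every l2_present bit except bit 0,
	 * (l2_present - 2) sets all bits below the second L2 slice,
	 * so ANDing them keeps only the cores of the first core group. */
	core_mask = ~(l2_present - 1) & (l2_present - 2);

	printf("core_mask = 0x%llx\n", (unsigned long long)core_mask); /* 0xf */
	return 0;
}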
+
void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
int ret;
u32 val;
- u64 core_mask = U64_MAX;
+ u64 core_mask;
panfrost_gpu_init_quirks(pfdev);
+ core_mask = panfrost_get_core_mask(pfdev);
- if (pfdev->features.l2_present != 1) {
- /*
- * Only support one core group now.
- * ~(l2_present - 1) unsets all bits in l2_present except
- * the bottom bit. (l2_present - 2) has all the bits in
- * the first core group set. AND them together to generate
- * a mask of cores in the first core group.
- */
- core_mask = ~(pfdev->features.l2_present - 1) &
- (pfdev->features.l2_present - 2);
- dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
- hweight64(core_mask),
- hweight64(pfdev->features.shader_present));
- }
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == (pfdev->features.l2_present & core_mask),
- 100, 20000);
+ 10, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu L2");
@@ -395,27 +423,52 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
pfdev->features.shader_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == (pfdev->features.shader_present & core_mask),
- 100, 20000);
+ 10, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
- val, val == pfdev->features.tiler_present, 100, 1000);
+ val, val == pfdev->features.tiler_present, 10, 1000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
- gpu_write(pfdev, TILER_PWROFF_LO, 0);
- gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, L2_PWROFF_LO, 0);
+ int ret;
+ u32 val;
+
+ gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
+ val, !val, 1, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "shader power transition timeout");
+
+ gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
+ val, !val, 1, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "tiler power transition timeout");
+
+ gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
+ ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
+ val, !val, 0, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "l2 power transition timeout");
+}
+
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ synchronize_irq(pfdev->gpu_irq);
}
int panfrost_gpu_init(struct panfrost_device *pfdev)
{
- int err, irq;
+ int err;
err = panfrost_gpu_soft_reset(pfdev);
if (err)
@@ -430,11 +483,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
dma_set_max_seg_size(pfdev->dev, UINT_MAX);
- irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
- if (irq < 0)
- return irq;
+ pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ if (pfdev->gpu_irq < 0)
+ return pfdev->gpu_irq;
- err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+ err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index 876fdad9f721..d841b86504ea 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -15,6 +15,7 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev);
void panfrost_cycle_counter_get(struct panfrost_device *pfdev);
void panfrost_cycle_counter_put(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index ecd2e035147f..0c2dbf6ef2a5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -405,6 +405,8 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
int j;
u32 irq_mask = 0;
+ clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
for (j = 0; j < NUM_JOB_SLOTS; j++) {
irq_mask |= MK_JS_MASK(j);
}
@@ -413,6 +415,14 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
job_write(pfdev, JOB_INT_MASK, irq_mask);
}
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
+ job_write(pfdev, JOB_INT_MASK, 0);
+ synchronize_irq(pfdev->js->irq);
+}
+
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
struct panfrost_job *job,
unsigned int js)
@@ -792,17 +802,25 @@ static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
struct panfrost_device *pfdev = data;
panfrost_job_handle_irqs(pfdev);
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+
+ /* Enable interrupts only if we're not about to get suspended */
+ if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+ job_write(pfdev, JOB_INT_MASK,
+ GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
+ GENMASK(NUM_JOB_SLOTS - 1, 0));
+
return IRQ_HANDLED;
}
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 status = job_read(pfdev, JOB_INT_STAT);
+ u32 status;
+
+ if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+ return IRQ_NONE;
+ status = job_read(pfdev, JOB_INT_STAT);
if (!status)
return IRQ_NONE;
@@ -852,7 +870,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
js->queue[j].fence_context = dma_fence_context_alloc(1);
ret = drm_sched_init(&js->queue[j].sched,
- &panfrost_sched_ops,
+ &panfrost_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
nentries, 0,
msecs_to_jiffies(JOB_TIMEOUT_MS),
@@ -963,7 +981,7 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&js->queue[i].sched.hw_rq_count))
+ if (atomic_read(&js->queue[i].sched.credit_count))
return false;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 17ff808dba07..ec581b97852b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -47,6 +47,7 @@ int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
int panfrost_job_is_idle(struct panfrost_device *pfdev);
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 846dd697c410..f38385fe76bb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -231,6 +231,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
struct panfrost_mmu *mmu, *mmu_tmp;
+ clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
spin_lock(&pfdev->as_lock);
pfdev->as_alloc_mask = 0;
@@ -670,6 +672,9 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
+ if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
+ return IRQ_NONE;
+
if (!mmu_read(pfdev, MMU_INT_STAT))
return IRQ_NONE;
@@ -744,22 +749,25 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
}
- spin_lock(&pfdev->as_lock);
- mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
- spin_unlock(&pfdev->as_lock);
+ /* Enable interrupts only if we're not about to get suspended */
+ if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
+ spin_lock(&pfdev->as_lock);
+ mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
+ spin_unlock(&pfdev->as_lock);
+ }
return IRQ_HANDLED;
};
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
- int err, irq;
+ int err;
- irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
- if (irq < 0)
- return irq;
+ pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ if (pfdev->mmu_irq < 0)
+ return pfdev->mmu_irq;
- err = devm_request_threaded_irq(pfdev->dev, irq,
+ err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-mmu",
@@ -777,3 +785,11 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
}
+
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ synchronize_irq(pfdev->mmu_irq);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index cc2a0d307feb..022a9a74a114 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -14,6 +14,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
void panfrost_mmu_reset(struct panfrost_device *pfdev);
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev);
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 55ec807550b3..c25743b05c55 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -44,6 +44,7 @@
GPU_IRQ_MULTIPLE_FAULT)
#define GPU_CMD 0x30
#define GPU_CMD_SOFT_RESET 0x01
+#define GPU_CMD_HARD_RESET 0x02
#define GPU_CMD_PERFCNT_CLEAR 0x03
#define GPU_CMD_PERFCNT_SAMPLE 0x04
#define GPU_CMD_CYCLE_COUNT_START 0x05
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 404b0483bb7c..c6d35c33d5d6 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -485,7 +485,6 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
static int qxl_primary_apply_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
- struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
@@ -510,8 +509,8 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
- cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
+ cmd->u.set.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+ cmd->u.set.position.y = plane_state->crtc_y + plane_state->hotspot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
@@ -531,7 +530,6 @@ out_free_release:
static int qxl_primary_move_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
- struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
@@ -554,8 +552,8 @@ static int qxl_primary_move_cursor(struct qxl_device *qdev,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
- cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
+ cmd->u.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+ cmd->u.position.y = plane_state->crtc_y + plane_state->hotspot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
@@ -851,8 +849,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
- new_state->fb->hot_x,
- new_state->fb->hot_y);
+ new_state->hotspot_x,
+ new_state->hotspot_y);
qxl_free_cursor(old_cursor_bo);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 46de4f171970..beee5563031a 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -285,7 +285,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = {
};
static struct drm_driver qxl_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 307a890fde13..32069acd93f8 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -119,7 +119,6 @@ struct qxl_output {
#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
-#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
struct ttm_device bdev;
@@ -256,8 +255,6 @@ struct qxl_device {
#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
-int qxl_debugfs_fence_init(struct qxl_device *rdev);
-
int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
@@ -344,8 +341,6 @@ qxl_image_alloc_objects(struct qxl_device *qdev,
int height, int stride);
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
-void qxl_update_screen(struct qxl_device *qxl);
-
/* qxl io operations (qxl_cmd.c) */
void qxl_io_create_primary(struct qxl_device *qdev,
@@ -445,8 +440,6 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
-struct qxl_drv_surface *
-qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
/* qxl_ioctl.c */
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index d6ccaf24ee0c..279bf130a18c 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -26,6 +26,7 @@
#include <linux/component.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_eld.h>
#include "dce6_afmt.h"
#include "evergreen_hdmi.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 34a1c73d3938..02a65971d140 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -33,7 +33,6 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include <drm/drm_legacy.h>
#include "radeon_family.h"
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
index 8f9a728affde..07ad17d24294 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
@@ -14,7 +14,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
-#include <drm/drm_plane_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index fa6e592e0276..7d561c5a650f 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -4,6 +4,7 @@
* Zheng Yang <zhengyang@rock-chips.com>
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -55,7 +56,6 @@ struct rk3066_hdmi {
unsigned int tmdsclk;
struct hdmi_data_info hdmi_data;
- struct drm_display_mode previous_mode;
};
static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
@@ -387,21 +387,21 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
return 0;
}
-static void
-rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int mux, val;
- /* Store the display mode for plugin/DPMS poweron events. */
- drm_mode_copy(&hdmi->previous_mode, adj_mode);
-}
+ conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ if (WARN_ON(!conn_state))
+ return;
-static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
- int mux, val;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
if (mux)
@@ -414,10 +414,11 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
(mux) ? "1" : "0");
- rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
+ rk3066_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
}
-static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
@@ -434,14 +435,6 @@ static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
}
-static bool
-rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- return true;
-}
-
static int
rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -457,11 +450,9 @@ rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
- .enable = rk3066_hdmi_encoder_enable,
- .disable = rk3066_hdmi_encoder_disable,
- .mode_fixup = rk3066_hdmi_encoder_mode_fixup,
- .mode_set = rk3066_hdmi_encoder_mode_set,
- .atomic_check = rk3066_hdmi_encoder_atomic_check,
+ .atomic_check = rk3066_hdmi_encoder_atomic_check,
+ .atomic_enable = rk3066_hdmi_encoder_enable,
+ .atomic_disable = rk3066_hdmi_encoder_disable,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 066299894d04..a13473b2d54c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -247,14 +247,22 @@ static inline void vop_cfg_done(struct vop *vop)
VOP_REG_SET(vop, common, cfg_done, 1);
}
-static bool has_rb_swapped(uint32_t format)
+static bool has_rb_swapped(uint32_t version, uint32_t format)
{
switch (format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_BGR888:
case DRM_FORMAT_BGR565:
return true;
+ /*
+ * The full framework (IP version 3.x) only needs rb swapped for RGB888,
+ * while the little framework (IP version 2.x) only needs rb swapped for
+ * BGR888. Check for 3.x so that unknown VOP versions also rb swap only
+ * BGR888.
+ */
+ case DRM_FORMAT_RGB888:
+ return VOP_MAJOR(version) == 3;
+ case DRM_FORMAT_BGR888:
+ return VOP_MAJOR(version) != 3;
default:
return false;
}
@@ -1030,7 +1038,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, dsp_info, dsp_info);
VOP_WIN_SET(vop, win, dsp_st, dsp_st);
- rb_swap = has_rb_swapped(fb->format->format);
+ rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 6862fb146ace..312da5783362 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -325,11 +325,14 @@ static enum vop2_data_format vop2_convert_format(u32 format)
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
return VOP2_FMT_YUV422SP;
+ case DRM_FORMAT_NV20:
case DRM_FORMAT_Y210:
return VOP2_FMT_YUV422SP_10;
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return VOP2_FMT_YUV444SP;
+ case DRM_FORMAT_NV30:
+ return VOP2_FMT_YUV444SP_10;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
return VOP2_FMT_VYUY422;
@@ -414,6 +417,8 @@ static bool vop2_win_uv_swap(u32 format)
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV15:
+ case DRM_FORMAT_NV20:
+ case DRM_FORMAT_NV30:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
return true;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 22288ad7f326..2c45d81983a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -48,8 +48,10 @@ static const uint32_t formats_rk356x_esmart[] = {
DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
DRM_FORMAT_NV61, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
DRM_FORMAT_NV42, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */
DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
};
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 3143ecaaff86..f8ed093b7356 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(drm_sched_job,
__assign_str(name, sched_job->sched->name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
- &sched_job->sched->hw_rq_count);
+ &sched_job->sched->credit_count);
),
TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->id,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 409e4256f6e7..3c4f5a392b06 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,12 +81,16 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
- /* The "priority" of an entity cannot exceed the number
- * of run-queues of a scheduler.
+ /* The "priority" of an entity cannot exceed the number of run-queues of a
+ * scheduler. Protect against num_rqs being 0 by converting to signed. Choose
+ * the lowest priority available.
*/
- if (entity->priority >= sched_list[0]->num_rqs)
- entity->priority = max_t(u32, sched_list[0]->num_rqs,
- DRM_SCHED_PRIORITY_MIN);
+ if (entity->priority >= sched_list[0]->num_rqs) {
+ drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+ entity->priority, sched_list[0]->num_rqs);
+ entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
+ (s32) DRM_SCHED_PRIORITY_KERNEL);
+ }
entity->rq = sched_list[0]->sched_rq[entity->priority];
}
@@ -370,7 +374,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
container_of(cb, struct drm_sched_entity, cb);
drm_sched_entity_clear_dep(f, cb);
- drm_sched_wakeup_if_can_queue(entity->rq->sched);
+ drm_sched_wakeup(entity->rq->sched, entity);
}
/**
@@ -602,7 +606,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
drm_sched_rq_update_fifo(entity, submit_ts);
- drm_sched_wakeup_if_can_queue(entity->rq->sched);
+ drm_sched_wakeup(entity->rq->sched, entity);
}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 99797a8c836a..550492a7a031 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -48,7 +48,30 @@
* through the jobs entity pointer.
*/
-#include <linux/kthread.h>
+/**
+ * DOC: Flow Control
+ *
+ * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
+ * at which the jobs fetched from scheduler entities are executed.
+ *
+ * In this context the &drm_gpu_scheduler keeps track of a driver specified
+ * credit limit representing the capacity of this scheduler and a credit count;
+ * every &drm_sched_job carries a driver specified number of credits.
+ *
+ * Once a job is executed (but not yet finished), the job's credits contribute
+ * to the scheduler's credit count until the job is finished. If by executing
+ * one more job the scheduler's credit count would exceed the scheduler's
+ * credit limit, the job won't be executed. Instead, the scheduler will wait
+ * until the credit count has decreased enough to not overflow its credit limit.
+ * This implies waiting for previously executed jobs.
+ *
+ * Optionally, drivers may register a callback (update_job_credits) provided by
+ * struct drm_sched_backend_ops to update the job's credits dynamically. The
+ * scheduler executes this callback every time it considers a job for
+ * execution and subsequently checks whether the job fits the scheduler's credit
+ * limit.
+ */
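As a rough sketch of the optional update_job_credits hook described above (not part of this patch: the my_job wrapper, its remaining_credits field and the other my_* handlers are hypothetical driver code; only the callback signature and the drm_sched_backend_ops member come from the scheduler API), a driver could recompute a job's cost each time the scheduler re-evaluates it:

struct my_job {
	struct drm_sched_job base;
	u32 remaining_credits;	/* hypothetical driver-side bookkeeping */
};

static u32 my_update_job_credits(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Never return 0: drm_sched_can_queue() warns that zero-credit
	 * jobs bypass job-flow control entirely. */
	return max_t(u32, job->remaining_credits, 1);
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job		= my_run_job,
	.timedout_job		= my_timedout_job,
	.free_job		= my_free_job,
	.update_job_credits	= my_update_job_credits,	/* optional hook */
};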
+
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -76,6 +99,51 @@ int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
+static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
+{
+ u32 credits;
+
+ drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
+ atomic_read(&sched->credit_count),
+ &credits));
+
+ return credits;
+}
+
+/**
+ * drm_sched_can_queue -- Can we queue more to the hardware?
+ * @sched: scheduler instance
+ * @entity: the scheduler entity
+ *
+ * Return true if we can push at least one more job from @entity, false
+ * otherwise.
+ */
+static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+{
+ struct drm_sched_job *s_job;
+
+ s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ if (!s_job)
+ return false;
+
+ if (sched->ops->update_job_credits) {
+ s_job->credits = sched->ops->update_job_credits(s_job);
+
+ drm_WARN(sched, !s_job->credits,
+ "Jobs with zero credits bypass job-flow control.\n");
+ }
+
+ /* If a job exceeds the credit limit, truncate it to the credit limit
+ * itself to guarantee forward progress.
+ */
+ if (drm_WARN(sched, s_job->credits > sched->credit_limit,
+ "Jobs may not exceed the credit limit, truncate.\n"))
+ s_job->credits = sched->credit_limit;
+
+ return drm_sched_available_credits(sched) >= s_job->credits;
+}
+
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
const struct rb_node *b)
{
@@ -187,12 +255,18 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
/**
* drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
*
+ * @sched: the gpu scheduler
* @rq: scheduler run queue to check.
*
- * Try to find a ready entity, returns NULL if none found.
+ * Try to find the next ready entity.
+ *
+ * Return an entity if one is found; return an error-pointer (!NULL) if an
+ * entity was ready, but the scheduler had insufficient credits to accommodate
+ * its job; return NULL if no ready entity was found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
struct drm_sched_entity *entity;
@@ -202,6 +276,14 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
if (drm_sched_entity_is_ready(entity)) {
+ /* If we can't queue yet, preserve the current
+ * entity in terms of fairness.
+ */
+ if (!drm_sched_can_queue(sched, entity)) {
+ spin_unlock(&rq->lock);
+ return ERR_PTR(-ENOSPC);
+ }
+
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
@@ -211,8 +293,15 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
}
list_for_each_entry(entity, &rq->entities, list) {
-
if (drm_sched_entity_is_ready(entity)) {
+ /* If we can't queue yet, preserve the current entity in
+ * terms of fairness.
+ */
+ if (!drm_sched_can_queue(sched, entity)) {
+ spin_unlock(&rq->lock);
+ return ERR_PTR(-ENOSPC);
+ }
+
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
@@ -231,12 +320,18 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
/**
* drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
*
+ * @sched: the gpu scheduler
* @rq: scheduler run queue to check.
*
- * Find oldest waiting ready entity, returns NULL if none found.
+ * Find oldest waiting ready entity.
+ *
+ * Return an entity if one is found; return an error-pointer (!NULL) if an
+ * entity was ready, but the scheduler had insufficient credits to accommodate
+ * its job; return NULL if no ready entity was found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
struct rb_node *rb;
@@ -246,6 +341,14 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
if (drm_sched_entity_is_ready(entity)) {
+ /* If we can't queue yet, preserve the current entity in
+ * terms of fairness.
+ */
+ if (!drm_sched_can_queue(sched, entity)) {
+ spin_unlock(&rq->lock);
+ return ERR_PTR(-ENOSPC);
+ }
+
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
break;
@@ -257,6 +360,42 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_run_job_queue - enqueue run-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
+{
+ if (!READ_ONCE(sched->pause_submit))
+ queue_work(sched->submit_wq, &sched->work_run_job);
+}
+
+/**
+ * __drm_sched_run_free_queue - enqueue free-job work
+ * @sched: scheduler instance
+ */
+static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+{
+ if (!READ_ONCE(sched->pause_submit))
+ queue_work(sched->submit_wq, &sched->work_free_job);
+}
+
+/**
+ * drm_sched_run_free_queue - enqueue free-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job;
+
+ spin_lock(&sched->job_list_lock);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished))
+ __drm_sched_run_free_queue(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
+/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -267,7 +406,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
struct drm_sched_fence *s_fence = s_job->s_fence;
struct drm_gpu_scheduler *sched = s_fence->sched;
- atomic_dec(&sched->hw_rq_count);
+ atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
trace_drm_sched_process_job(s_fence);
@@ -275,7 +414,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
dma_fence_put(&s_fence->finished);
- wake_up_interruptible(&sched->wake_up_worker);
+ __drm_sched_run_free_queue(sched);
}
/**
@@ -299,10 +438,35 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
*/
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
+ lockdep_assert_held(&sched->job_list_lock);
+
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!list_empty(&sched->pending_list))
- queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
+ mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
+}
+
+static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
+/**
+ * drm_sched_tdr_queue_imm - immediately start job timeout handler
+ *
+ * @sched: scheduler for which the timeout handling should be started.
+ *
+ * Start timeout handling immediately for the named scheduler.
+ */
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ sched->timeout = 0;
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
}
+EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
/**
* drm_sched_fault - immediately start timeout handler
@@ -388,7 +552,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
- /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ /* Protects against concurrent deletion in drm_sched_get_finished_job */
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
@@ -416,11 +580,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
spin_unlock(&sched->job_list_lock);
}
- if (status != DRM_GPU_SCHED_STAT_ENODEV) {
- spin_lock(&sched->job_list_lock);
- drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
- }
+ if (status != DRM_GPU_SCHED_STAT_ENODEV)
+ drm_sched_start_timeout_unlocked(sched);
}
/**
@@ -439,13 +600,13 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- kthread_park(sched->thread);
+ drm_sched_wqueue_stop(sched);
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_cleanup_job cannot race against us and release the
+ * drm_sched_get_finished_job cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+ * (earlier) cleanups and drm_sched_get_finished_job will not be called
* now until the scheduler thread is unparked.
*/
if (bad && bad->sched == sched)
@@ -468,7 +629,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
&s_job->cb)) {
dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
- atomic_dec(&sched->hw_rq_count);
+ atomic_sub(s_job->credits, &sched->credit_count);
} else {
/*
* remove job from pending_list.
@@ -529,7 +690,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
- atomic_inc(&sched->hw_rq_count);
+ atomic_add(s_job->credits, &sched->credit_count);
if (!full_recovery)
continue;
@@ -546,13 +707,10 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
drm_sched_job_done(s_job, -ECANCELED);
}
- if (full_recovery) {
- spin_lock(&sched->job_list_lock);
- drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
- }
+ if (full_recovery)
+ drm_sched_start_timeout_unlocked(sched);
- kthread_unpark(sched->thread);
+ drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
@@ -613,6 +771,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* drm_sched_job_init - init a scheduler job
* @job: scheduler job to init
* @entity: scheduler entity to use
+ * @credits: the number of credits this job contributes to the scheduler's
+ * credit limit
* @owner: job owner for debugging
*
* Refer to drm_sched_entity_push_job() documentation
@@ -630,7 +790,7 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- void *owner)
+ u32 credits, void *owner)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -641,7 +801,13 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOENT;
}
+ if (unlikely(!credits)) {
+ pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
+ return -EINVAL;
+ }
+
job->entity = entity;
+ job->credits = credits;
job->s_fence = drm_sched_fence_alloc(entity, owner);
if (!job->s_fence)
return -ENOMEM;
@@ -854,27 +1020,17 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
- * drm_sched_can_queue -- Can we queue more to the hardware?
- * @sched: scheduler instance
- *
- * Return true if we can push more jobs to the hw, otherwise false.
- */
-static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
-{
- return atomic_read(&sched->hw_rq_count) <
- sched->hw_submission_limit;
-}
-
-/**
- * drm_sched_wakeup_if_can_queue - Wake up the scheduler
+ * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
* @sched: scheduler instance
+ * @entity: the scheduler entity
*
* Wake up the scheduler if we can queue jobs.
*/
-void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
- if (drm_sched_can_queue(sched))
- wake_up_interruptible(&sched->wake_up_worker);
+ if (drm_sched_can_queue(sched, entity))
+ drm_sched_run_job_queue(sched);
}
/**
@@ -882,7 +1038,11 @@ void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
*
* @sched: scheduler instance
*
- * Returns the entity to process or NULL if none are found.
+ * Return an entity to process or NULL if none are found.
+ *
+ * Note that we break out of the for-loop when "entity" is non-NULL, which can
+ * also be an error pointer; this ensures we don't process lower-priority
+ * run-queues. See the comments in the respective selection functions.
*/
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
@@ -890,23 +1050,21 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
struct drm_sched_entity *entity;
int i;
- if (!drm_sched_can_queue(sched))
- return NULL;
-
- /* Kernel run queue has higher priority than normal run queue*/
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ /* Start with the highest priority.
+ */
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
- drm_sched_rq_select_entity_fifo(sched->sched_rq[i]) :
- drm_sched_rq_select_entity_rr(sched->sched_rq[i]);
+ drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
+ drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
if (entity)
break;
}
- return entity;
+ return IS_ERR(entity) ? NULL : entity;
}
/**
- * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ * drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
@@ -914,7 +1072,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
@@ -934,8 +1092,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
typeof(*next), list);
if (next) {
- next->s_fence->scheduled.timestamp =
- dma_fence_timestamp(&job->s_fence->finished);
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+ &next->s_fence->scheduled.flags))
+ next->s_fence->scheduled.timestamp =
+ dma_fence_timestamp(&job->s_fence->finished);
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
@@ -985,91 +1145,82 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
EXPORT_SYMBOL(drm_sched_pick_best);
/**
- * drm_sched_blocked - check if the scheduler is blocked
- *
- * @sched: scheduler instance
+ * drm_sched_free_job_work - worker to call free_job
*
- * Returns true if blocked, otherwise false.
+ * @w: free job work
*/
-static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+static void drm_sched_free_job_work(struct work_struct *w)
{
- if (kthread_should_park()) {
- kthread_parkme();
- return true;
- }
+ struct drm_gpu_scheduler *sched =
+ container_of(w, struct drm_gpu_scheduler, work_free_job);
+ struct drm_sched_job *job;
+
+ if (READ_ONCE(sched->pause_submit))
+ return;
- return false;
+ job = drm_sched_get_finished_job(sched);
+ if (job)
+ sched->ops->free_job(job);
+
+ drm_sched_run_free_queue(sched);
+ drm_sched_run_job_queue(sched);
}
/**
- * drm_sched_main - main scheduler thread
- *
- * @param: scheduler instance
+ * drm_sched_run_job_work - worker to call run_job
*
- * Returns 0.
+ * @w: run job work
*/
-static int drm_sched_main(void *param)
+static void drm_sched_run_job_work(struct work_struct *w)
{
- struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+ struct drm_gpu_scheduler *sched =
+ container_of(w, struct drm_gpu_scheduler, work_run_job);
+ struct drm_sched_entity *entity;
+ struct dma_fence *fence;
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
int r;
- sched_set_fifo_low(current);
-
- while (!kthread_should_stop()) {
- struct drm_sched_entity *entity = NULL;
- struct drm_sched_fence *s_fence;
- struct drm_sched_job *sched_job;
- struct dma_fence *fence;
- struct drm_sched_job *cleanup_job = NULL;
-
- wait_event_interruptible(sched->wake_up_worker,
- (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
- (!drm_sched_blocked(sched) &&
- (entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop());
-
- if (cleanup_job)
- sched->ops->free_job(cleanup_job);
-
- if (!entity)
- continue;
-
- sched_job = drm_sched_entity_pop_job(entity);
+ if (READ_ONCE(sched->pause_submit))
+ return;
- if (!sched_job) {
- complete_all(&entity->entity_idle);
- continue;
- }
+ entity = drm_sched_select_entity(sched);
+ if (!entity)
+ return;
- s_fence = sched_job->s_fence;
+ sched_job = drm_sched_entity_pop_job(entity);
+ if (!sched_job) {
+ complete_all(&entity->entity_idle);
+ return; /* No more work */
+ }
- atomic_inc(&sched->hw_rq_count);
- drm_sched_job_begin(sched_job);
+ s_fence = sched_job->s_fence;
- trace_drm_run_job(sched_job, entity);
- fence = sched->ops->run_job(sched_job);
- complete_all(&entity->entity_idle);
- drm_sched_fence_scheduled(s_fence, fence);
+ atomic_add(sched_job->credits, &sched->credit_count);
+ drm_sched_job_begin(sched_job);
- if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
+ trace_drm_run_job(sched_job, entity);
+ fence = sched->ops->run_job(sched_job);
+ complete_all(&entity->entity_idle);
+ drm_sched_fence_scheduled(s_fence, fence);
- r = dma_fence_add_callback(fence, &sched_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(sched_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else {
- drm_sched_job_done(sched_job, IS_ERR(fence) ?
- PTR_ERR(fence) : 0);
- }
+ if (!IS_ERR_OR_NULL(fence)) {
+ /* Drop for original kref_init of the fence */
+ dma_fence_put(fence);
- wake_up(&sched->job_scheduled);
+ r = dma_fence_add_callback(fence, &sched_job->cb,
+ drm_sched_job_done_cb);
+ if (r == -ENOENT)
+ drm_sched_job_done(sched_job, fence->error);
+ else if (r)
+ DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+ } else {
+ drm_sched_job_done(sched_job, IS_ERR(fence) ?
+ PTR_ERR(fence) : 0);
}
- return 0;
+
+ wake_up(&sched->job_scheduled);
+ drm_sched_run_job_queue(sched);
}
/**
@@ -1077,8 +1228,10 @@ static int drm_sched_main(void *param)
*
* @sched: scheduler instance
* @ops: backend operations for this scheduler
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used
* @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @hw_submission: number of hw submissions that can be in flight
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
* @hang_limit: number of times to allow a job to hang before dropping it
* @timeout: timeout value in jiffies for the scheduler
* @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
@@ -1091,14 +1244,15 @@ static int drm_sched_main(void *param)
*/
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
+ struct workqueue_struct *submit_wq,
+ u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name, struct device *dev)
{
int i, ret;
sched->ops = ops;
- sched->hw_submission_limit = hw_submission;
+ sched->credit_limit = credit_limit;
sched->name = name;
sched->timeout = timeout;
sched->timeout_wq = timeout_wq ? : system_wq;
@@ -1121,46 +1275,50 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
return 0;
}
+ if (submit_wq) {
+ sched->submit_wq = submit_wq;
+ sched->own_submit_wq = false;
+ } else {
+ sched->submit_wq = alloc_ordered_workqueue(name, 0);
+ if (!sched->submit_wq)
+ return -ENOMEM;
+
+ sched->own_submit_wq = true;
+ }
+ ret = -ENOMEM;
sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
- if (!sched->sched_rq) {
- drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
- return -ENOMEM;
- }
+ if (!sched->sched_rq)
+ goto Out_free;
sched->num_rqs = num_rqs;
- ret = -ENOMEM;
- for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
goto Out_unroll;
drm_sched_rq_init(sched, sched->sched_rq[i]);
}
- init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
- atomic_set(&sched->hw_rq_count, 0);
+ atomic_set(&sched->credit_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
+ INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
+ INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
atomic_set(&sched->_score, 0);
atomic64_set(&sched->job_id_count, 0);
-
- /* Each scheduler will run on a seperate kernel thread */
- sched->thread = kthread_run(drm_sched_main, sched, sched->name);
- if (IS_ERR(sched->thread)) {
- ret = PTR_ERR(sched->thread);
- sched->thread = NULL;
- DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
- goto Out_unroll;
- }
+ sched->pause_submit = false;
sched->ready = true;
return 0;
Out_unroll:
- for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
+ for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
+Out_free:
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+ if (sched->own_submit_wq)
+ destroy_workqueue(sched->submit_wq);
drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
return ret;
}
@@ -1178,10 +1336,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
struct drm_sched_entity *s_entity;
int i;
- if (sched->thread)
- kthread_stop(sched->thread);
+ drm_sched_wqueue_stop(sched);
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
@@ -1202,6 +1359,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
+ if (sched->own_submit_wq)
+ destroy_workqueue(sched->submit_wq);
sched->ready = false;
kfree(sched->sched_rq);
sched->sched_rq = NULL;
@@ -1231,9 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
- for (i = DRM_SCHED_PRIORITY_MIN;
- i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
- i++) {
+ for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
@@ -1252,3 +1409,42 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
}
}
EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_wqueue_ready - Is the scheduler ready for submission
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if submission is ready
+ */
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
+{
+ return sched->ready;
+}
+EXPORT_SYMBOL(drm_sched_wqueue_ready);
+
+/**
+ * drm_sched_wqueue_stop - stop scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
+{
+ WRITE_ONCE(sched->pause_submit, true);
+ cancel_work_sync(&sched->work_run_job);
+ cancel_work_sync(&sched->work_free_job);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_stop);
+
+/**
+ * drm_sched_wqueue_start - start scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
+{
+ WRITE_ONCE(sched->pause_submit, false);
+ queue_work(sched->submit_wq, &sched->work_run_job);
+ queue_work(sched->submit_wq, &sched->work_free_job);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_start);
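
For illustration, a driver-side sketch of how the new credit-based interface might be wired up. Everything prefixed my_ (the ring size, the command count and the surrounding ops) is hypothetical and not part of this series; only drm_sched_init(), drm_sched_job_init() and the optional update_job_credits callback come from the changes above.

/* Hypothetical driver code; only the scheduler entry points are real. */
static u32 my_update_job_credits(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Re-estimate how much ring space the job still needs (assumption);
	 * must never return 0, or the job bypasses flow control. */
	return job->num_cmds;
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job		= my_run_job,
	.timedout_job		= my_timedout_job,
	.free_job		= my_free_job,
	.update_job_credits	= my_update_job_credits,	/* optional */
};

/* Scheduler setup: credit_limit replaces the old hw_submission count,
 * and passing a NULL submit_wq lets the scheduler allocate its own
 * ordered workqueue for the run-job/free-job work items. */
ret = drm_sched_init(&my_ring->sched, &my_sched_ops,
		     NULL, DRM_SCHED_PRIORITY_COUNT,
		     MY_RING_SIZE /* credit_limit */, my_hang_limit,
		     msecs_to_jiffies(500), NULL, NULL, "my-ring", dev);

/* Job submission: each job now declares the credits it consumes. */
ret = drm_sched_job_init(&job->base, &my_entity, job->num_cmds, file_priv);
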
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index e0174f82e353..bef293922b98 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -808,7 +808,8 @@ static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect,
- u8 *buf, u8 *data_array)
+ u8 *buf, u8 *data_array,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
struct iosys_map dst;
@@ -826,7 +827,7 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
return ret;
iosys_map_set_vaddr(&dst, buf);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -838,7 +839,8 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect, u8 *buf,
- u8 *data_array)
+ u8 *data_array,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
unsigned int dst_pitch = drm_rect_width(rect);
@@ -855,7 +857,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
return ret;
iosys_map_set_vaddr(&dst, buf);
- drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -871,6 +873,7 @@ static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = &ssd130x_state->base;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state = NULL;
const struct drm_format_info *fi;
@@ -895,6 +898,16 @@ static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+ if (plane_state->fb->format != fi) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state,
+ pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->buffer)
return -ENOMEM;
@@ -909,6 +922,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = &ssd130x_state->base;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state = NULL;
const struct drm_format_info *fi;
@@ -933,6 +947,16 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+ if (plane_state->fb->format != fi) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state,
+ pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->buffer)
return -ENOMEM;
@@ -968,7 +992,8 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
ssd130x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
ssd130x_plane_state->buffer,
- ssd130x_crtc_state->data_array);
+ ssd130x_crtc_state->data_array,
+ &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
@@ -1002,7 +1027,8 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
ssd132x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
ssd130x_plane_state->buffer,
- ssd130x_crtc_state->data_array);
+ ssd130x_crtc_state->data_array,
+ &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
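
As a rough sketch of the conversion-state interface used above: a driver that does not go through the shadow-plane helpers could keep the state on the stack around a single blit. The my_* names are placeholders; DRM_FORMAT_CONV_STATE_INIT and drm_format_conv_state_release() are assumed from this series.

/* Illustrative only; everything prefixed my_ is hypothetical. */
static void my_fb_update(struct my_device *mydev, struct drm_framebuffer *fb,
			 const struct iosys_map *vmap, struct drm_rect *clip)
{
	struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
	struct iosys_map dst;

	iosys_map_set_vaddr(&dst, mydev->shadow_buf);

	/* The conversion helpers now take the state and reuse its temporary
	 * buffer instead of allocating one on every call. */
	drm_fb_xrgb8888_to_rgb565(&dst, NULL, vmap, fb, clip, &fmtcnv_state, false);

	/* Release whatever temporary storage the helpers reserved. */
	drm_format_conv_state_release(&fmtcnv_state);
}
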
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index acf7cedf0c1a..075c5c3ee75a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -17,7 +17,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_plane_helper.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 48183bbd0590..deb3bb96e2a8 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -859,16 +859,14 @@ static int sprd_dpu_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &dpu_component_ops);
}
-static int sprd_dpu_remove(struct platform_device *pdev)
+static void sprd_dpu_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dpu_component_ops);
-
- return 0;
}
struct platform_driver sprd_dpu_driver = {
.probe = sprd_dpu_probe,
- .remove = sprd_dpu_remove,
+ .remove_new = sprd_dpu_remove,
.driver = {
.name = "sprd-dpu-drv",
.of_match_table = dpu_match_table,
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index 0aa39156f2fa..a74cd0caf645 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -138,10 +138,9 @@ static int sprd_drm_probe(struct platform_device *pdev)
return drm_of_component_probe(&pdev->dev, component_compare_of, &drm_component_ops);
}
-static int sprd_drm_remove(struct platform_device *pdev)
+static void sprd_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &drm_component_ops);
- return 0;
}
static void sprd_drm_shutdown(struct platform_device *pdev)
@@ -164,7 +163,7 @@ MODULE_DEVICE_TABLE(of, drm_match_table);
static struct platform_driver sprd_drm_driver = {
.probe = sprd_drm_probe,
- .remove = sprd_drm_remove,
+ .remove_new = sprd_drm_remove,
.shutdown = sprd_drm_shutdown,
.driver = {
.name = "sprd-drm-drv",
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index d7b143a75601..0b69c140eab3 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -1051,18 +1051,16 @@ static int sprd_dsi_probe(struct platform_device *pdev)
return mipi_dsi_host_register(&dsi->host);
}
-static int sprd_dsi_remove(struct platform_device *pdev)
+static void sprd_dsi_remove(struct platform_device *pdev)
{
struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev);
mipi_dsi_host_unregister(&dsi->host);
-
- return 0;
}
struct platform_driver sprd_dsi_driver = {
.probe = sprd_dsi_probe,
- .remove = sprd_dsi_remove,
+ .remove_new = sprd_dsi_remove,
.driver = {
.name = "sprd-dsi-drv",
.of_match_table = dsi_match_table,
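
The sprd conversions above follow the tree-wide move from the int-returning .remove callback to the void-returning .remove_new for platform drivers. A generic sketch of the pattern, with my_* names as placeholders:

/* Hypothetical driver; the .remove_new convention is the point here. */
static void my_plat_remove(struct platform_device *pdev)
{
	/* All teardown happens here; there is no error code to return,
	 * so the "return 0;" from the old callback simply goes away. */
	component_del(&pdev->dev, &my_component_ops);
}

static struct platform_driver my_plat_driver = {
	.probe		= my_plat_probe,
	.remove_new	= my_plat_remove,
	.driver		= {
		.name = "my-drv",
	},
};
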
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 0ba3ca3ac509..a1fcee665023 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -24,6 +24,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index d5a3d3f4fece..83341576630d 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -20,6 +20,7 @@
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index ba7baa622675..d6183b3d7688 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -9,15 +9,16 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_connector_test.o \
drm_damage_helper_test.o \
drm_dp_mst_helper_test.o \
+ drm_exec_test.o \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
+ drm_gem_shmem_test.o \
drm_managed_test.o \
drm_mm_test.o \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o \
- drm_exec_test.o
+ drm_rect_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 09ee6f6af896..ea2af6bd9abe 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -13,315 +13,11 @@
#include "../lib/drm_random.h"
-#define TIMEOUT(name__) \
- unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
-
-static unsigned int random_seed;
-
static inline u64 get_size(int order, u64 chunk_size)
{
return (1 << order) * chunk_size;
}
-__printf(2, 3)
-static bool __timeout(unsigned long timeout, const char *fmt, ...)
-{
- va_list va;
-
- if (!signal_pending(current)) {
- cond_resched();
- if (time_before(jiffies, timeout))
- return false;
- }
-
- if (fmt) {
- va_start(va, fmt);
- vprintk(fmt, va);
- va_end(va);
- }
-
- return true;
-}
-
-static void __dump_block(struct kunit *test, struct drm_buddy *mm,
- struct drm_buddy_block *block, bool buddy)
-{
- kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
- block->header, drm_buddy_block_state(block),
- drm_buddy_block_order(block), drm_buddy_block_offset(block),
- drm_buddy_block_size(mm, block), !block->parent, buddy);
-}
-
-static void dump_block(struct kunit *test, struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
-
- __dump_block(test, mm, block, false);
-
- buddy = drm_get_buddy(block);
- if (buddy)
- __dump_block(test, mm, buddy, true);
-}
-
-static int check_block(struct kunit *test, struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
- unsigned int block_state;
- u64 block_size;
- u64 offset;
- int err = 0;
-
- block_state = drm_buddy_block_state(block);
-
- if (block_state != DRM_BUDDY_ALLOCATED &&
- block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
- kunit_err(test, "block state mismatch\n");
- err = -EINVAL;
- }
-
- block_size = drm_buddy_block_size(mm, block);
- offset = drm_buddy_block_offset(block);
-
- if (block_size < mm->chunk_size) {
- kunit_err(test, "block size smaller than min size\n");
- err = -EINVAL;
- }
-
- /* We can't use is_power_of_2() for a u64 on 32-bit systems. */
- if (block_size & (block_size - 1)) {
- kunit_err(test, "block size not power of two\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(block_size, mm->chunk_size)) {
- kunit_err(test, "block size not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, mm->chunk_size)) {
- kunit_err(test, "block offset not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, block_size)) {
- kunit_err(test, "block offset not aligned to block size\n");
- err = -EINVAL;
- }
-
- buddy = drm_get_buddy(block);
-
- if (!buddy && block->parent) {
- kunit_err(test, "buddy has gone fishing\n");
- err = -EINVAL;
- }
-
- if (buddy) {
- if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
- kunit_err(test, "buddy has wrong offset\n");
- err = -EINVAL;
- }
-
- if (drm_buddy_block_size(mm, buddy) != block_size) {
- kunit_err(test, "buddy size mismatch\n");
- err = -EINVAL;
- }
-
- if (drm_buddy_block_state(buddy) == block_state &&
- block_state == DRM_BUDDY_FREE) {
- kunit_err(test, "block and its buddy are free\n");
- err = -EINVAL;
- }
- }
-
- return err;
-}
-
-static int check_blocks(struct kunit *test, struct drm_buddy *mm,
- struct list_head *blocks, u64 expected_size, bool is_contiguous)
-{
- struct drm_buddy_block *block;
- struct drm_buddy_block *prev;
- u64 total;
- int err = 0;
-
- block = NULL;
- prev = NULL;
- total = 0;
-
- list_for_each_entry(block, blocks, link) {
- err = check_block(test, mm, block);
-
- if (!drm_buddy_block_is_allocated(block)) {
- kunit_err(test, "block not allocated\n");
- err = -EINVAL;
- }
-
- if (is_contiguous && prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(block);
-
- if (offset != (prev_offset + prev_block_size)) {
- kunit_err(test, "block offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- if (err)
- break;
-
- total += drm_buddy_block_size(mm, block);
- prev = block;
- }
-
- if (!err) {
- if (total != expected_size) {
- kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
- expected_size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- kunit_err(test, "prev block, dump:\n");
- dump_block(test, mm, prev);
- }
-
- kunit_err(test, "bad block, dump:\n");
- dump_block(test, mm, block);
-
- return err;
-}
-
-static int check_mm(struct kunit *test, struct drm_buddy *mm)
-{
- struct drm_buddy_block *root;
- struct drm_buddy_block *prev;
- unsigned int i;
- u64 total;
- int err = 0;
-
- if (!mm->n_roots) {
- kunit_err(test, "n_roots is zero\n");
- return -EINVAL;
- }
-
- if (mm->n_roots != hweight64(mm->size)) {
- kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
- mm->n_roots, hweight64(mm->size));
- return -EINVAL;
- }
-
- root = NULL;
- prev = NULL;
- total = 0;
-
- for (i = 0; i < mm->n_roots; ++i) {
- struct drm_buddy_block *block;
- unsigned int order;
-
- root = mm->roots[i];
- if (!root) {
- kunit_err(test, "root(%u) is NULL\n", i);
- err = -EINVAL;
- break;
- }
-
- err = check_block(test, mm, root);
-
- if (!drm_buddy_block_is_free(root)) {
- kunit_err(test, "root not free\n");
- err = -EINVAL;
- }
-
- order = drm_buddy_block_order(root);
-
- if (!i) {
- if (order != mm->max_order) {
- kunit_err(test, "max order root missing\n");
- err = -EINVAL;
- }
- }
-
- if (prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(root);
-
- if (offset != (prev_offset + prev_block_size)) {
- kunit_err(test, "root offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- block = list_first_entry_or_null(&mm->free_list[order],
- struct drm_buddy_block, link);
- if (block != root) {
- kunit_err(test, "root mismatch at order=%u\n", order);
- err = -EINVAL;
- }
-
- if (err)
- break;
-
- prev = root;
- total += drm_buddy_block_size(mm, root);
- }
-
- if (!err) {
- if (total != mm->size) {
- kunit_err(test, "expected mm size=%llx, found=%llx\n",
- mm->size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- kunit_err(test, "prev root(%u), dump:\n", i - 1);
- dump_block(test, mm, prev);
- }
-
- if (root) {
- kunit_err(test, "bad root(%u), dump:\n", i);
- dump_block(test, mm, root);
- }
-
- return err;
-}
-
-static void mm_config(u64 *size, u64 *chunk_size)
-{
- DRM_RND_STATE(prng, random_seed);
- u32 s, ms;
-
- /* Nothing fancy, just try to get an interesting bit pattern */
-
- prandom_seed_state(&prng, random_seed);
-
- /* Let size be a random number of pages up to 8 GB (2M pages) */
- s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
- /* Let the chunk size be a random power of 2 less than size */
- ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
- /* Round size down to the chunk size */
- s &= -ms;
-
- /* Convert from pages to bytes */
- *chunk_size = (u64)ms << 12;
- *size = (u64)s << 12;
-}
-
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
u64 mm_size, size, start = 0;
@@ -403,96 +99,6 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
drm_buddy_fini(&mm);
}
-static void drm_test_buddy_alloc_smoke(struct kunit *test)
-{
- u64 mm_size, chunk_size, start = 0;
- unsigned long flags = 0;
- struct drm_buddy mm;
- int *order;
- int i;
-
- DRM_RND_STATE(prng, random_seed);
- TIMEOUT(end_time);
-
- mm_config(&mm_size, &chunk_size);
-
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
- "buddy_init failed\n");
-
- order = drm_random_order(mm.max_order + 1, &prng);
- KUNIT_ASSERT_TRUE(test, order);
-
- for (i = 0; i <= mm.max_order; ++i) {
- struct drm_buddy_block *block;
- int max_order = order[i];
- bool timeout = false;
- LIST_HEAD(blocks);
- u64 total, size;
- LIST_HEAD(tmp);
- int order, err;
-
- KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
- "pre-mm check failed, abort\n");
-
- order = max_order;
- total = 0;
-
- do {
-retry:
- size = get_size(order, chunk_size);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
- if (err) {
- if (err == -ENOMEM) {
- KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- } else {
- if (order--) {
- err = 0;
- goto retry;
- }
-
- KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
- order);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
- KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
-
- list_move_tail(&block->link, &blocks);
- KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
- "buddy_alloc order mismatch\n");
-
- total += drm_buddy_block_size(&mm, block);
-
- if (__timeout(end_time, NULL)) {
- timeout = true;
- break;
- }
- } while (total < mm.size);
-
- if (!err)
- err = check_blocks(test, &mm, &blocks, total, false);
-
- drm_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
- "post-mm check failed\n");
- }
-
- if (err || timeout)
- break;
-
- cond_resched();
- }
-
- kfree(order);
- drm_buddy_fini(&mm);
-}
-
static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
u64 mm_size, size, start = 0;
@@ -634,64 +240,6 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
drm_buddy_fini(&mm);
}
-static void drm_test_buddy_alloc_range(struct kunit *test)
-{
- unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
- u64 offset, size, rem, chunk_size, end;
- unsigned long page_num;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
-
- mm_config(&size, &chunk_size);
-
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
- "buddy_init failed");
-
- KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
- "pre-mm check failed, abort!");
-
- rem = mm.size;
- offset = 0;
-
- for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
- struct drm_buddy_block *block;
- LIST_HEAD(tmp);
-
- size = min(page_num * mm.chunk_size, rem);
- end = offset + size;
-
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
- size, mm.chunk_size,
- &tmp, flags),
- "alloc_range with offset=%llx, size=%llx failed\n", offset, size);
-
- block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
- KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");
-
- KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
- "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
- drm_buddy_block_offset(block), offset);
-
- KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));
-
- list_splice_tail(&tmp, &blocks);
-
- offset += size;
-
- rem -= size;
- if (!rem)
- break;
-
- cond_resched();
- }
-
- drm_buddy_free_list(&mm, &blocks);
-
- KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");
-
- drm_buddy_fini(&mm);
-}
-
static void drm_test_buddy_alloc_limit(struct kunit *test)
{
u64 size = U64_MAX, start = 0;
@@ -727,29 +275,16 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
-static int drm_buddy_suite_init(struct kunit_suite *suite)
-{
- while (!random_seed)
- random_seed = get_random_u32();
-
- kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed);
-
- return 0;
-}
-
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
- KUNIT_CASE(drm_test_buddy_alloc_range),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
- KUNIT_CASE(drm_test_buddy_alloc_smoke),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
{}
};
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
- .suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 545beea33e8c..d916e548fcb1 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -42,13 +42,13 @@ static const struct drm_dp_mst_calc_pbn_mode_test drm_dp_mst_calc_pbn_mode_cases
.clock = 332880,
.bpp = 24,
.dsc = true,
- .expected = 50
+ .expected = 1191
},
{
.clock = 324540,
.bpp = 24,
.dsc = true,
- .expected = 49
+ .expected = 1161
},
};
@@ -56,7 +56,7 @@ static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value;
- KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc),
+ KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp << 4),
params->expected);
}
@@ -68,6 +68,152 @@ static void dp_mst_calc_pbn_mode_desc(const struct drm_dp_mst_calc_pbn_mode_test
KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_cases,
dp_mst_calc_pbn_mode_desc);
+struct drm_dp_mst_calc_pbn_div_test {
+ int link_rate;
+ int lane_count;
+ fixed20_12 expected;
+};
+
+#define fp_init(__int, __frac) { \
+ .full = (__int) * (1 << 12) + \
+ (__frac) * (1 << 12) / 100000 \
+}
+
+static const struct drm_dp_mst_calc_pbn_div_test drm_dp_mst_calc_pbn_div_dp1_4_cases[] = {
+ /*
+ * UHBR rates (DP Standard v2.1 2.7.6.3, which specifies the value
+ * rounded to the closest 2 decimal places):
+ * .expected = .link_rate * .lane_count * 0.9671 / 8 / 54 / 100
+ * DP1.4 rates (DP Standard v2.1 2.6.4.2):
+ * .expected = .link_rate * .lane_count * 0.8000 / 8 / 54 / 100
+ *
+ * truncated to 5 decimal places.
+ */
+ {
+ .link_rate = 2000000,
+ .lane_count = 4,
+ .expected = fp_init(179, 9259), /* 179.09259 */
+ },
+ {
+ .link_rate = 2000000,
+ .lane_count = 2,
+ .expected = fp_init(89, 54629),
+ },
+ {
+ .link_rate = 2000000,
+ .lane_count = 1,
+ .expected = fp_init(44, 77314),
+ },
+ {
+ .link_rate = 1350000,
+ .lane_count = 4,
+ .expected = fp_init(120, 88750),
+ },
+ {
+ .link_rate = 1350000,
+ .lane_count = 2,
+ .expected = fp_init(60, 44375),
+ },
+ {
+ .link_rate = 1350000,
+ .lane_count = 1,
+ .expected = fp_init(30, 22187),
+ },
+ {
+ .link_rate = 1000000,
+ .lane_count = 4,
+ .expected = fp_init(89, 54629),
+ },
+ {
+ .link_rate = 1000000,
+ .lane_count = 2,
+ .expected = fp_init(44, 77314),
+ },
+ {
+ .link_rate = 1000000,
+ .lane_count = 1,
+ .expected = fp_init(22, 38657),
+ },
+ {
+ .link_rate = 810000,
+ .lane_count = 4,
+ .expected = fp_init(60, 0),
+ },
+ {
+ .link_rate = 810000,
+ .lane_count = 2,
+ .expected = fp_init(30, 0),
+ },
+ {
+ .link_rate = 810000,
+ .lane_count = 1,
+ .expected = fp_init(15, 0),
+ },
+ {
+ .link_rate = 540000,
+ .lane_count = 4,
+ .expected = fp_init(40, 0),
+ },
+ {
+ .link_rate = 540000,
+ .lane_count = 2,
+ .expected = fp_init(20, 0),
+ },
+ {
+ .link_rate = 540000,
+ .lane_count = 1,
+ .expected = fp_init(10, 0),
+ },
+ {
+ .link_rate = 270000,
+ .lane_count = 4,
+ .expected = fp_init(20, 0),
+ },
+ {
+ .link_rate = 270000,
+ .lane_count = 2,
+ .expected = fp_init(10, 0),
+ },
+ {
+ .link_rate = 270000,
+ .lane_count = 1,
+ .expected = fp_init(5, 0),
+ },
+ {
+ .link_rate = 162000,
+ .lane_count = 4,
+ .expected = fp_init(12, 0),
+ },
+ {
+ .link_rate = 162000,
+ .lane_count = 2,
+ .expected = fp_init(6, 0),
+ },
+ {
+ .link_rate = 162000,
+ .lane_count = 1,
+ .expected = fp_init(3, 0),
+ },
+};
+
+static void drm_test_dp_mst_calc_pbn_div(struct kunit *test)
+{
+ const struct drm_dp_mst_calc_pbn_div_test *params = test->param_value;
+ /* mgr->dev is only needed by drm_dbg_kms(), but it's not called for the test cases. */
+ struct drm_dp_mst_topology_mgr *mgr = test->priv;
+
+ KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(mgr, params->link_rate, params->lane_count).full,
+ params->expected.full);
+}
+
+static void dp_mst_calc_pbn_div_desc(const struct drm_dp_mst_calc_pbn_div_test *t, char *desc)
+{
+ sprintf(desc, "Link rate %d lane count %d", t->link_rate, t->lane_count);
+}
+
+KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_div, drm_dp_mst_calc_pbn_div_dp1_4_cases,
+ dp_mst_calc_pbn_div_desc);
+
static u8 data[] = { 0xff, 0x00, 0xdd };
struct drm_dp_mst_sideband_msg_req_test {
@@ -416,13 +562,27 @@ KUNIT_ARRAY_PARAM(drm_dp_mst_sideband_msg_req, drm_dp_mst_sideband_msg_req_cases
static struct kunit_case drm_dp_mst_helper_tests[] = {
KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_gen_params),
+ KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_div, drm_dp_mst_calc_pbn_div_gen_params),
KUNIT_CASE_PARAM(drm_test_dp_mst_sideband_msg_req_decode,
drm_dp_mst_sideband_msg_req_gen_params),
{ }
};
+static int drm_dp_mst_helper_tests_init(struct kunit *test)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ mgr = kunit_kzalloc(test, sizeof(*mgr), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mgr);
+
+ test->priv = mgr;
+
+ return 0;
+}
+
static struct kunit_suite drm_dp_mst_helper_test_suite = {
.name = "drm_dp_mst_helper",
+ .init = drm_dp_mst_helper_tests_init,
.test_cases = drm_dp_mst_helper_tests,
};
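
As a worked instance of the formula in the test's comment, take the first UHBR entry (link_rate = 2000000, lane_count = 4):

	2000000 * 4 * 0.9671 / 8 / 54 / 100 = 179.09259...

fp_init(179, 9259) stores this in fixed20_12 .full units as 179 * 4096 + (9259 * 4096) / 100000 = 733184 + 379 = 733563, which is the value drm_dp_get_vc_payload_bw() is expected to return for that case.
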
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index f6408e56f786..08992636ec05 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -20,6 +20,10 @@
#define TEST_USE_DEFAULT_PITCH 0
+static unsigned char fmtcnv_state_mem[PAGE_SIZE];
+static struct drm_format_conv_state fmtcnv_state =
+ DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(fmtcnv_state_mem, sizeof(fmtcnv_state_mem));
+
struct convert_to_gray8_result {
unsigned int dst_pitch;
const u8 expected[TEST_BUF_SIZE];
@@ -630,8 +634,7 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_gray8(&dst, dst_pitch, &src, &fb, &params->clip);
-
+ drm_fb_xrgb8888_to_gray8(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -664,7 +667,7 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgb332(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_rgb332(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -697,12 +700,14 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip, false);
+ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state, false);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
- drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state, true);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
@@ -711,7 +716,8 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -748,7 +754,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -757,7 +763,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -794,7 +801,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -803,7 +810,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -840,7 +848,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -849,7 +857,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -890,7 +899,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
@@ -898,7 +907,8 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
+ &fmtcnv_state);
KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -933,7 +943,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -942,7 +952,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -979,7 +990,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -989,7 +1000,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
int blit_result = 0;
blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
- &params->clip);
+ &params->clip, &fmtcnv_state);
KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -1024,7 +1035,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -1034,7 +1045,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
int blit_result = 0;
blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
- &params->clip);
+ &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -1071,7 +1082,7 @@ static void drm_test_fb_xrgb8888_to_mono(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_mono(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_mono(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1104,7 +1115,7 @@ static void drm_test_fb_swab(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -1114,7 +1125,7 @@ static void drm_test_fb_swab(struct kunit *test)
int blit_result;
blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
- &src, &fb, &params->clip);
+ &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_FALSE(test, blit_result);
@@ -1123,7 +1134,8 @@ static void drm_test_fb_swab(struct kunit *test)
buf = dst.vaddr;
memset(buf, 0, dst_size);
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_FALSE(test, blit_result);
@@ -1137,7 +1149,8 @@ static void drm_test_fb_swab(struct kunit *test)
mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
fb.format = &mock_format;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_FALSE(test, blit_result);
@@ -1175,7 +1188,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -1214,7 +1228,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -1817,7 +1832,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
int blit_result;
- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip);
+ blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
+ &fmtcnv_state);
KUNIT_EXPECT_FALSE(test, blit_result);
for (size_t i = 0; i < fb.format->num_planes; i++) {
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
new file mode 100644
index 000000000000..91202e40cde9
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test suite for GEM objects backed by shmem buffers
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+#include <linux/sizes.h>
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#define TEST_SIZE SZ_1M
+#define TEST_BYTE 0xae
+
+/*
+ * Wrappers to avoid an explicit type cast when passing action
+ * functions to kunit_add_action().
+ */
+static void kfree_wrapper(void *ptr)
+{
+ const void *obj = ptr;
+
+ kfree(obj);
+}
+
+static void sg_free_table_wrapper(void *ptr)
+{
+ struct sg_table *sgt = ptr;
+
+ sg_free_table(sgt);
+}
+
+static void drm_gem_shmem_free_wrapper(void *ptr)
+{
+ struct drm_gem_shmem_object *shmem = ptr;
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object backed by a shmem buffer. The test
+ * case succeeds if the GEM object is successfully allocated with the
+ * shmem file node and object functions attributes set, and the size
+ * attribute is equal to the correct size.
+ */
+static void drm_gem_shmem_test_obj_create(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object from a scatter/gather table exported
+ * via a DMA-BUF. The test case succeeds if the GEM object is successfully
+ * created with the shmem file node attribute equal to NULL and the sgt
+ * attribute pointing to the scatter/gather table that has been imported.
+ */
+static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *gem_obj;
+ struct dma_buf buf_mock;
+ struct dma_buf_attachment attach_mock;
+ struct sg_table *sgt;
+ char *buf;
+ int ret;
+
+ /* Create a mock scatter/gather table */
+ buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, buf);
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sgt);
+
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sg_init_one(sgt->sgl, buf, TEST_SIZE);
+
+ /* Init a mock DMA-BUF */
+ buf_mock.size = TEST_SIZE;
+ attach_mock.dmabuf = &buf_mock;
+
+ gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
+ KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
+ KUNIT_EXPECT_NULL(test, gem_obj->filp);
+ KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ kunit_remove_action(test, sg_free_table_wrapper, sgt);
+ kunit_remove_action(test, kfree_wrapper, sgt);
+
+ shmem = to_drm_gem_shmem_obj(gem_obj);
+ KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test pinning backing pages for a shmem GEM object. The test case
+ * succeeds if a suitable number of backing pages are allocated, and
+ * the pages table counter attribute is increased by one.
+ */
+static void drm_gem_shmem_test_pin_pages(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int i, ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+
+ for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
+
+ drm_gem_shmem_unpin(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+}
+
+/*
+ * Test creating a virtual mapping for a shmem GEM object. The test
+ * case succeeds if the backing memory is mapped and the reference
+ * counter for virtual mapping is increased by one. Moreover, the test
+ * case writes and then reads a test pattern over the mapped memory.
+ */
+static void drm_gem_shmem_test_vmap(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct iosys_map map;
+ int ret, i;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_vmap(shmem, &map);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
+ KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+
+ iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
+ for (i = 0; i < TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
+
+ drm_gem_shmem_vunmap(shmem, &map);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+}
+
+/*
+ * Test exporting a scatter/gather table of pinned pages suitable for
+ * PRIME usage from a shmem GEM object. The test case succeeds if a
+ * scatter/gather table large enough to accommodate the backing memory
+ * is successfully exported.
+ */
+static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, len = 0;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sgt = drm_gem_shmem_get_sg_table(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test pinning pages and exporting a scatter/gather table suitable for
+ * driver usage from a shmem GEM object. The test case succeeds if the
+ * backing pages are pinned and a scatter/gather table large enough to
+ * accommodate the backing memory is successfully exported.
+ */
+static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, ret, len = 0;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test updating the madvise state of a shmem GEM object. The test
+ * case checks that the function for setting madv updates it only if
+ * its current value is greater than or equal to zero and returns false
+ * if it has a negative value.
+ */
+static void drm_gem_shmem_test_madvise(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 1);
+
+ /* Set madv to a negative value */
+ ret = drm_gem_shmem_madvise(shmem, -1);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+
+ /* Check that madv cannot be set back to a positive value */
+ ret = drm_gem_shmem_madvise(shmem, 0);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+}
+
+/*
+ * Test purging a shmem GEM object. First, assert that a newly created
+ * shmem GEM object is not purgeable. Then, set madvise to a positive
+ * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
+ * backing pages. Finally, assert that the shmem GEM object is now
+ * purgeable and purge it.
+ */
+static void drm_gem_shmem_test_purge(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_FALSE(test, ret);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ drm_gem_shmem_purge(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+ KUNIT_EXPECT_EQ(test, shmem->madv, -1);
+}
+
+static int drm_gem_shmem_test_init(struct kunit *test)
+{
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ /* Allocate a parent device */
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /*
+ * The DRM core will automatically initialize the GEM core and create
+ * a DRM Memory Manager object which provides an address space pool
+ * for GEM object allocation.
+ */
+ drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
+ 0, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);
+
+ test->priv = drm_dev;
+
+ return 0;
+}
+
+static struct kunit_case drm_gem_shmem_test_cases[] = {
+ KUNIT_CASE(drm_gem_shmem_test_obj_create),
+ KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
+ KUNIT_CASE(drm_gem_shmem_test_pin_pages),
+ KUNIT_CASE(drm_gem_shmem_test_vmap),
+ KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
+ KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
+ KUNIT_CASE(drm_gem_shmem_test_madvise),
+ KUNIT_CASE(drm_gem_shmem_test_purge),
+ {}
+};
+
+static struct kunit_suite drm_gem_shmem_suite = {
+ .name = "drm_gem_shmem",
+ .init = drm_gem_shmem_test_init,
+ .test_cases = drm_gem_shmem_test_cases
+};
+
+kunit_test_suite(drm_gem_shmem_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 05d5e7af6d25..4e9247cf9977 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -17,10 +17,6 @@
#include "../lib/drm_random.h"
-static unsigned int random_seed;
-static unsigned int max_iterations = 8192;
-static unsigned int max_prime = 128;
-
enum {
BEST,
BOTTOMUP,
@@ -37,10 +33,6 @@ static const struct insert_mode {
[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
{}
-}, evict_modes[] = {
- { "bottom-up", DRM_MM_INSERT_LOW },
- { "top-down", DRM_MM_INSERT_HIGH },
- {}
};
static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
@@ -97,57 +89,6 @@ static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 sta
return ok;
}
-static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
-{
- struct drm_mm_node *node, *check, *found;
- unsigned long n;
- u64 addr;
-
- if (!assert_no_holes(test, mm))
- return false;
-
- n = 0;
- addr = 0;
- drm_mm_for_each_node(node, mm) {
- if (node->start != addr) {
- KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
- n, addr, node->start);
- return false;
- }
-
- if (node->size != size) {
- KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
- n, size, node->size);
- return false;
- }
-
- if (drm_mm_hole_follows(node)) {
- KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
- return false;
- }
-
- found = NULL;
- drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
- if (node != check) {
- KUNIT_FAIL(test,
- "lookup return wrong node, expected start %llx, found %llx\n",
- node->start, check->start);
- return false;
- }
- found = check;
- }
- if (!found) {
- KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
- return false;
- }
-
- addr += size;
- n++;
- }
-
- return true;
-}
-
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
u64 rem;
@@ -270,215 +211,6 @@ static void drm_test_mm_debug(struct kunit *test)
nodes[0].start, nodes[0].size);
}
-static struct drm_mm_node *set_node(struct drm_mm_node *node,
- u64 start, u64 size)
-{
- node->start = start;
- node->size = size;
- return node;
-}
-
-static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
-{
- int err;
-
- err = drm_mm_reserve_node(mm, node);
- if (likely(err == -ENOSPC))
- return true;
-
- if (!err) {
- KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
- node->start, node->size);
- drm_mm_remove_node(node);
- } else {
- KUNIT_FAIL(test,
- "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
- err, -ENOSPC, node->start, node->size);
- }
- return false;
-}
-
-static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
- unsigned int count,
- u64 size)
-{
- const struct boundary {
- u64 start, size;
- const char *name;
- } boundaries[] = {
-#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
- B(0, 0),
- B(-size, 0),
- B(size, 0),
- B(size * count, 0),
- B(-size, size),
- B(-size, -size),
- B(-size, 2 * size),
- B(0, -size),
- B(size, -size),
- B(count * size, size),
- B(count * size, -size),
- B(count * size, count * size),
- B(count * size, -count * size),
- B(count * size, -(count + 1) * size),
- B((count + 1) * size, size),
- B((count + 1) * size, -size),
- B((count + 1) * size, -2 * size),
-#undef B
- };
- struct drm_mm_node tmp = {};
- int n;
-
- for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
- if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
- boundaries[n].size))) {
- KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
- n, boundaries[n].name, count, size);
- return false;
- }
- }
-
- return true;
-}
-
-static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
-{
- DRM_RND_STATE(prng, random_seed);
- struct drm_mm mm;
- struct drm_mm_node tmp, *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
- int ret, err;
-
- /* For exercising drm_mm_reserve_node(), we want to check that
- * reservations outside of the drm_mm range are rejected, and to
- * overlapping and otherwise already occupied ranges. Afterwards,
- * the tree and nodes should be intact.
- */
-
- DRM_MM_BUG_ON(!count);
- DRM_MM_BUG_ON(!size);
-
- ret = -ENOMEM;
- order = drm_random_order(count, &prng);
- if (!order)
- goto err;
-
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- ret = -EINVAL;
- drm_mm_init(&mm, 0, count * size);
-
- if (!check_reserve_boundaries(test, &mm, count, size))
- goto out;
-
- for (n = 0; n < count; n++) {
- nodes[n].start = order[n] * size;
- nodes[n].size = size;
-
- err = drm_mm_reserve_node(&mm, &nodes[n]);
- if (err) {
- KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
- ret = err;
- goto out;
- }
-
- if (!drm_mm_node_allocated(&nodes[n])) {
- KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
- n, nodes[n].start);
- goto out;
- }
-
- if (!expect_reserve_fail(test, &mm, &nodes[n]))
- goto out;
- }
-
- /* After random insertion the nodes should be in order */
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- /* Repeated use should then fail */
- drm_random_reorder(order, count, &prng);
- for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
- goto out;
-
- /* Remove and reinsert should work */
- drm_mm_remove_node(&nodes[order[n]]);
- err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
- if (err) {
- KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
- ret = err;
- goto out;
- }
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- /* Overlapping use should then fail */
- for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
- goto out;
- }
- for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
- goto out;
- }
-
- /* Remove several, reinsert, check full */
- for_each_prime_number(n, min(max_prime, count)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- }
-
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- err = drm_mm_reserve_node(&mm, node);
- if (err) {
- KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
- m, n, node->start);
- ret = err;
- goto out;
- }
- }
-
- o += n;
-
- if (!assert_continuous(test, &mm, size))
- goto out;
- }
-
- ret = 0;
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
- kfree(order);
-err:
- return ret;
-}
-
-static void drm_test_mm_reserve(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- int n;
-
- for_each_prime_number_from(n, 1, 54) {
- u64 size = BIT_ULL(n);
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
-
- cond_resched();
- }
-}
-
static bool expect_insert(struct kunit *test, struct drm_mm *mm,
struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
const struct insert_mode *mode)
@@ -503,600 +235,6 @@ static bool expect_insert(struct kunit *test, struct drm_mm *mm,
return true;
}
-static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
-{
- struct drm_mm_node tmp = {};
- int err;
-
- err = drm_mm_insert_node(mm, &tmp, size);
- if (likely(err == -ENOSPC))
- return true;
-
- if (!err) {
- KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
- tmp.start, tmp.size);
- drm_mm_remove_node(&tmp);
- } else {
- KUNIT_FAIL(test,
- "impossible insert failed with wrong error %d [expected %d], size %llu\n",
- err, -ENOSPC, size);
- }
- return false;
-}
-
-static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
-{
- DRM_RND_STATE(prng, random_seed);
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
- int ret;
-
- /* Fill a range with lots of nodes, check it doesn't fail too early */
-
- DRM_MM_BUG_ON(!count);
- DRM_MM_BUG_ON(!size);
-
- ret = -ENOMEM;
- nodes = vmalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(count, &prng);
- if (!order)
- goto err_nodes;
-
- ret = -EINVAL;
- drm_mm_init(&mm, 0, count * size);
-
- for (mode = insert_modes; mode->name; mode++) {
- for (n = 0; n < count; n++) {
- struct drm_mm_node tmp;
-
- node = replace ? &tmp : &nodes[n];
- memset(node, 0, sizeof(*node));
- if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
- KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
- mode->name, size, n);
- goto out;
- }
-
- if (replace) {
- drm_mm_replace_node(&tmp, &nodes[n]);
- if (drm_mm_node_allocated(&tmp)) {
- KUNIT_FAIL(test,
- "replaced old-node still allocated! step %d\n",
- n);
- goto out;
- }
-
- if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
- KUNIT_FAIL(test,
- "replaced node did not inherit parameters, size %llu step %d\n",
- size, n);
- goto out;
- }
-
- if (tmp.start != nodes[n].start) {
- KUNIT_FAIL(test,
- "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
- tmp.start, size, nodes[n].start, nodes[n].size);
- goto out;
- }
- }
- }
-
- /* After random insertion the nodes should be in order */
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- /* Repeated use should then fail */
- if (!expect_insert_fail(test, &mm, size))
- goto out;
-
- /* Remove one and reinsert, as the only hole it should refill itself */
- for (n = 0; n < count; n++) {
- u64 addr = nodes[n].start;
-
- drm_mm_remove_node(&nodes[n]);
- if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
- KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
- mode->name, size, n);
- goto out;
- }
-
- if (nodes[n].start != addr) {
- KUNIT_FAIL(test,
- "%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
- goto out;
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
- }
-
- /* Remove several, reinsert, check full */
- for_each_prime_number(n, min(max_prime, count)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- }
-
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
- KUNIT_FAIL(test,
- "%s multiple reinsert failed, size %llu step %d\n",
- mode->name, size, n);
- goto out;
- }
- }
-
- o += n;
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- if (!expect_insert_fail(test, &mm, size))
- goto out;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
- }
-
- ret = 0;
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
- return ret;
-}
-
-static void drm_test_mm_insert(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- unsigned int n;
-
- for_each_prime_number_from(n, 1, 54) {
- u64 size = BIT_ULL(n);
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
-
- cond_resched();
- }
-}
-
-static void drm_test_mm_replace(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- unsigned int n;
-
- /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
- * then replacing it with the intended node. We want to check that
- * the tree is intact and all the information we need is carried
- * across to the target node.
- */
-
- for_each_prime_number_from(n, 1, 54) {
- u64 size = BIT_ULL(n);
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
-
- cond_resched();
- }
-}
-
-static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
- u64 size, u64 alignment, unsigned long color,
- u64 range_start, u64 range_end, const struct insert_mode *mode)
-{
- int err;
-
- err = drm_mm_insert_node_in_range(mm, node,
- size, alignment, color,
- range_start, range_end,
- mode->mode);
- if (err) {
- KUNIT_FAIL(test,
- "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
- size, alignment, color, mode->name,
- range_start, range_end, err);
- return false;
- }
-
- if (!assert_node(test, node, mm, size, alignment, color)) {
- drm_mm_remove_node(node);
- return false;
- }
-
- return true;
-}
-
-static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
- u64 size, u64 range_start, u64 range_end)
-{
- struct drm_mm_node tmp = {};
- int err;
-
- err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
- 0);
- if (likely(err == -ENOSPC))
- return true;
-
- if (!err) {
- KUNIT_FAIL(test,
- "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
- tmp.start, tmp.size, range_start, range_end);
- drm_mm_remove_node(&tmp);
- } else {
- KUNIT_FAIL(test,
- "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
- err, -ENOSPC, size, range_start, range_end);
- }
-
- return false;
-}
-
-static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
- u64 size, u64 start, u64 end)
-{
- struct drm_mm_node *node;
- unsigned int n;
-
- if (!expect_insert_in_range_fail(test, mm, size, start, end))
- return false;
-
- n = div64_u64(start + size - 1, size);
- drm_mm_for_each_node(node, mm) {
- if (node->start < start || node->start + node->size > end) {
- KUNIT_FAIL(test,
- "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
- n, node->start, node->start + node->size, start, end);
- return false;
- }
-
- if (node->start != n * size) {
- KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
- n, n * size, node->start);
- return false;
- }
-
- if (node->size != size) {
- KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
- n, size, node->size);
- return false;
- }
-
- if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
- KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
- return false;
- }
-
- n++;
- }
-
- if (start > 0) {
- node = __drm_mm_interval_first(mm, 0, start - 1);
- if (drm_mm_node_allocated(node)) {
- KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
- node->start, node->size, start);
- return false;
- }
- }
-
- if (end < U64_MAX) {
- node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (drm_mm_node_allocated(node)) {
- KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
- node->start, node->size, end);
- return false;
- }
- }
-
- return true;
-}
-
-static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
- u64 start, u64 end)
-{
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int n, start_n, end_n;
- int ret;
-
- DRM_MM_BUG_ON(!count);
- DRM_MM_BUG_ON(!size);
- DRM_MM_BUG_ON(end <= start);
-
- /* Very similar to __drm_test_mm_insert(), but now instead of populating the
- * full range of the drm_mm, we try to fill a small portion of it.
- */
-
- ret = -ENOMEM;
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- ret = -EINVAL;
- drm_mm_init(&mm, 0, count * size);
-
- start_n = div64_u64(start + size - 1, size);
- end_n = div64_u64(end - size, size);
-
- for (mode = insert_modes; mode->name; mode++) {
- for (n = start_n; n <= end_n; n++) {
- if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
- start, end, mode)) {
- KUNIT_FAIL(test,
- "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
- mode->name, size, n, start_n, end_n, start, end);
- goto out;
- }
- }
-
- if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
- KUNIT_FAIL(test,
- "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
- mode->name, start, end, size);
- goto out;
- }
-
- /* Remove one and reinsert, it should refill itself */
- for (n = start_n; n <= end_n; n++) {
- u64 addr = nodes[n].start;
-
- drm_mm_remove_node(&nodes[n]);
- if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
- start, end, mode)) {
- KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
- goto out;
- }
-
- if (nodes[n].start != addr) {
- KUNIT_FAIL(test,
- "%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
- goto out;
- }
- }
-
- if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
- KUNIT_FAIL(test,
- "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
- mode->name, start, end, size);
- goto out;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
- }
-
- ret = 0;
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
- return ret;
-}
-
-static int insert_outside_range(struct kunit *test)
-{
- struct drm_mm mm;
- const unsigned int start = 1024;
- const unsigned int end = 2048;
- const unsigned int size = end - start;
-
- drm_mm_init(&mm, start, size);
-
- if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
- return -EINVAL;
-
- if (!expect_insert_in_range_fail(test, &mm, size,
- start - size / 2, start + (size + 1) / 2))
- return -EINVAL;
-
- if (!expect_insert_in_range_fail(test, &mm, size,
- end - (size + 1) / 2, end + size / 2))
- return -EINVAL;
-
- if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
- return -EINVAL;
-
- drm_mm_takedown(&mm);
- return 0;
-}
-
-static void drm_test_mm_insert_range(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
- unsigned int n;
-
- /* Check that requests outside the bounds of drm_mm are rejected. */
- KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
-
- for_each_prime_number_from(n, 1, 50) {
- const u64 size = BIT_ULL(n);
- const u64 max = count * size;
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 2, max));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 4 + 1, 3 * max / 4 - 1));
-
- cond_resched();
- }
-}
-
-static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
- unsigned int num_insert, const struct insert_mode *mode)
-{
- unsigned int size = 4096;
- unsigned int i;
-
- for (i = 0; i < num_insert; i++) {
- if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) {
- KUNIT_FAIL(test, "%s insert failed\n", mode->name);
- return -EINVAL;
- }
- }
-
- /* introduce fragmentation by freeing every other node */
- for (i = 0; i < num_insert; i++) {
- if (i % 2 == 0)
- drm_mm_remove_node(&nodes[i]);
- }
-
- return 0;
-}
-
-static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
- unsigned int num_insert, struct drm_mm_node *nodes,
- const struct insert_mode *mode)
-{
- unsigned int size = 8192;
- ktime_t start;
- unsigned int i;
-
- start = ktime_get();
- for (i = 0; i < num_insert; i++) {
- if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) {
- KUNIT_FAIL(test, "%s insert failed\n", mode->name);
- return 0;
- }
- }
-
- return ktime_to_ns(ktime_sub(ktime_get(), start));
-}
-
-static void drm_test_mm_frag(struct kunit *test)
-{
- struct drm_mm mm;
- const struct insert_mode *mode;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int insert_size = 10000;
- unsigned int scale_factor = 4;
-
- /* We need 4 * insert_size nodes to hold intermediate allocated
- * drm_mm nodes.
- * 1 times for prepare_frag()
- * 1 times for get_insert_time()
- * 2 times for get_insert_time()
- */
- nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- /* For BOTTOMUP and TOPDOWN, we first fragment the
- * address space using prepare_frag() and then try to verify
- * that insertions scale quadratically from 10k to 20k insertions
- */
- drm_mm_init(&mm, 1, U64_MAX - 2);
- for (mode = insert_modes; mode->name; mode++) {
- u64 insert_time1, insert_time2;
-
- if (mode->mode != DRM_MM_INSERT_LOW &&
- mode->mode != DRM_MM_INSERT_HIGH)
- continue;
-
- if (prepare_frag(test, &mm, nodes, insert_size, mode))
- goto err;
-
- insert_time1 = get_insert_time(test, &mm, insert_size,
- nodes + insert_size, mode);
- if (insert_time1 == 0)
- goto err;
-
- insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
- nodes + insert_size * 2, mode);
- if (insert_time2 == 0)
- goto err;
-
- kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
- mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);
-
- if (insert_time2 > (scale_factor * insert_time1)) {
- KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
- mode->name, insert_time2 - (scale_factor * insert_time1));
- goto err;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- }
-
-err:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
-}
-
-static void drm_test_mm_align(struct kunit *test)
-{
- const struct insert_mode *mode;
- const unsigned int max_count = min(8192u, max_prime);
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int prime;
-
- /* For each of the possible insertion modes, we pick a few
- * arbitrary alignments and check that the inserted node
- * meets our requirements.
- */
-
- nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- drm_mm_init(&mm, 1, U64_MAX - 2);
-
- for (mode = insert_modes; mode->name; mode++) {
- unsigned int i = 0;
-
- for_each_prime_number_from(prime, 1, max_count) {
- u64 size = next_prime_number(prime);
-
- if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
- KUNIT_FAIL(test, "%s insert failed with alignment=%d",
- mode->name, prime);
- goto out;
- }
-
- i++;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
-}
-
static void drm_test_mm_align_pot(struct kunit *test, int max)
{
struct drm_mm mm;
@@ -1144,626 +282,6 @@ static void drm_test_mm_align64(struct kunit *test)
drm_test_mm_align_pot(test, 64);
}
-static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
-{
- kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
- scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
-}
-
-static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
-{
- u64 hole_start, hole_end;
- struct drm_mm_node *hole;
-
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- struct drm_mm_node *next = list_next_entry(hole, node_list);
- const char *node1 = NULL, *node2 = NULL;
-
- if (drm_mm_node_allocated(hole))
- node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
- hole->start, hole->size, hole->color);
-
- if (drm_mm_node_allocated(next))
- node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
- next->start, next->size, next->color);
-
- kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
- hole_start, hole_end, hole_end - hole_start, node2);
-
- kfree(node2);
- kfree(node1);
-
- if (!--count)
- break;
- }
-}
-
-struct evict_node {
- struct drm_mm_node node;
- struct list_head link;
-};
-
-static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
- struct evict_node *nodes, unsigned int *order, unsigned int count,
- bool use_color, struct list_head *evict_list)
-{
- struct evict_node *e, *en;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- e = &nodes[order ? order[i] : i];
- list_add(&e->link, evict_list);
- if (drm_mm_scan_add_block(scan, &e->node))
- break;
- }
- list_for_each_entry_safe(e, en, evict_list, link) {
- if (!drm_mm_scan_remove_block(scan, &e->node))
- list_del(&e->link);
- }
- if (list_empty(evict_list)) {
- KUNIT_FAIL(test,
- "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
- scan->size, count, scan->alignment, scan->color);
- return false;
- }
-
- list_for_each_entry(e, evict_list, link)
- drm_mm_remove_node(&e->node);
-
- if (use_color) {
- struct drm_mm_node *node;
-
- while ((node = drm_mm_scan_color_evict(scan))) {
- e = container_of(node, typeof(*e), node);
- drm_mm_remove_node(&e->node);
- list_add(&e->link, evict_list);
- }
- } else {
- if (drm_mm_scan_color_evict(scan)) {
- KUNIT_FAIL(test,
- "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
- return false;
- }
- }
-
- return true;
-}
-
-static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
- unsigned int total_size, struct evict_node *nodes)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- struct drm_mm_node *node;
- unsigned int n;
-
- drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
- list_add(&e->link, &evict_list);
- drm_mm_scan_add_block(&scan, &e->node);
- }
- list_for_each_entry(e, &evict_list, link)
- drm_mm_scan_remove_block(&scan, &e->node);
-
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
-
- if (!drm_mm_node_allocated(&e->node)) {
- KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
- return false;
- }
-
- e->link.next = NULL;
- }
-
- drm_mm_for_each_node(node, mm) {
- e = container_of(node, typeof(*e), node);
- e->link.next = &e->link;
- }
-
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
-
- if (!e->link.next) {
- KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
- return false;
- }
- }
-
- return assert_continuous(test, mm, nodes[0].node.size);
-}
-
-static bool evict_everything(struct kunit *test, struct drm_mm *mm,
- unsigned int total_size, struct evict_node *nodes)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- unsigned int n;
- int err;
-
- drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
- list_add(&e->link, &evict_list);
- if (drm_mm_scan_add_block(&scan, &e->node))
- break;
- }
-
- err = 0;
- list_for_each_entry(e, &evict_list, link) {
- if (!drm_mm_scan_remove_block(&scan, &e->node)) {
- if (!err) {
- KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
- e->node.start);
- err = -EINVAL;
- }
- }
- }
- if (err)
- return false;
-
- list_for_each_entry(e, &evict_list, link)
- drm_mm_remove_node(&e->node);
-
- if (!assert_one_hole(test, mm, 0, total_size))
- return false;
-
- list_for_each_entry(e, &evict_list, link) {
- err = drm_mm_reserve_node(mm, &e->node);
- if (err) {
- KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
- return false;
- }
- }
-
- return assert_continuous(test, mm, nodes[0].node.size);
-}
-
-static int evict_something(struct kunit *test, struct drm_mm *mm,
- u64 range_start, u64 range_end, struct evict_node *nodes,
- unsigned int *order, unsigned int count, unsigned int size,
- unsigned int alignment, const struct insert_mode *mode)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- struct drm_mm_node tmp;
- int err;
-
- drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
- range_end, mode->mode);
- if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
- return -EINVAL;
-
- memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
- DRM_MM_INSERT_EVICT);
- if (err) {
- KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
- size, alignment);
- show_scan(test, &scan);
- show_holes(test, mm, 3);
- return err;
- }
-
- if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- KUNIT_FAIL(test,
- "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
- err = -EINVAL;
- }
-
- if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
- drm_mm_hole_follows(&tmp)) {
- KUNIT_FAIL(test,
- "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
- tmp.size, size, alignment, misalignment(&tmp, alignment),
- tmp.start, drm_mm_hole_follows(&tmp));
- err = -EINVAL;
- }
-
- drm_mm_remove_node(&tmp);
- if (err)
- return err;
-
- list_for_each_entry(e, &evict_list, link) {
- err = drm_mm_reserve_node(mm, &e->node);
- if (err) {
- KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
- return err;
- }
- }
-
- if (!assert_continuous(test, mm, nodes[0].node.size)) {
- KUNIT_FAIL(test, "range is no longer continuous\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void drm_test_mm_evict(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int size = 8192;
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Here we populate a full drm_mm and then try and insert a new node
- * by evicting other nodes in a random order. The drm_mm_scan should
- * pick the first matching hole it finds from the random list. We
- * repeat that for different allocation strategies, alignments and
- * sizes to try and stress the hole finder.
- */
-
- nodes = vzalloc(array_size(size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, size);
- for (n = 0; n < size; n++) {
- if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- /* First check that using the scanner doesn't break the mm */
- if (!evict_nothing(test, &mm, size, nodes)) {
- KUNIT_FAIL(test, "evict_nothing() failed\n");
- goto out;
- }
- if (!evict_everything(test, &mm, size, nodes)) {
- KUNIT_FAIL(test, "evict_everything() failed\n");
- goto out;
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
- mode)) {
- KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
- mode->name, n);
- goto out;
- }
- }
-
- for (n = 1; n < size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
- size / 2, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, size / 2, n);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(size, max_prime)) {
- unsigned int nsize = (size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
- nsize, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static void drm_test_mm_evict_range(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int size = 8192;
- const unsigned int range_size = size / 2;
- const unsigned int range_start = size / 4;
- const unsigned int range_end = range_start + range_size;
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Like drm_test_mm_evict() but now we are limiting the search to a
- * small portion of the full drm_mm.
- */
-
- nodes = vzalloc(array_size(size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, size);
- for (n = 0; n < size; n++) {
- if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= range_size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, range_start, range_end, nodes,
- order, size, n, 1, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u) failed with range [%u, %u]\n",
- mode->name, n, range_start, range_end);
- goto out;
- }
- }
-
- for (n = 1; n <= range_size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, range_start, range_end, nodes,
- order, size, range_size / 2, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, range_size / 2, n, range_start, range_end);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
- unsigned int nsize = (range_size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, range_start, range_end, nodes,
- order, size, nsize, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, nsize, n, range_start, range_end);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static unsigned int node_index(const struct drm_mm_node *node)
-{
- return div64_u64(node->start, node->size);
-}
-
-static void drm_test_mm_topdown(struct kunit *test)
-{
- const struct insert_mode *topdown = &insert_modes[TOPDOWN];
-
- DRM_RND_STATE(prng, random_seed);
- const unsigned int count = 8192;
- unsigned int size;
- unsigned long *bitmap;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
-
- /* When allocating top-down, we expect to be returned a node
- * from a suitable hole at the top of the drm_mm. We check that
- * the returned node does match the highest available slot.
- */
-
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- bitmap = bitmap_zalloc(count, GFP_KERNEL);
- if (!bitmap)
- goto err_nodes;
-
- order = drm_random_order(count, &prng);
- if (!order)
- goto err_bitmap;
-
- for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size * count);
- for (n = 0; n < count; n++) {
- if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
- KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
- goto out;
- }
-
- if (drm_mm_hole_follows(&nodes[n])) {
- KUNIT_FAIL(test,
- "hole after topdown insert %d, start=%llx\n, size=%u",
- n, nodes[n].start, size);
- goto out;
- }
-
- if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
- goto out;
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- drm_random_reorder(order, count, &prng);
- for_each_prime_number_from(n, 1, min(count, max_prime)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- __set_bit(node_index(node), bitmap);
- }
-
- for (m = 0; m < n; m++) {
- unsigned int last;
-
- node = &nodes[order[(o + m) % count]];
- if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
- KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
- goto out;
- }
-
- if (drm_mm_hole_follows(node)) {
- KUNIT_FAIL(test,
- "hole after topdown insert %d/%d, start=%llx\n",
- m, n, node->start);
- goto out;
- }
-
- last = find_last_bit(bitmap, count);
- if (node_index(node) != last) {
- KUNIT_FAIL(test,
- "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
- m, n, size, last, node_index(node));
- goto out;
- }
-
- __clear_bit(last, bitmap);
- }
-
- DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
-
- o += n;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_bitmap:
- bitmap_free(bitmap);
-err_nodes:
- vfree(nodes);
-}
-
-static void drm_test_mm_bottomup(struct kunit *test)
-{
- const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
-
- DRM_RND_STATE(prng, random_seed);
- const unsigned int count = 8192;
- unsigned int size;
- unsigned long *bitmap;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
-
- /* Like drm_test_mm_topdown, but instead of searching for the last hole,
- * we search for the first.
- */
-
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- bitmap = bitmap_zalloc(count, GFP_KERNEL);
- if (!bitmap)
- goto err_nodes;
-
- order = drm_random_order(count, &prng);
- if (!order)
- goto err_bitmap;
-
- for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size * count);
- for (n = 0; n < count; n++) {
- if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
- KUNIT_FAIL(test,
- "bottomup insert failed, size %u step %d\n", size, n);
- goto out;
- }
-
- if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
- goto out;
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- drm_random_reorder(order, count, &prng);
- for_each_prime_number_from(n, 1, min(count, max_prime)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- __set_bit(node_index(node), bitmap);
- }
-
- for (m = 0; m < n; m++) {
- unsigned int first;
-
- node = &nodes[order[(o + m) % count]];
- if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
- KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
- goto out;
- }
-
- first = find_first_bit(bitmap, count);
- if (node_index(node) != first) {
- KUNIT_FAIL(test,
- "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
- m, n, first, node_index(node));
- goto out;
- }
- __clear_bit(first, bitmap);
- }
-
- DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
-
- o += n;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_bitmap:
- bitmap_free(bitmap);
-err_nodes:
- vfree(nodes);
-}
-
static void drm_test_mm_once(struct kunit *test, unsigned int mode)
{
struct drm_mm mm;
@@ -1817,440 +335,18 @@ static void drm_test_mm_highest(struct kunit *test)
drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
}
-static void separate_adjacent_colors(const struct drm_mm_node *node,
- unsigned long color, u64 *start, u64 *end)
-{
- if (drm_mm_node_allocated(node) && node->color != color)
- ++*start;
-
- node = list_next_entry(node, node_list);
- if (drm_mm_node_allocated(node) && node->color != color)
- --*end;
-}
-
-static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
-{
- if (!drm_mm_hole_follows(node) &&
- drm_mm_node_allocated(list_next_entry(node, node_list))) {
- KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
- node->color, node->start, node->size,
- list_next_entry(node, node_list)->color,
- list_next_entry(node, node_list)->start,
- list_next_entry(node, node_list)->size);
- return true;
- }
-
- return false;
-}
-
-static void drm_test_mm_color(struct kunit *test)
-{
- const unsigned int count = min(4096u, max_iterations);
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct drm_mm_node *node, *nn;
- unsigned int n;
-
- /* Color adjustment complicates everything. First we just check
- * that when we insert a node we apply any color_adjustment callback.
- * The callback we use should ensure that there is a gap between
- * any two nodes, and so after each insertion we check that those
- * holes are inserted and that they are preserved.
- */
-
- drm_mm_init(&mm, 0, U64_MAX);
-
- for (n = 1; n <= count; n++) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- kfree(node);
- goto out;
- }
- }
-
- drm_mm_for_each_node_safe(node, nn, &mm) {
- if (node->color != node->size) {
- KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
- node->size, node->color);
-
- goto out;
- }
-
- drm_mm_remove_node(node);
- kfree(node);
- }
-
- /* Now, let's start experimenting with applying a color callback */
- mm.color_adjust = separate_adjacent_colors;
- for (mode = insert_modes; mode->name; mode++) {
- u64 last;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- node->size = 1 + 2 * count;
- node->color = node->size;
-
- if (drm_mm_reserve_node(&mm, node)) {
- KUNIT_FAIL(test, "initial reserve failed!\n");
- goto out;
- }
-
- last = node->start + node->size;
-
- for (n = 1; n <= count; n++) {
- int rem;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- node->start = last;
- node->size = n + count;
- node->color = node->size;
-
- if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
- KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
- goto out;
- }
-
- node->start += n + 1;
- rem = misalignment(node, n + count);
- node->start += n + count - rem;
-
- if (drm_mm_reserve_node(&mm, node)) {
- KUNIT_FAIL(test, "reserve %d failed", n);
- goto out;
- }
-
- last = node->start + node->size;
- }
-
- for (n = 1; n <= count; n++) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- if (!expect_insert(test, &mm, node, n, n, n, mode)) {
- KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
- kfree(node);
- goto out;
- }
- }
-
- drm_mm_for_each_node_safe(node, nn, &mm) {
- u64 rem;
-
- if (node->color != node->size) {
- KUNIT_FAIL(test,
- "%s invalid color stored: expected %lld, found %ld\n",
- mode->name, node->size, node->color);
-
- goto out;
- }
-
- if (colors_abutt(test, node))
- goto out;
-
- div64_u64_rem(node->start, node->size, &rem);
- if (rem) {
- KUNIT_FAIL(test,
- "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
- mode->name, node->start, node->size, rem);
- goto out;
- }
-
- drm_mm_remove_node(node);
- kfree(node);
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, nn, &mm) {
- drm_mm_remove_node(node);
- kfree(node);
- }
- drm_mm_takedown(&mm);
-}
-
-static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
- u64 range_end, struct evict_node *nodes, unsigned int *order,
- unsigned int count, unsigned int size, unsigned int alignment,
- unsigned long color, const struct insert_mode *mode)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- struct drm_mm_node tmp;
- int err;
-
- drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
- range_end, mode->mode);
- if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
- return -EINVAL;
-
- memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
- DRM_MM_INSERT_EVICT);
- if (err) {
- KUNIT_FAIL(test,
- "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
- size, alignment, color, err);
- show_scan(test, &scan);
- show_holes(test, mm, 3);
- return err;
- }
-
- if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- KUNIT_FAIL(test,
- "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
- err = -EINVAL;
- }
-
- if (colors_abutt(test, &tmp))
- err = -EINVAL;
-
- if (!assert_node(test, &tmp, mm, size, alignment, color)) {
- KUNIT_FAIL(test,
- "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
- tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
- err = -EINVAL;
- }
-
- drm_mm_remove_node(&tmp);
- if (err)
- return err;
-
- list_for_each_entry(e, &evict_list, link) {
- err = drm_mm_reserve_node(mm, &e->node);
- if (err) {
- KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
- return err;
- }
- }
-
- cond_resched();
- return 0;
-}
-
-static void drm_test_mm_color_evict(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int total_size = min(8192u, max_iterations);
- const struct insert_mode *mode;
- unsigned long color = 0;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Check that the drm_mm_scan also honours color adjustment when
- * choosing its victims to create a hole. Our color_adjust does not
- * allow two nodes to be placed together without an intervening hole
- * enlarging the set of victims that must be evicted.
- */
-
- nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(total_size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, 2 * total_size - 1);
- mm.color_adjust = separate_adjacent_colors;
- for (n = 0; n < total_size; n++) {
- if (!expect_insert(test, &mm, &nodes[n].node,
- 1, 0, color++,
- &insert_modes[0])) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= total_size; n <<= 1) {
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
- n, 1, color++, mode)) {
- KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
- goto out;
- }
- }
-
- for (n = 1; n < total_size; n <<= 1) {
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
- total_size / 2, n, color++, mode)) {
- KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, total_size / 2, n);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
- unsigned int nsize = (total_size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
- nsize, n, color++, mode)) {
- KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static void drm_test_mm_color_evict_range(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int total_size = 8192;
- const unsigned int range_size = total_size / 2;
- const unsigned int range_start = total_size / 4;
- const unsigned int range_end = range_start + range_size;
- const struct insert_mode *mode;
- unsigned long color = 0;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Like drm_test_mm_color_evict(), but limited to small portion of the full
- * drm_mm range.
- */
-
- nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(total_size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, 2 * total_size - 1);
- mm.color_adjust = separate_adjacent_colors;
- for (n = 0; n < total_size; n++) {
- if (!expect_insert(test, &mm, &nodes[n].node,
- 1, 0, color++,
- &insert_modes[0])) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= range_size; n <<= 1) {
- drm_random_reorder(order, range_size, &prng);
- if (evict_color(test, &mm, range_start, range_end, nodes, order,
- total_size, n, 1, color++, mode)) {
- KUNIT_FAIL(test,
- "%s evict_color(size=%u) failed for range [%x, %x]\n",
- mode->name, n, range_start, range_end);
- goto out;
- }
- }
-
- for (n = 1; n < range_size; n <<= 1) {
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, range_start, range_end, nodes, order,
- total_size, range_size / 2, n, color++, mode)) {
- KUNIT_FAIL(test,
- "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, total_size / 2, n, range_start, range_end);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
- unsigned int nsize = (range_size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, range_start, range_end, nodes, order,
- total_size, nsize, n, color++, mode)) {
- KUNIT_FAIL(test,
- "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, nsize, n, range_start, range_end);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static int drm_mm_suite_init(struct kunit_suite *suite)
-{
- while (!random_seed)
- random_seed = get_random_u32();
-
- kunit_info(suite,
- "Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n",
- random_seed, max_iterations, max_prime);
-
- return 0;
-}
-
-module_param(random_seed, uint, 0400);
-module_param(max_iterations, uint, 0400);
-module_param(max_prime, uint, 0400);
-
static struct kunit_case drm_mm_tests[] = {
KUNIT_CASE(drm_test_mm_init),
KUNIT_CASE(drm_test_mm_debug),
- KUNIT_CASE(drm_test_mm_reserve),
- KUNIT_CASE(drm_test_mm_insert),
- KUNIT_CASE(drm_test_mm_replace),
- KUNIT_CASE(drm_test_mm_insert_range),
- KUNIT_CASE(drm_test_mm_frag),
- KUNIT_CASE(drm_test_mm_align),
KUNIT_CASE(drm_test_mm_align32),
KUNIT_CASE(drm_test_mm_align64),
- KUNIT_CASE(drm_test_mm_evict),
- KUNIT_CASE(drm_test_mm_evict_range),
- KUNIT_CASE(drm_test_mm_topdown),
- KUNIT_CASE(drm_test_mm_bottomup),
KUNIT_CASE(drm_test_mm_lowest),
KUNIT_CASE(drm_test_mm_highest),
- KUNIT_CASE(drm_test_mm_color),
- KUNIT_CASE(drm_test_mm_color_evict),
- KUNIT_CASE(drm_test_mm_color_evict_range),
{}
};
static struct kunit_suite drm_mm_test_suite = {
.name = "drm_mm",
- .suite_init = drm_mm_suite_init,
.test_cases = drm_mm_tests,
};
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 5e5e466f35d1..5f838980c7a1 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -169,14 +169,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
- dev_dbg(ddev->dev,
- "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
- crtc->name, drm_atomic_crtc_needs_modeset(crtc->state),
- crtc->state->enable, crtc->state->event);
-
- /* There is nothing to do if CRTC is not going to be enabled. */
- if (!crtc->state->enable)
- return;
+ dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
+ __func__, crtc->name, crtc->state->active ? "" : "not ",
+ drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
+ crtc->state->event);
/*
* Flush CRTC changes with go bit only if new modeset is not
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 9d9dee7abaef..1ad711f8d2a8 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -322,6 +322,60 @@ const struct dispc_features dispc_am625_feats = {
.vid_order = { 1, 0 },
};
+const struct dispc_features dispc_am62a7_feats = {
+ /*
+ * if the code reaches dispc_mode_valid with VP1,
+ * it should return MODE_BAD.
+ */
+ .max_pclk_khz = {
+ [DISPC_VP_TIED_OFF] = 0,
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+	 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM62A7,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ /* VP1 of the DSS in AM62A7 SoC is tied off internally */
+ .vp_bus_type = { DISPC_VP_TIED_OFF, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+};
+
static const u16 *dispc_common_regmap;
struct dss_vp_data {
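The .xinc_max value in the am62a7 scaling parameters above follows directly from the pixel-increment formula quoted in its comment. A minimal sketch of the same arithmetic (the helper is hypothetical, not part of the driver):

	/* Hypothetical helper illustrating the .xinc_max derivation above. */
	static unsigned int dispc_max_xinc(unsigned int max_pixel_inc, unsigned int max_bpp)
	{
		/* pixel_inc = 1 + (xinc - 1) * bpp  =>  xinc_max = (max_pixel_inc - 1) / bpp + 1 */
		return (max_pixel_inc - 1) / max_bpp + 1;
	}

	/* dispc_max_xinc(255, 8) == 32, matching .xinc_max = 32 above. */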
@@ -824,6 +878,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
case DISPC_K2G:
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -840,6 +895,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_k2g_set_irqenable(dispc, mask);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -1331,6 +1387,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
x, y, layer);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2250,6 +2307,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
dispc_k2g_plane_init(dispc);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2357,6 +2415,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2702,18 +2761,83 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}
-static void dispc_softreset(struct dispc_device *dispc)
+/*
+ * K2G display controller does not support soft reset, so we do a basic manual
+ * reset here: make sure the IRQs are masked and VPs are disabled.
+ */
+static void dispc_softreset_k2g(struct dispc_device *dispc)
+{
+ dispc_set_irqenable(dispc, 0);
+ dispc_read_and_clear_irqstatus(dispc);
+
+ for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+static int dispc_softreset(struct dispc_device *dispc)
{
u32 val;
- int ret = 0;
+ int ret;
+
+ if (dispc->feat->subrev == DISPC_K2G) {
+ dispc_softreset_k2g(dispc);
+ return 0;
+ }
/* Soft reset */
REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
/* Wait for reset to complete */
ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
val, val & 1, 100, 5000);
+ if (ret) {
+ dev_err(dispc->dev, "failed to reset dispc\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dispc_init_hw(struct dispc_device *dispc)
+{
+ struct device *dev = dispc->dev;
+ int ret;
+
+ ret = pm_runtime_set_active(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to active\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dispc->fclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable DSS fclk\n");
+ goto err_runtime_suspend;
+ }
+
+ ret = dispc_softreset(dispc);
if (ret)
- dev_warn(dispc->dev, "failed to reset dispc\n");
+ goto err_clk_disable;
+
+ clk_disable_unprepare(dispc->fclk);
+ ret = pm_runtime_set_suspended(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to suspended\n");
+ return ret;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(dispc->fclk);
+
+err_runtime_suspend:
+ ret = pm_runtime_set_suspended(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to suspended\n");
+ return ret;
+ }
+
+ return ret;
}
int dispc_init(struct tidss_device *tidss)
@@ -2777,10 +2901,6 @@ int dispc_init(struct tidss_device *tidss)
return r;
}
- /* K2G display controller does not support soft reset */
- if (feat->subrev != DISPC_K2G)
- dispc_softreset(dispc);
-
for (i = 0; i < dispc->feat->num_vps; i++) {
u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
u32 *gamma_table;
@@ -2829,6 +2949,10 @@ int dispc_init(struct tidss_device *tidss)
of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
&dispc->memory_bandwidth_limit);
+ r = dispc_init_hw(dispc);
+ if (r)
+ return r;
+
tidss->dispc = dispc;
return 0;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 33ac5ad7a423..086327d51a90 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -54,12 +54,14 @@ enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_TIED_OFF, /* Tied off / Unavailable */
DISPC_VP_MAX_BUS_TYPE,
};
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
};
@@ -88,6 +90,7 @@ struct dispc_features {
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
+extern const struct dispc_features dispc_am62a7_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 4d063eb9cd0b..d15f836dca95 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -32,9 +32,9 @@ int tidss_runtime_get(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- r = pm_runtime_get_sync(tidss->dev);
+ r = pm_runtime_resume_and_get(tidss->dev);
WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ return r;
}
void tidss_runtime_put(struct tidss_device *tidss)
@@ -43,7 +43,9 @@ void tidss_runtime_put(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- r = pm_runtime_put_sync(tidss->dev);
+ pm_runtime_mark_last_busy(tidss->dev);
+
+ r = pm_runtime_put_autosuspend(tidss->dev);
WARN_ON(r < 0);
}
@@ -136,6 +138,8 @@ static int tidss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tidss);
+ spin_lock_init(&tidss->wait_lock);
+
ret = dispc_init(tidss);
if (ret) {
dev_err(dev, "failed to initialize dispc: %d\n", ret);
@@ -144,6 +148,9 @@ static int tidss_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
#ifndef CONFIG_PM
/* If we don't have PM, we need to call resume manually */
dispc_runtime_resume(tidss->dispc);
@@ -192,6 +199,7 @@ err_runtime_suspend:
#ifndef CONFIG_PM
dispc_runtime_suspend(tidss->dispc);
#endif
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
@@ -215,6 +223,7 @@ static void tidss_remove(struct platform_device *pdev)
/* If we don't have PM, we need to call suspend manually */
dispc_runtime_suspend(tidss->dispc);
#endif
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
/* devm allocated dispc goes away with the dev so mark it NULL */
@@ -231,6 +240,7 @@ static void tidss_shutdown(struct platform_device *pdev)
static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
+ { .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
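The runtime-PM changes above move tidss to pm_runtime_resume_and_get() plus a 1 s autosuspend window. Callers keep the familiar get/put pairing; a rough usage sketch (error handling abbreviated):

	int ret;

	ret = tidss_runtime_get(tidss);	/* resumes the DSS, returns 0 or a negative error */
	if (ret)
		return ret;

	/* ... access DSS registers ... */

	tidss_runtime_put(tidss);	/* marks last busy; the device autosuspends ~1 s later */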
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 0c681c7600bc..604334ef526a 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -93,33 +93,21 @@ void tidss_irq_resume(struct tidss_device *tidss)
spin_unlock_irqrestore(&tidss->wait_lock, flags);
}
-static void tidss_irq_preinstall(struct drm_device *ddev)
-{
- struct tidss_device *tidss = to_tidss(ddev);
-
- spin_lock_init(&tidss->wait_lock);
-
- tidss_runtime_get(tidss);
-
- dispc_set_irqenable(tidss->dispc, 0);
- dispc_read_and_clear_irqstatus(tidss->dispc);
-
- tidss_runtime_put(tidss);
-}
-
-static void tidss_irq_postinstall(struct drm_device *ddev)
+int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
{
struct tidss_device *tidss = to_tidss(ddev);
- unsigned long flags;
- unsigned int i;
+ int ret;
- tidss_runtime_get(tidss);
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
+ if (ret)
+ return ret;
tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
- for (i = 0; i < tidss->num_crtcs; ++i) {
+ for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
@@ -127,28 +115,6 @@ static void tidss_irq_postinstall(struct drm_device *ddev)
tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
}
- tidss_irq_update(tidss);
-
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
-
- tidss_runtime_put(tidss);
-}
-
-int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
-{
- int ret;
-
- if (irq == IRQ_NOTCONNECTED)
- return -ENOTCONN;
-
- tidss_irq_preinstall(ddev);
-
- ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
- if (ret)
- return ret;
-
- tidss_irq_postinstall(ddev);
-
return 0;
}
@@ -156,9 +122,5 @@ void tidss_irq_uninstall(struct drm_device *ddev)
{
struct tidss_device *tidss = to_tidss(ddev);
- tidss_runtime_get(tidss);
- dispc_set_irqenable(tidss->dispc, 0);
- tidss_runtime_put(tidss);
-
free_irq(tidss->irq, ddev);
}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index c979ad1af236..a0e494c806a9 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -4,8 +4,6 @@
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*/
-#include <linux/dma-fence.h>
-
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -25,18 +23,16 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *ddev = old_state->dev;
struct tidss_device *tidss = to_tidss(ddev);
- bool fence_cookie = dma_fence_begin_signalling();
dev_dbg(ddev->dev, "%s\n", __func__);
tidss_runtime_get(tidss);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
- drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
- dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);
drm_atomic_helper_cleanup_planes(ddev, old_state);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 8ebd7134ee21..23bf16f596f6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -138,7 +138,7 @@ static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq)
if (ret)
return ret;
- priv->irq_enabled = false;
+ priv->irq_enabled = true;
return 0;
}
@@ -570,19 +570,18 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
match);
}
-static int tilcdc_pdev_remove(struct platform_device *pdev)
+static void tilcdc_pdev_remove(struct platform_device *pdev)
{
int ret;
ret = tilcdc_get_external_components(&pdev->dev, NULL);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev, "tilcdc_get_external_components() failed (%pe)\n",
+ ERR_PTR(ret));
else if (ret == 0)
tilcdc_fini(platform_get_drvdata(pdev));
else
component_master_del(&pdev->dev, &tilcdc_comp_ops);
-
- return 0;
}
static void tilcdc_pdev_shutdown(struct platform_device *pdev)
@@ -599,7 +598,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
static struct platform_driver tilcdc_platform_driver = {
.probe = tilcdc_pdev_probe,
- .remove = tilcdc_pdev_remove,
+ .remove_new = tilcdc_pdev_remove,
.shutdown = tilcdc_pdev_shutdown,
.driver = {
.name = "tilcdc",
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index e5b10e41554a..4f8f3172379e 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -404,14 +404,12 @@ err_unload:
return ret;
}
-static int arcpgu_remove(struct platform_device *pdev)
+static void arcpgu_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
arcpgu_unload(drm);
-
- return 0;
}
static const struct of_device_id arcpgu_of_table[] = {
@@ -423,7 +421,7 @@ MODULE_DEVICE_TABLE(of, arcpgu_of_table);
static struct platform_driver arcpgu_platform_driver = {
.probe = arcpgu_probe,
- .remove = arcpgu_remove,
+ .remove_new = arcpgu_remove,
.driver = {
.name = "arcpgu",
.of_match_table = arcpgu_of_table,
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index c5c34cd2edc1..4e3a152f897a 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -411,7 +411,8 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, &damage);
+ drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 4ceb68ffac4b..dd8b0a181be9 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -78,7 +78,7 @@ static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
}
static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
@@ -98,7 +98,7 @@ static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state);
if (ret)
goto err_msg;
} else {
@@ -171,7 +171,8 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
drm_dev_exit(idx);
}
@@ -281,7 +282,8 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
- ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
out_exit:
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 2d999a0facde..ab89b7fc7bf6 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -19,7 +19,6 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -758,7 +757,11 @@ static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
+ struct drm_device *dev = plane->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
struct drm_framebuffer *new_fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -777,6 +780,16 @@ static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
+ if (new_fb->format != odev->format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ odev->pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
@@ -817,7 +830,7 @@ static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage);
+ &damage, &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 73dd4f4289c2..8fd6758f5725 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -509,7 +509,8 @@ static void repaper_get_temperature(struct repaper_epd *epd)
epd->factored_stage_time = epd->stage_time * factor10x / 10;
}
-static int repaper_fb_dirty(struct drm_framebuffer *fb)
+static int repaper_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
@@ -545,7 +546,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
iosys_map_set_vaddr(&dst, buf);
iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -830,13 +831,16 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
struct drm_rect rect;
if (!pipe->crtc.state->active)
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- repaper_fb_dirty(state->fb);
+ repaper_fb_dirty(state->fb, &fmtcnv_state);
+
+ drm_format_conv_state_release(&fmtcnv_state);
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
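repaper has no shadow-plane state, so the hunk above shows the one-shot pattern for the new drm_format_conv_state argument: initialise it on the stack, pass it through the conversion helper, release it afterwards. A hedged sketch of the same pattern in a generic flush helper (function name hypothetical):

	static void my_mono_flush(struct iosys_map *dst, unsigned int dst_pitch,
				  const struct iosys_map *src, struct drm_framebuffer *fb,
				  struct drm_rect *clip)
	{
		struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;

		drm_fb_xrgb8888_to_mono(dst, &dst_pitch, src, fb, clip, &fmtcnv_state);
		drm_format_conv_state_release(&fmtcnv_state);	/* frees any buffer the helper reserved */
	}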
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 5fefc895bca2..7ce1c4617675 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -19,12 +19,12 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "simpledrm"
@@ -579,6 +579,44 @@ static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
+static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_device *dev = plane->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sdev->format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sdev->pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -609,7 +647,7 @@ static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane
iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage);
+ fb, &damage, &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
@@ -635,7 +673,7 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_check = simpledrm_primary_plane_helper_atomic_check,
.atomic_update = simpledrm_primary_plane_helper_atomic_update,
.atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
};
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 3cf4eec16a81..7336fa1ddaed 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -64,7 +64,8 @@ static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
struct drm_framebuffer *fb,
- struct drm_rect *clip)
+ struct drm_rect *clip,
+ struct drm_format_conv_state *fmtcnv_state)
{
size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
unsigned int x, y;
@@ -77,7 +78,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
iosys_map_set_vaddr(&dst_map, buf);
iosys_map_set_vaddr(&vmap, vaddr);
- drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip, fmtcnv_state);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -93,7 +94,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
}
static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+ struct drm_rect *clip, struct drm_format_conv_state *fmtcnv_state)
{
int ret;
@@ -101,7 +102,7 @@ static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuf
if (ret)
return ret;
- st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip);
+ st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -109,7 +110,7 @@ static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuf
}
static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
struct mipi_dbi *dbi = &dbidev->dbi;
@@ -121,7 +122,7 @@ static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect);
+ ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect, fmtcnv_state);
if (ret)
goto err_msg;
@@ -160,7 +161,8 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
drm_dev_exit(idx);
}
@@ -238,7 +240,8 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(100);
- st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e58b7e249816..edf10618fe2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -370,7 +370,13 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&bo->bdev->lru_lock);
INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
- queue_work(bdev->wq, &bo->delayed_delete);
+
+ /* Schedule the worker on the closest NUMA node. This
+ * improves performance since system memory might be
+ * cleared on free and that is best done on a CPU core
+ * close to it.
+ */
+ queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
return;
}
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index d48b39132b32..f5187b384ae9 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -204,7 +204,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
if (ret)
return ret;
- bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+ bdev->wq = alloc_workqueue("ttm",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
if (!bdev->wq) {
ttm_global_release();
return -ENOMEM;
@@ -213,7 +214,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
+
+ ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 40876bcdd79a..7702359c90c2 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -21,7 +21,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -261,6 +260,22 @@ static const uint64_t udl_primary_plane_fmtmods[] = {
DRM_FORMAT_MOD_INVALID
};
+static int udl_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -296,7 +311,7 @@ out_drm_gem_fb_end_cpu_access:
static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_check = udl_primary_plane_helper_atomic_check,
.atomic_update = udl_primary_plane_helper_atomic_update,
};
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
index e8b314137020..b7d673f1153b 100644
--- a/drivers/gpu/drm/v3d/Makefile
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -11,7 +11,9 @@ v3d-y := \
v3d_mmu.o \
v3d_perfmon.o \
v3d_trace_points.o \
- v3d_sched.o
+ v3d_sched.o \
+ v3d_sysfs.o \
+ v3d_submit.o
v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b3229a37c6d..1bdfac8beafd 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -33,6 +33,9 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ if (bo->vaddr)
+ v3d_put_bo_vaddr(bo);
+
v3d_mmu_remove_ptes(bo);
mutex_lock(&v3d->bo_lock);
@@ -134,6 +137,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
bo = to_v3d_bo(&shmem_obj->base);
+ bo->vaddr = NULL;
ret = v3d_bo_create_finish(&shmem_obj->base);
if (ret)
@@ -167,6 +171,20 @@ v3d_prime_import_sg_table(struct drm_device *dev,
return obj;
}
+void v3d_get_bo_vaddr(struct v3d_bo *bo)
+{
+ struct drm_gem_shmem_object *obj = &bo->base;
+
+ bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+}
+
+void v3d_put_bo_vaddr(struct v3d_bo *bo)
+{
+ vunmap(bo->vaddr);
+ bo->vaddr = NULL;
+}
+
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -233,3 +251,36 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
drm_gem_object_put(gem_obj);
return 0;
}
+
+int
+v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_v3d_wait_bo *args = data;
+ ktime_t start = ktime_get();
+ u64 delta_ns;
+ unsigned long timeout_jiffies =
+ nsecs_to_jiffies_timeout(args->timeout_ns);
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+ true, timeout_jiffies);
+
+ /* Decrement the user's timeout, in case we got interrupted
+ * such that the ioctl will be restarted.
+ */
+ delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (delta_ns < args->timeout_ns)
+ args->timeout_ns -= delta_ns;
+ else
+ args->timeout_ns = 0;
+
+ /* Asked to wait beyond the jiffie/scheduler precision? */
+ if (ret == -ETIME && args->timeout_ns)
+ ret = -EAGAIN;
+
+ return ret;
+}
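The timeout bookkeeping in v3d_wait_bo_ioctl() exists so that a restarted ioctl never waits longer than the caller originally asked for. A rough userspace sketch of how the updated timeout_ns is meant to be consumed (assumes the existing drm_v3d_wait_bo uapi and a valid GEM handle):

	struct drm_v3d_wait_bo wait = {
		.handle = bo_handle,		/* valid GEM handle (assumed) */
		.timeout_ns = 1000000000ull,	/* 1 second budget */
	};
	int ret;

	do {
		ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
		/* on EINTR/EAGAIN the kernel has already shrunk wait.timeout_ns */
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN) && wait.timeout_ns);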
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 330669f51fa7..f843a50d5dce 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -12,69 +12,83 @@
#include "v3d_drv.h"
#include "v3d_regs.h"
-#define REGDEF(reg) { reg, #reg }
+#define REGDEF(min_ver, max_ver, reg) { min_ver, max_ver, reg, #reg }
struct v3d_reg_def {
+ u32 min_ver;
+ u32 max_ver;
u32 reg;
const char *name;
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(V3D_HUB_AXICFG),
- REGDEF(V3D_HUB_UIFCFG),
- REGDEF(V3D_HUB_IDENT0),
- REGDEF(V3D_HUB_IDENT1),
- REGDEF(V3D_HUB_IDENT2),
- REGDEF(V3D_HUB_IDENT3),
- REGDEF(V3D_HUB_INT_STS),
- REGDEF(V3D_HUB_INT_MSK_STS),
-
- REGDEF(V3D_MMU_CTL),
- REGDEF(V3D_MMU_VIO_ADDR),
- REGDEF(V3D_MMU_VIO_ID),
- REGDEF(V3D_MMU_DEBUG_INFO),
+ REGDEF(33, 42, V3D_HUB_AXICFG),
+ REGDEF(33, 71, V3D_HUB_UIFCFG),
+ REGDEF(33, 71, V3D_HUB_IDENT0),
+ REGDEF(33, 71, V3D_HUB_IDENT1),
+ REGDEF(33, 71, V3D_HUB_IDENT2),
+ REGDEF(33, 71, V3D_HUB_IDENT3),
+ REGDEF(33, 71, V3D_HUB_INT_STS),
+ REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(33, 71, V3D_MMU_CTL),
+ REGDEF(33, 71, V3D_MMU_VIO_ADDR),
+ REGDEF(33, 71, V3D_MMU_VIO_ID),
+ REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(71, 71, V3D_GMP_STATUS(71)),
+ REGDEF(71, 71, V3D_GMP_CFG(71)),
+ REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(V3D_CTL_IDENT0),
- REGDEF(V3D_CTL_IDENT1),
- REGDEF(V3D_CTL_IDENT2),
- REGDEF(V3D_CTL_MISCCFG),
- REGDEF(V3D_CTL_INT_STS),
- REGDEF(V3D_CTL_INT_MSK_STS),
- REGDEF(V3D_CLE_CT0CS),
- REGDEF(V3D_CLE_CT0CA),
- REGDEF(V3D_CLE_CT0EA),
- REGDEF(V3D_CLE_CT1CS),
- REGDEF(V3D_CLE_CT1CA),
- REGDEF(V3D_CLE_CT1EA),
-
- REGDEF(V3D_PTB_BPCA),
- REGDEF(V3D_PTB_BPCS),
-
- REGDEF(V3D_GMP_STATUS),
- REGDEF(V3D_GMP_CFG),
- REGDEF(V3D_GMP_VIO_ADDR),
-
- REGDEF(V3D_ERR_FDBGO),
- REGDEF(V3D_ERR_FDBGB),
- REGDEF(V3D_ERR_FDBGS),
- REGDEF(V3D_ERR_STAT),
+ REGDEF(33, 71, V3D_CTL_IDENT0),
+ REGDEF(33, 71, V3D_CTL_IDENT1),
+ REGDEF(33, 71, V3D_CTL_IDENT2),
+ REGDEF(33, 71, V3D_CTL_MISCCFG),
+ REGDEF(33, 71, V3D_CTL_INT_STS),
+ REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
+ REGDEF(33, 71, V3D_CLE_CT0CS),
+ REGDEF(33, 71, V3D_CLE_CT0CA),
+ REGDEF(33, 71, V3D_CLE_CT0EA),
+ REGDEF(33, 71, V3D_CLE_CT1CS),
+ REGDEF(33, 71, V3D_CLE_CT1CA),
+ REGDEF(33, 71, V3D_CLE_CT1EA),
+
+ REGDEF(33, 71, V3D_PTB_BPCA),
+ REGDEF(33, 71, V3D_PTB_BPCS),
+
+ REGDEF(33, 41, V3D_GMP_STATUS(33)),
+ REGDEF(33, 41, V3D_GMP_CFG(33)),
+ REGDEF(33, 41, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(33, 71, V3D_ERR_FDBGO),
+ REGDEF(33, 71, V3D_ERR_FDBGB),
+ REGDEF(33, 71, V3D_ERR_FDBGS),
+ REGDEF(33, 71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(V3D_CSD_STATUS),
- REGDEF(V3D_CSD_CURRENT_CFG0),
- REGDEF(V3D_CSD_CURRENT_CFG1),
- REGDEF(V3D_CSD_CURRENT_CFG2),
- REGDEF(V3D_CSD_CURRENT_CFG3),
- REGDEF(V3D_CSD_CURRENT_CFG4),
- REGDEF(V3D_CSD_CURRENT_CFG5),
- REGDEF(V3D_CSD_CURRENT_CFG6),
+ REGDEF(41, 71, V3D_CSD_STATUS),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(41, 41, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -85,38 +99,41 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
int i, core;
for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg,
- V3D_READ(v3d_hub_reg_defs[i].reg));
+ const struct v3d_reg_def *def = &v3d_hub_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ def->name, def->reg, V3D_READ(def->reg));
+ }
}
- if (v3d->ver < 41) {
- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ const struct v3d_reg_def *def = &v3d_gca_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_gca_reg_defs[i].name,
- v3d_gca_reg_defs[i].reg,
- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ def->name, def->reg, V3D_GCA_READ(def->reg));
}
}
for (core = 0; core < v3d->cores; core++) {
for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) {
- seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
- core,
- v3d_core_reg_defs[i].name,
- v3d_core_reg_defs[i].reg,
- V3D_CORE_READ(core,
- v3d_core_reg_defs[i].reg));
+ const struct v3d_reg_def *def = &v3d_core_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
+ seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
+ core, def->name, def->reg,
+ V3D_CORE_READ(core, def->reg));
+ }
}
- if (v3d_has_csd(v3d)) {
- for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) {
+ for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) {
+ const struct v3d_reg_def *def = &v3d_csd_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
- core,
- v3d_csd_reg_defs[i].name,
- v3d_csd_reg_defs[i].reg,
- V3D_CORE_READ(core,
- v3d_csd_reg_defs[i].reg));
+ core, def->name, def->reg,
+ V3D_CORE_READ(core, def->reg));
}
}
}
@@ -147,8 +164,10 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- seq_printf(m, "TSY: %s\n",
- str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
+ if (v3d->ver <= 42) {
+ seq_printf(m, "TSY: %s\n",
+ str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
+ }
seq_printf(m, "MSO: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO));
seq_printf(m, "L3C: %s (%dkb)\n",
@@ -177,10 +196,14 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- seq_printf(m, " BCG int: %d\n",
- (ident2 & V3D_IDENT2_BCG_INT) != 0);
- seq_printf(m, " Override TMU: %d\n",
- (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
+ if (v3d->ver <= 42) {
+ seq_printf(m, " BCG int: %d\n",
+ (ident2 & V3D_IDENT2_BCG_INT) != 0);
+ }
+ if (v3d->ver < 40) {
+ seq_printf(m, " Override TMU: %d\n",
+ (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
+ }
}
return 0;
@@ -212,14 +235,15 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int measure_ms = 1000;
if (v3d->ver >= 40) {
+ int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
- V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT,
+ V3D_SET_FIELD(cycle_count_reg,
V3D_PCTR_S0));
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
} else {
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0,
- V3D_PCTR_CYCLE_COUNT);
+ V3D_PCTR_CYCLE_COUNT(v3d->ver));
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN,
V3D_V3_PCTR_0_EN_ENABLE |
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index ffbbe9d527d3..3debf37e7d9b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/sched/clock.h>
#include <linux/reset.h>
#include <drm/drm_drv.h>
@@ -90,6 +91,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -111,6 +115,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
+ v3d_priv->enabled_ns[i] = 0;
+ v3d_priv->start_ns[i] = 0;
+ v3d_priv->jobs_sent[i] = 0;
+
sched = &v3d->queue[i].sched;
drm_sched_entity_init(&v3d_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -136,7 +144,35 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
+static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct v3d_file_priv *file_priv = file->driver_priv;
+ u64 timestamp = local_clock();
+ enum v3d_queue queue;
+
+ for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
+		/* Note that, in case of a GPU reset, the time spent while
+		 * attempting to execute the job is not counted in the runtime.
+ */
+ drm_printf(p, "drm-engine-%s: \t%llu ns\n",
+ v3d_queue_to_string(queue),
+ file_priv->start_ns[queue] ? file_priv->enabled_ns[queue]
+ + timestamp - file_priv->start_ns[queue]
+ : file_priv->enabled_ns[queue]);
+
+ /* Note that we only count jobs that completed. Therefore, jobs
+		 * that were resubmitted due to a GPU reset are not counted.
+ */
+ drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
+ v3d_queue_to_string(queue), file_priv->jobs_sent[queue]);
+ }
+}
+
+static const struct file_operations v3d_drm_fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = drm_show_fdinfo,
+};
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
@@ -156,6 +192,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static const struct drm_driver v3d_drm_driver = {
@@ -176,6 +213,7 @@ static const struct drm_driver v3d_drm_driver = {
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
.fops = &v3d_drm_fops,
+ .show_fdinfo = v3d_show_fdinfo,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -187,6 +225,7 @@ static const struct drm_driver v3d_drm_driver = {
static const struct of_device_id v3d_of_match[] = {
{ .compatible = "brcm,2711-v3d" },
+ { .compatible = "brcm,2712-v3d" },
{ .compatible = "brcm,7268-v3d" },
{ .compatible = "brcm,7278-v3d" },
{},
@@ -281,8 +320,14 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto irq_disable;
+ ret = v3d_sysfs_init(dev);
+ if (ret)
+ goto drm_unregister;
+
return 0;
+drm_unregister:
+ drm_dev_unregister(drm);
irq_disable:
v3d_irq_disable(v3d);
gem_destroy:
@@ -296,6 +341,9 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct v3d_dev *v3d = to_v3d_dev(drm);
+ struct device *dev = &pdev->dev;
+
+ v3d_sysfs_destroy(dev);
drm_dev_unregister(drm);
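With .show_fdinfo wired to drm_show_fdinfo() above, per-client GPU usage becomes visible through procfs. Based on the format strings in v3d_show_fdinfo(), /proc/<pid>/fdinfo/<drm-fd> would contain, among the standard DRM keys, lines of roughly this shape (values illustrative):

	drm-engine-bin:		123456789 ns
	drm-engine-render:	987654321 ns
	drm-engine-csd:		42424242 ns
	v3d-jobs-render:	17 jobs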
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 106454f28956..3c7d58866570 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,13 +19,30 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
-#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
+#define V3D_MAX_QUEUES (V3D_CPU + 1)
+
+static inline char *v3d_queue_to_string(enum v3d_queue queue)
+{
+ switch (queue) {
+ case V3D_BIN: return "bin";
+ case V3D_RENDER: return "render";
+ case V3D_TFU: return "tfu";
+ case V3D_CSD: return "csd";
+ case V3D_CACHE_CLEAN: return "cache_clean";
+ case V3D_CPU: return "cpu";
+ }
+ return "UNKNOWN";
+}
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
u64 emit_seqno;
+
+ u64 start_ns;
+ u64 enabled_ns;
+ u64 jobs_sent;
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
@@ -106,6 +123,7 @@ struct v3d_dev {
struct v3d_render_job *render_job;
struct v3d_tfu_job *tfu_job;
struct v3d_csd_job *csd_job;
+ struct v3d_cpu_job *cpu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -167,6 +185,12 @@ struct v3d_file_priv {
} perfmon;
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
+
+ u64 start_ns[V3D_MAX_QUEUES];
+
+ u64 enabled_ns[V3D_MAX_QUEUES];
+
+ u64 jobs_sent[V3D_MAX_QUEUES];
};
struct v3d_bo {
@@ -178,6 +202,8 @@ struct v3d_bo {
* v3d_render_job->unref_list
*/
struct list_head unref_head;
+
+ void *vaddr;
};
static inline struct v3d_bo *
@@ -238,6 +264,11 @@ struct v3d_job {
*/
struct v3d_perfmon *perfmon;
+	/* File descriptor of the process that submitted the job, which can be
+	 * used for collecting per-process GPU usage stats.
+ */
+ struct drm_file *file;
+
/* Callback for the freeing of the job on refcount going to 0. */
void (*free)(struct kref *ref);
};
@@ -285,6 +316,112 @@ struct v3d_csd_job {
struct drm_v3d_submit_csd args;
};
+enum v3d_cpu_job_type {
+ V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1,
+ V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY,
+ V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY,
+};
+
+struct v3d_timestamp_query {
+ /* Offset of this query in the timestamp BO for its value. */
+ u32 offset;
+
+ /* Syncobj that indicates the timestamp availability */
+ struct drm_syncobj *syncobj;
+};
+
+/* Number of perfmons required to handle all supported performance counters */
+#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \
+ DRM_V3D_MAX_PERF_COUNTERS)
+
+struct v3d_performance_query {
+ /* Performance monitor IDs for this query */
+ u32 kperfmon_ids[V3D_MAX_PERFMONS];
+
+ /* Syncobj that indicates the query availability */
+ struct drm_syncobj *syncobj;
+};
+
+struct v3d_indirect_csd_info {
+ /* Indirect CSD */
+ struct v3d_csd_job *job;
+
+ /* Clean cache job associated to the Indirect CSD job */
+ struct v3d_job *clean_job;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ u32 offset;
+
+	/* Workgroup size */
+ u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream.
+ */
+ u32 wg_uniform_offsets[3];
+
+ /* Indirect BO */
+ struct drm_gem_object *indirect;
+
+ /* Context of the Indirect CSD job */
+ struct ww_acquire_ctx acquire_ctx;
+};
+
+struct v3d_timestamp_query_info {
+ struct v3d_timestamp_query *queries;
+
+ u32 count;
+};
+
+struct v3d_performance_query_info {
+ struct v3d_performance_query *queries;
+
+ /* Number of performance queries */
+ u32 count;
+
+ /* Number of performance monitors related to that query pool */
+ u32 nperfmons;
+
+ /* Number of performance counters related to that query pool */
+ u32 ncounters;
+};
+
+struct v3d_copy_query_results_info {
+	/* Define whether to write to the buffer using 64 or 32 bits */
+	bool do_64bit;
+
+	/* Define whether the buffer may be written even if the query is not available */
+	bool do_partial;
+
+	/* Define whether to write the availability bit to the buffer */
+	bool availability_bit;
+
+ /* Offset of the copy buffer in the BO */
+ u32 offset;
+
+ /* Stride of the copy buffer in the BO */
+ u32 stride;
+};
+
+struct v3d_cpu_job {
+ struct v3d_job base;
+
+ enum v3d_cpu_job_type job_type;
+
+ struct v3d_indirect_csd_info indirect_csd;
+
+ struct v3d_timestamp_query_info timestamp_query;
+
+ struct v3d_copy_query_results_info copy;
+
+ struct v3d_performance_query_info performance_query;
+};
+
+typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *);
+
struct v3d_submit_outsync {
struct drm_syncobj *syncobj;
};
@@ -352,12 +489,16 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
+void v3d_get_bo_vaddr(struct v3d_bo *bo);
+void v3d_put_bo_vaddr(struct v3d_bo *bo);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -372,19 +513,21 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset(struct v3d_dev *v3d);
+void v3d_invalidate_caches(struct v3d_dev *v3d);
+void v3d_clean_caches(struct v3d_dev *v3d);
+
+/* v3d_submit.c */
+void v3d_job_cleanup(struct v3d_job *job);
+void v3d_job_put(struct v3d_job *job);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void v3d_job_cleanup(struct v3d_job *job);
-void v3d_job_put(struct v3d_job *job);
-void v3d_reset(struct v3d_dev *v3d);
-void v3d_invalidate_caches(struct v3d_dev *v3d);
-void v3d_clean_caches(struct v3d_dev *v3d);
+int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
@@ -393,8 +536,6 @@ void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
/* v3d_mmu.c */
-int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
- u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
@@ -418,3 +559,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+/* v3d_sysfs.c */
+int v3d_sysfs_init(struct device *dev);
+void v3d_sysfs_destroy(struct device *dev);
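The copy-results descriptor added above (offset, stride, 32/64-bit width, optional availability bit) describes how the CPU job later walks the destination buffer: one record per query, starting at the descriptor's offset and advanced by its stride, with the value in slot 0 and the availability flag in slot 1 when requested. The following is a standalone sketch of that layout for the timestamp-query case, with placeholder types and without the partial-write handling; it is illustrative only and not part of the patch.

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Placeholder mirroring v3d_copy_query_results_info. */
struct copy_info {
	bool do_64bit;
	bool availability_bit;
	uint32_t offset;
	uint32_t stride;
};

/* Write one value into slot 'idx' of a record, as 64- or 32-bit. */
static void write_slot(uint8_t *record, uint32_t idx, bool do_64bit, uint64_t value)
{
	if (do_64bit) {
		memcpy(record + idx * sizeof(uint64_t), &value, sizeof(uint64_t));
	} else {
		uint32_t v32 = (uint32_t)value;

		memcpy(record + idx * sizeof(uint32_t), &v32, sizeof(v32));
	}
}

/* Copy timestamp query results into 'dst', one record per query. */
static void copy_timestamp_results(uint8_t *dst, const struct copy_info *copy,
				   const uint64_t *timestamps, const bool *available,
				   uint32_t count)
{
	uint8_t *record = dst + copy->offset;

	for (uint32_t i = 0; i < count; i++) {
		if (available[i])
			write_slot(record, 0, copy->do_64bit, timestamps[i]);
		if (copy->availability_bit)
			write_slot(record, 1, copy->do_64bit, available[i] ? 1 : 0);
		record += copy->stride;
	}
}

int main(void)
{
	uint8_t buf[64] = {0};
	uint64_t ts[2] = { 123456789ull, 0 };
	bool avail[2] = { true, false };
	struct copy_info copy = { .do_64bit = true, .availability_bit = true,
				  .offset = 0, .stride = 16 };

	copy_timestamp_results(buf, &copy, ts, avail, 2);
	return 0;
}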
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 2e94ce788c71..afc565078c78 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -11,8 +11,6 @@
#include <linux/uaccess.h>
#include <drm/drm_managed.h>
-#include <drm/drm_syncobj.h>
-#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
@@ -47,9 +45,9 @@ v3d_init_hw_state(struct v3d_dev *v3d)
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
- V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);
+ V3D_CORE_WRITE(core, V3D_GMP_CFG(v3d->ver), V3D_GMP_CFG_STOP_REQ);
- if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
+ if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS(v3d->ver)) &
(V3D_GMP_STATUS_RD_COUNT_MASK |
V3D_GMP_STATUS_WR_COUNT_MASK |
V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
@@ -241,771 +239,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
v3d_invalidate_slices(v3d, 0);
}
-/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
- *
- * We don't lock the RCL the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
- * to v3d, so we don't attach dma-buf fences to them.
- */
-static int
-v3d_lock_bo_reservations(struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i, ret;
-
- ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < job->bo_count; i++) {
- ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_implicit_dependencies(&job->base,
- job->bo[i], true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
- return ret;
-}
-
-/**
- * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @job: V3D job being set up
- * @bo_handles: GEM handles
- * @bo_count: Number of GEM handles passed in
- *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
- */
-static int
-v3d_lookup_bos(struct drm_device *dev,
- struct drm_file *file_priv,
- struct v3d_job *job,
- u64 bo_handles,
- u32 bo_count)
-{
- job->bo_count = bo_count;
-
- if (!job->bo_count) {
- /* See comment on bo_index for why we have to check
- * this.
- */
- DRM_DEBUG("Rendering requires BOs\n");
- return -EINVAL;
- }
-
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count, &job->bo);
-}
-
-static void
-v3d_job_free(struct kref *ref)
-{
- struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- int i;
-
- if (job->bo) {
- for (i = 0; i < job->bo_count; i++)
- drm_gem_object_put(job->bo[i]);
- kvfree(job->bo);
- }
-
- dma_fence_put(job->irq_fence);
- dma_fence_put(job->done_fence);
-
- if (job->perfmon)
- v3d_perfmon_put(job->perfmon);
-
- kfree(job);
-}
-
-static void
-v3d_render_job_free(struct kref *ref)
-{
- struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
- base.refcount);
- struct v3d_bo *bo, *save;
-
- list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
- drm_gem_object_put(&bo->base.base);
- }
-
- v3d_job_free(ref);
-}
-
-void v3d_job_cleanup(struct v3d_job *job)
-{
- if (!job)
- return;
-
- drm_sched_job_cleanup(&job->base);
- v3d_job_put(job);
-}
-
-void v3d_job_put(struct v3d_job *job)
-{
- kref_put(&job->refcount, job->free);
-}
-
-int
-v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
- struct drm_v3d_wait_bo *args = data;
- ktime_t start = ktime_get();
- u64 delta_ns;
- unsigned long timeout_jiffies =
- nsecs_to_jiffies_timeout(args->timeout_ns);
-
- if (args->pad != 0)
- return -EINVAL;
-
- ret = drm_gem_dma_resv_wait(file_priv, args->handle,
- true, timeout_jiffies);
-
- /* Decrement the user's timeout, in case we got interrupted
- * such that the ioctl will be restarted.
- */
- delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
- if (delta_ns < args->timeout_ns)
- args->timeout_ns -= delta_ns;
- else
- args->timeout_ns = 0;
-
- /* Asked to wait beyond the jiffie/scheduler precision? */
- if (ret == -ETIME && args->timeout_ns)
- ret = -EAGAIN;
-
- return ret;
-}
-
-static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- void **container, size_t size, void (*free)(struct kref *ref),
- u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct v3d_job *job;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int ret, i;
-
- *container = kcalloc(1, size, GFP_KERNEL);
- if (!*container) {
- DRM_ERROR("Cannot allocate memory for v3d job.");
- return -ENOMEM;
- }
-
- job = *container;
- job->v3d = v3d;
- job->free = free;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- v3d_priv);
- if (ret)
- goto fail;
-
- if (has_multisync) {
- if (se->in_sync_count && se->wait_stage == queue) {
- struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
-
- for (i = 0; i < se->in_sync_count; i++) {
- struct drm_v3d_sem in;
-
- if (copy_from_user(&in, handle++, sizeof(in))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy wait dep handle.\n");
- goto fail_deps;
- }
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
- }
- } else {
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
-
- kref_init(&job->refcount);
-
- return 0;
-
-fail_deps:
- drm_sched_job_cleanup(&job->base);
-fail:
- kfree(*container);
- *container = NULL;
-
- return ret;
-}
-
-static void
-v3d_push_job(struct v3d_job *job)
-{
- drm_sched_job_arm(&job->base);
-
- job->done_fence = dma_fence_get(&job->base.s_fence->finished);
-
- /* put by scheduler job completion */
- kref_get(&job->refcount);
-
- drm_sched_entity_push_job(&job->base);
-}
-
-static void
-v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
- struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx,
- u32 out_sync,
- struct v3d_submit_ext *se,
- struct dma_fence *done_fence)
-{
- struct drm_syncobj *sync_out;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int i;
-
- for (i = 0; i < job->bo_count; i++) {
- /* XXX: Use shared fences for read-only objects. */
- dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
- DMA_RESV_USAGE_WRITE);
- }
-
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-
- /* Update the return sync object for the job */
- /* If it only supports a single signal semaphore*/
- if (!has_multisync) {
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
- }
- return;
- }
-
- /* If multiple semaphores extension is supported */
- if (se->out_sync_count) {
- for (i = 0; i < se->out_sync_count; i++) {
- drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
- done_fence);
- drm_syncobj_put(se->out_syncs[i].syncobj);
- }
- kvfree(se->out_syncs);
- }
-}
-
-static void
-v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
-{
- unsigned int i;
-
- if (!(se && se->out_sync_count))
- return;
-
- for (i = 0; i < se->out_sync_count; i++)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-}
-
-static int
-v3d_get_multisync_post_deps(struct drm_file *file_priv,
- struct v3d_submit_ext *se,
- u32 count, u64 handles)
-{
- struct drm_v3d_sem __user *post_deps;
- int i, ret;
-
- if (!count)
- return 0;
-
- se->out_syncs = (struct v3d_submit_outsync *)
- kvmalloc_array(count,
- sizeof(struct v3d_submit_outsync),
- GFP_KERNEL);
- if (!se->out_syncs)
- return -ENOMEM;
-
- post_deps = u64_to_user_ptr(handles);
-
- for (i = 0; i < count; i++) {
- struct drm_v3d_sem out;
-
- if (copy_from_user(&out, post_deps++, sizeof(out))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy post dep handles\n");
- goto fail;
- }
-
- se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
- out.handle);
- if (!se->out_syncs[i].syncobj) {
- ret = -EINVAL;
- goto fail;
- }
- }
- se->out_sync_count = count;
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-
- return ret;
-}
-
-/* Get data for multiple binary semaphores synchronization. Parse syncobj
- * to be signaled when job completes (out_sync).
- */
-static int
-v3d_get_multisync_submit_deps(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- void *data)
-{
- struct drm_v3d_multi_sync multisync;
- struct v3d_submit_ext *se = data;
- int ret;
-
- if (copy_from_user(&multisync, ext, sizeof(multisync)))
- return -EFAULT;
-
- if (multisync.pad)
- return -EINVAL;
-
- ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
- multisync.out_syncs);
- if (ret)
- return ret;
-
- se->in_sync_count = multisync.in_sync_count;
- se->in_syncs = multisync.in_syncs;
- se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
- se->wait_stage = multisync.wait_stage;
-
- return 0;
-}
-
-/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
- * according to the extension id (name).
- */
-static int
-v3d_get_extensions(struct drm_file *file_priv,
- u64 ext_handles,
- void *data)
-{
- struct drm_v3d_extension __user *user_ext;
- int ret;
-
- user_ext = u64_to_user_ptr(ext_handles);
- while (user_ext) {
- struct drm_v3d_extension ext;
-
- if (copy_from_user(&ext, user_ext, sizeof(ext))) {
- DRM_DEBUG("Failed to copy submit extension\n");
- return -EFAULT;
- }
-
- switch (ext.id) {
- case DRM_V3D_EXT_ID_MULTI_SYNC:
- ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
- if (ret)
- return ret;
- break;
- default:
- DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
- return -EINVAL;
- }
-
- user_ext = u64_to_user_ptr(ext.next);
- }
-
- return 0;
-}
-
-/**
- * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * This is the main entrypoint for userspace to submit a 3D frame to
- * the GPU. Userspace provides the binner command list (if
- * applicable), and the kernel sets up the render command list to draw
- * to the framebuffer described in the ioctl, using the command lists
- * that the 3D engine's binner will produce.
- */
-int
-v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_cl *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render = NULL;
- struct v3d_job *clean_job = NULL;
- struct v3d_job *last_job;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
-
- if (args->pad)
- return -EINVAL;
-
- if (args->flags &&
- args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
- DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
- v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret)
- goto fail;
-
- render->start = args->rcl_start;
- render->end = args->rcl_end;
- INIT_LIST_HEAD(&render->unref_list);
-
- if (args->bcl_start != args->bcl_end) {
- ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
- v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret)
- goto fail;
-
- bin->start = args->bcl_start;
- bin->end = args->bcl_end;
- bin->qma = args->qma;
- bin->qms = args->qms;
- bin->qts = args->qts;
- bin->render = render;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
- goto fail;
-
- last_job = clean_job;
- } else {
- last_job = &render->base;
- }
-
- ret = v3d_lookup_bos(dev, file_priv, last_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- render->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
-
- if (!render->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- if (bin) {
- bin->base.perfmon = render->base.perfmon;
- v3d_perfmon_get(bin->base.perfmon);
- v3d_push_job(&bin->base);
-
- ret = drm_sched_job_add_dependency(&render->base.base,
- dma_fence_get(bin->base.done_fence));
- if (ret)
- goto fail_unreserve;
- }
-
- v3d_push_job(&render->base);
-
- if (clean_job) {
- struct dma_fence *render_fence =
- dma_fence_get(render->base.done_fence);
- ret = drm_sched_job_add_dependency(&clean_job->base,
- render_fence);
- if (ret)
- goto fail_unreserve;
- clean_job->perfmon = render->base.perfmon;
- v3d_perfmon_get(clean_job->perfmon);
- v3d_push_job(clean_job);
- }
-
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- last_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- last_job->done_fence);
-
- if (bin)
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- if (clean_job)
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(last_job->bo,
- last_job->bo_count, &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)bin);
- v3d_job_cleanup((void *)render);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the TFU, which we don't
- * need to validate since the TFU is behind the MMU.
- */
-int
-v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_tfu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_tfu_job *job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_DEBUG("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
- v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret)
- goto fail;
-
- job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
- sizeof(*job->base.bo), GFP_KERNEL);
- if (!job->base.bo) {
- ret = -ENOMEM;
- goto fail;
- }
-
- job->args = *args;
-
- for (job->base.bo_count = 0;
- job->base.bo_count < ARRAY_SIZE(args->bo_handles);
- job->base.bo_count++) {
- struct drm_gem_object *bo;
-
- if (!args->bo_handles[job->base.bo_count])
- break;
-
- bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- job->base.bo_count,
- args->bo_handles[job->base.bo_count]);
- ret = -ENOENT;
- goto fail;
- }
- job->base.bo[job->base.bo_count] = bo;
- }
-
- ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
- if (ret)
- goto fail;
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &job->base, &acquire_ctx,
- args->out_sync,
- &se,
- job->base.done_fence);
-
- v3d_job_put(&job->base);
-
- return 0;
-
-fail:
- v3d_job_cleanup((void *)job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the CSD, which we don't
- * need to validate since the CSD is behind the MMU.
- */
-int
-v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_csd *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_csd_job *job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
-
- if (args->pad)
- return -EINVAL;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
- return -EINVAL;
- }
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
- v3d_job_free, args->in_sync, &se, V3D_CSD);
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
- goto fail;
-
- job->args = *args;
-
- ret = v3d_lookup_bos(dev, file_priv, clean_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- job->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
- if (!job->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- clean_job->done_fence);
-
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
int
v3d_gem_init(struct drm_device *dev)
{
@@ -1013,8 +246,12 @@ v3d_gem_init(struct drm_device *dev)
u32 pt_size = 4096 * 1024;
int ret, i;
- for (i = 0; i < V3D_MAX_QUEUES; i++)
+ for (i = 0; i < V3D_MAX_QUEUES; i++) {
v3d->queue[i].fence_context = dma_fence_context_alloc(1);
+ v3d->queue[i].start_ns = 0;
+ v3d->queue[i].enabled_ns = 0;
+ v3d->queue[i].jobs_sent = 0;
+ }
spin_lock_init(&v3d->mm_lock);
spin_lock_init(&v3d->job_lock);
@@ -1072,6 +309,8 @@ v3d_gem_destroy(struct drm_device *dev)
*/
WARN_ON(v3d->bin_job);
WARN_ON(v3d->render_job);
+ WARN_ON(v3d->tfu_job);
+ WARN_ON(v3d->csd_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e714d5318f30..afc76390a197 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -14,21 +14,23 @@
*/
#include <linux/platform_device.h>
+#include <linux/sched/clock.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
-#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
- V3D_INT_FLDONE | \
- V3D_INT_FRDONE | \
- V3D_INT_CSDDONE | \
- V3D_INT_GMPV))
+#define V3D_CORE_IRQS(ver) ((u32)(V3D_INT_OUTOMEM | \
+ V3D_INT_FLDONE | \
+ V3D_INT_FRDONE | \
+ V3D_INT_CSDDONE(ver) | \
+ (ver < 71 ? V3D_INT_GMPV : 0)))
-#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
- V3D_HUB_INT_MMU_PTI | \
- V3D_HUB_INT_MMU_CAP | \
- V3D_HUB_INT_TFUC))
+#define V3D_HUB_IRQS(ver) ((u32)(V3D_HUB_INT_MMU_WRV | \
+ V3D_HUB_INT_MMU_PTI | \
+ V3D_HUB_INT_MMU_CAP | \
+ V3D_HUB_INT_TFUC | \
+ (ver >= 71 ? V3D_V7_HUB_INT_GMPV : 0)))
static irqreturn_t
v3d_hub_irq(int irq, void *arg);
@@ -100,6 +102,18 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->bin_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_BIN];
+
+ file->jobs_sent[V3D_BIN]++;
+ v3d->queue[V3D_BIN].jobs_sent++;
+
+ file->start_ns[V3D_BIN] = 0;
+ v3d->queue[V3D_BIN].start_ns = 0;
+
+ file->enabled_ns[V3D_BIN] += runtime;
+ v3d->queue[V3D_BIN].enabled_ns += runtime;
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -109,15 +123,39 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->render_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
+
+ file->jobs_sent[V3D_RENDER]++;
+ v3d->queue[V3D_RENDER].jobs_sent++;
+
+ file->start_ns[V3D_RENDER] = 0;
+ v3d->queue[V3D_RENDER].start_ns = 0;
+
+ file->enabled_ns[V3D_RENDER] += runtime;
+ v3d->queue[V3D_RENDER].enabled_ns += runtime;
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
- if (intsts & V3D_INT_CSDDONE) {
+ if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
struct v3d_fence *fence =
to_v3d_fence(v3d->csd_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_CSD];
+
+ file->jobs_sent[V3D_CSD]++;
+ v3d->queue[V3D_CSD].jobs_sent++;
+
+ file->start_ns[V3D_CSD] = 0;
+ v3d->queue[V3D_CSD].start_ns = 0;
+
+ file->enabled_ns[V3D_CSD] += runtime;
+ v3d->queue[V3D_CSD].enabled_ns += runtime;
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -127,7 +165,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (intsts & V3D_INT_GMPV)
+ if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -154,6 +192,18 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
to_v3d_fence(v3d->tfu_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_TFU];
+
+ file->jobs_sent[V3D_TFU]++;
+ v3d->queue[V3D_TFU].jobs_sent++;
+
+ file->start_ns[V3D_TFU] = 0;
+ v3d->queue[V3D_TFU].start_ns = 0;
+
+ file->enabled_ns[V3D_TFU] += runtime;
+ v3d->queue[V3D_TFU].enabled_ns += runtime;
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -197,6 +247,11 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
+ if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ dev_err(v3d->drm.dev, "GMP Violation\n");
+ status = IRQ_HANDLED;
+ }
+
return status;
}
@@ -211,8 +266,8 @@ v3d_irq_init(struct v3d_dev *v3d)
* for us.
*/
for (core = 0; core < v3d->cores; core++)
- V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
- V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+ V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
+ V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
@@ -256,12 +311,12 @@ v3d_irq_enable(struct v3d_dev *v3d)
/* Enable our set of interrupts, masking out any others. */
for (core = 0; core < v3d->cores; core++) {
- V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
- V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
+ V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS(v3d->ver));
+ V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS(v3d->ver));
}
- V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
- V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
+ V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS(v3d->ver));
+ V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS(v3d->ver));
}
void
@@ -276,8 +331,8 @@ v3d_irq_disable(struct v3d_dev *v3d)
/* Clear any pending interrupts we might have left. */
for (core = 0; core < v3d->cores; core++)
- V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
- V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+ V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
+ V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
cancel_work_sync(&v3d->overflow_mem_work);
}
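The interrupt-handler changes above follow one accounting pattern for every queue: a start timestamp is recorded when the job is handed to the hardware, and the completion interrupt adds the elapsed time to the per-file and per-device counters exactly once and bumps the job count. A minimal user-space-style sketch of that bookkeeping, using hypothetical names and CLOCK_MONOTONIC in place of the kernel's local_clock():

#include <stdint.h>
#include <time.h>

/* Hypothetical stand-in for the v3d_file_priv / v3d_queue_state counters. */
struct usage_stats {
	uint64_t start_ns;    /* set when a job starts on the queue */
	uint64_t enabled_ns;  /* total time the queue was busy */
	uint64_t jobs_sent;   /* number of jobs completed */
};

static uint64_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Called when a job is handed to the hardware (cf. the *_job_run hooks). */
static void job_start(struct usage_stats *s)
{
	s->start_ns = clock_ns();
}

/* Called from the completion interrupt (cf. v3d_irq/v3d_hub_irq). */
static void job_done(struct usage_stats *s)
{
	uint64_t runtime = clock_ns() - s->start_ns;

	s->jobs_sent++;
	s->start_ns = 0;
	s->enabled_ns += runtime;	/* accumulated exactly once per job */
}

int main(void)
{
	struct usage_stats bin_queue = {0};

	job_start(&bin_queue);
	/* ... hardware runs the job ... */
	job_done(&bin_queue);

	return 0;
}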
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 3663e0d6bf76..1b1a62ad9585 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -57,6 +57,7 @@
#define V3D_HUB_INT_MSK_STS 0x0005c
#define V3D_HUB_INT_MSK_SET 0x00060
#define V3D_HUB_INT_MSK_CLR 0x00064
+# define V3D_V7_HUB_INT_GMPV BIT(6)
# define V3D_HUB_INT_MMU_WRV BIT(5)
# define V3D_HUB_INT_MMU_PTI BIT(4)
# define V3D_HUB_INT_MMU_CAP BIT(3)
@@ -64,6 +65,7 @@
# define V3D_HUB_INT_TFUC BIT(1)
# define V3D_HUB_INT_TFUF BIT(0)
+/* GCA registers only exist in V3D < 41 */
#define V3D_GCA_CACHE_CTRL 0x0000c
# define V3D_GCA_CACHE_CTRL_FLUSH BIT(0)
@@ -86,7 +88,8 @@
# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c
# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
-#define V3D_TFU_CS 0x00400
+#define V3D_TFU_CS(ver) ((ver >= 71) ? 0x00700 : 0x00400)
+
/* Stops current job, empties input fifo. */
# define V3D_TFU_CS_TFURST BIT(31)
# define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16)
@@ -95,7 +98,7 @@
# define V3D_TFU_CS_NFREE_SHIFT 8
# define V3D_TFU_CS_BUSY BIT(0)
-#define V3D_TFU_SU 0x00404
+#define V3D_TFU_SU(ver) ((ver >= 71) ? 0x00704 : 0x00404)
/* Interrupt when FINTTHR input slots are free (0 = disabled) */
# define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8)
# define V3D_TFU_SU_FINTTHR_SHIFT 8
@@ -106,39 +109,42 @@
# define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0)
# define V3D_TFU_SU_THROTTLE_SHIFT 0
-#define V3D_TFU_ICFG 0x00408
+#define V3D_TFU_ICFG(ver) ((ver >= 71) ? 0x00708 : 0x00408)
/* Interrupt when the conversion is complete. */
# define V3D_TFU_ICFG_IOC BIT(0)
/* Input Image Address */
-#define V3D_TFU_IIA 0x0040c
+#define V3D_TFU_IIA(ver) ((ver >= 71) ? 0x0070c : 0x0040c)
/* Input Chroma Address */
-#define V3D_TFU_ICA 0x00410
+#define V3D_TFU_ICA(ver) ((ver >= 71) ? 0x00710 : 0x00410)
/* Input Image Stride */
-#define V3D_TFU_IIS 0x00414
+#define V3D_TFU_IIS(ver) ((ver >= 71) ? 0x00714 : 0x00414)
/* Input Image U-Plane Address */
-#define V3D_TFU_IUA 0x00418
+#define V3D_TFU_IUA(ver) ((ver >= 71) ? 0x00718 : 0x00418)
+/* Image output config (V3D 7.x only) */
+#define V3D_V7_TFU_IOC 0x0071c
/* Output Image Address */
-#define V3D_TFU_IOA 0x0041c
+#define V3D_TFU_IOA(ver) ((ver >= 71) ? 0x00720 : 0x0041c)
/* Image Output Size */
-#define V3D_TFU_IOS 0x00420
+#define V3D_TFU_IOS(ver) ((ver >= 71) ? 0x00724 : 0x00420)
/* TFU YUV Coefficient 0 */
-#define V3D_TFU_COEF0 0x00424
-/* Use these regs instead of the defaults. */
+#define V3D_TFU_COEF0(ver) ((ver >= 71) ? 0x00728 : 0x00424)
+/* Use these regs instead of the defaults (V3D 4.x only) */
# define V3D_TFU_COEF0_USECOEF BIT(31)
/* TFU YUV Coefficient 1 */
-#define V3D_TFU_COEF1 0x00428
+#define V3D_TFU_COEF1(ver) ((ver >= 71) ? 0x0072c : 0x00428)
/* TFU YUV Coefficient 2 */
-#define V3D_TFU_COEF2 0x0042c
+#define V3D_TFU_COEF2(ver) ((ver >= 71) ? 0x00730 : 0x0042c)
/* TFU YUV Coefficient 3 */
-#define V3D_TFU_COEF3 0x00430
+#define V3D_TFU_COEF3(ver) ((ver >= 71) ? 0x00734 : 0x00430)
+/* V3D 4.x only */
#define V3D_TFU_CRC 0x00434
/* Per-MMU registers. */
#define V3D_MMUC_CONTROL 0x01000
-# define V3D_MMUC_CONTROL_CLEAR BIT(3)
+#define V3D_MMUC_CONTROL_CLEAR(ver) ((ver >= 71) ? BIT(11) : BIT(3))
# define V3D_MMUC_CONTROL_FLUSHING BIT(2)
# define V3D_MMUC_CONTROL_FLUSH BIT(1)
# define V3D_MMUC_CONTROL_ENABLE BIT(0)
@@ -246,7 +252,6 @@
#define V3D_CTL_L2TCACTL 0x00030
# define V3D_L2TCACTL_TMUWCF BIT(8)
-# define V3D_L2TCACTL_L2T_NO_WM BIT(4)
/* Invalidates cache lines. */
# define V3D_L2TCACTL_FLM_FLUSH 0
/* Removes cachelines without writing dirty lines back. */
@@ -267,8 +272,8 @@
#define V3D_CTL_INT_MSK_CLR 0x00064
# define V3D_INT_QPU_MASK V3D_MASK(27, 16)
# define V3D_INT_QPU_SHIFT 16
-# define V3D_INT_CSDDONE BIT(7)
-# define V3D_INT_PCTR BIT(6)
+#define V3D_INT_CSDDONE(ver) ((ver >= 71) ? BIT(6) : BIT(7))
+#define V3D_INT_PCTR(ver) ((ver >= 71) ? BIT(5) : BIT(6))
# define V3D_INT_GMPV BIT(5)
# define V3D_INT_TRFB BIT(4)
# define V3D_INT_SPILLUSE BIT(3)
@@ -350,21 +355,25 @@
#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \
4 * (x))
# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
+# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0)
# define V3D_PCTR_S0_SHIFT 0
# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
+# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8)
# define V3D_PCTR_S1_SHIFT 8
# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
+# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16)
# define V3D_PCTR_S2_SHIFT 16
# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
+# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24)
# define V3D_PCTR_S3_SHIFT 24
-# define V3D_PCTR_CYCLE_COUNT 32
+#define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
/* Output values of the counters. */
#define V3D_PCTR_0_PCTR0 0x00680
#define V3D_PCTR_0_PCTR31 0x006fc
#define V3D_PCTR_0_PCTRX(x) (V3D_PCTR_0_PCTR0 + \
4 * (x))
-#define V3D_GMP_STATUS 0x00800
+#define V3D_GMP_STATUS(ver) ((ver >= 71) ? 0x00600 : 0x00800)
# define V3D_GMP_STATUS_GMPRST BIT(31)
# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24)
# define V3D_GMP_STATUS_WR_COUNT_SHIFT 24
@@ -377,13 +386,13 @@
# define V3D_GMP_STATUS_INVPROT BIT(1)
# define V3D_GMP_STATUS_VIO BIT(0)
-#define V3D_GMP_CFG 0x00804
+#define V3D_GMP_CFG(ver) ((ver >= 71) ? 0x00604 : 0x00804)
# define V3D_GMP_CFG_LBURSTEN BIT(3)
# define V3D_GMP_CFG_PGCRSEN BIT(2)
# define V3D_GMP_CFG_STOP_REQ BIT(1)
# define V3D_GMP_CFG_PROT_ENABLE BIT(0)
-#define V3D_GMP_VIO_ADDR 0x00808
+#define V3D_GMP_VIO_ADDR(ver) ((ver >= 71) ? 0x00608 : 0x00808)
#define V3D_GMP_VIO_TYPE 0x0080c
#define V3D_GMP_TABLE_ADDR 0x00810
#define V3D_GMP_CLEAR_LOAD 0x00814
@@ -398,25 +407,25 @@
# define V3D_CSD_STATUS_HAVE_CURRENT_DISPATCH BIT(1)
# define V3D_CSD_STATUS_HAVE_QUEUED_DISPATCH BIT(0)
-#define V3D_CSD_QUEUED_CFG0 0x00904
+#define V3D_CSD_QUEUED_CFG0(ver) ((ver >= 71) ? 0x00930 : 0x00904)
# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_MASK V3D_MASK(31, 16)
# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_SHIFT 16
# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_MASK V3D_MASK(15, 0)
# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_SHIFT 0
-#define V3D_CSD_QUEUED_CFG1 0x00908
+#define V3D_CSD_QUEUED_CFG1(ver) ((ver >= 71) ? 0x00934 : 0x00908)
# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_MASK V3D_MASK(31, 16)
# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_SHIFT 16
# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_MASK V3D_MASK(15, 0)
# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_SHIFT 0
-#define V3D_CSD_QUEUED_CFG2 0x0090c
+#define V3D_CSD_QUEUED_CFG2(ver) ((ver >= 71) ? 0x00938 : 0x0090c)
# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_MASK V3D_MASK(31, 16)
# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_SHIFT 16
# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_MASK V3D_MASK(15, 0)
# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_SHIFT 0
-#define V3D_CSD_QUEUED_CFG3 0x00910
+#define V3D_CSD_QUEUED_CFG3(ver) ((ver >= 71) ? 0x0093c : 0x00910)
# define V3D_CSD_QUEUED_CFG3_OVERLAP_WITH_PREV BIT(26)
# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_MASK V3D_MASK(25, 20)
# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_SHIFT 20
@@ -428,23 +437,28 @@
# define V3D_CSD_QUEUED_CFG3_WG_SIZE_SHIFT 0
/* Number of batches, minus 1 */
-#define V3D_CSD_QUEUED_CFG4 0x00914
+#define V3D_CSD_QUEUED_CFG4(ver) ((ver >= 71) ? 0x00940 : 0x00914)
/* Shader address, pnan, singleseg, threading, like a shader record. */
-#define V3D_CSD_QUEUED_CFG5 0x00918
+#define V3D_CSD_QUEUED_CFG5(ver) ((ver >= 71) ? 0x00944 : 0x00918)
/* Uniforms address (4 byte aligned) */
-#define V3D_CSD_QUEUED_CFG6 0x0091c
-
-#define V3D_CSD_CURRENT_CFG0 0x00920
-#define V3D_CSD_CURRENT_CFG1 0x00924
-#define V3D_CSD_CURRENT_CFG2 0x00928
-#define V3D_CSD_CURRENT_CFG3 0x0092c
-#define V3D_CSD_CURRENT_CFG4 0x00930
-#define V3D_CSD_CURRENT_CFG5 0x00934
-#define V3D_CSD_CURRENT_CFG6 0x00938
-
-#define V3D_CSD_CURRENT_ID0 0x0093c
+#define V3D_CSD_QUEUED_CFG6(ver) ((ver >= 71) ? 0x00948 : 0x0091c)
+
+/* V3D 7.x+ only */
+#define V3D_V7_CSD_QUEUED_CFG7 0x0094c
+
+#define V3D_CSD_CURRENT_CFG0(ver) ((ver >= 71) ? 0x00958 : 0x00920)
+#define V3D_CSD_CURRENT_CFG1(ver) ((ver >= 71) ? 0x0095c : 0x00924)
+#define V3D_CSD_CURRENT_CFG2(ver) ((ver >= 71) ? 0x00960 : 0x00928)
+#define V3D_CSD_CURRENT_CFG3(ver) ((ver >= 71) ? 0x00964 : 0x0092c)
+#define V3D_CSD_CURRENT_CFG4(ver) ((ver >= 71) ? 0x00968 : 0x00930)
+#define V3D_CSD_CURRENT_CFG5(ver) ((ver >= 71) ? 0x0096c : 0x00934)
+#define V3D_CSD_CURRENT_CFG6(ver) ((ver >= 71) ? 0x00970 : 0x00938)
+/* V3D 7.x+ only */
+#define V3D_V7_CSD_CURRENT_CFG7 0x00974
+
+#define V3D_CSD_CURRENT_ID0(ver) ((ver >= 71) ? 0x00978 : 0x0093c)
# define V3D_CSD_CURRENT_ID0_WG_X_MASK V3D_MASK(31, 16)
# define V3D_CSD_CURRENT_ID0_WG_X_SHIFT 16
# define V3D_CSD_CURRENT_ID0_WG_IN_SG_MASK V3D_MASK(11, 8)
@@ -452,7 +466,7 @@
# define V3D_CSD_CURRENT_ID0_L_IDX_MASK V3D_MASK(7, 0)
# define V3D_CSD_CURRENT_ID0_L_IDX_SHIFT 0
-#define V3D_CSD_CURRENT_ID1 0x00940
+#define V3D_CSD_CURRENT_ID1(ver) ((ver >= 71) ? 0x0097c : 0x00940)
# define V3D_CSD_CURRENT_ID0_WG_Z_MASK V3D_MASK(31, 16)
# define V3D_CSD_CURRENT_ID0_WG_Z_SHIFT 16
# define V3D_CSD_CURRENT_ID0_WG_Y_MASK V3D_MASK(15, 0)
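The register header now takes the hardware version as a macro argument, so one code path can address both the 4.x and 7.1 register layouts without sprinkling version checks at every call site. A small self-contained illustration of the same idiom (the offsets below are the ones from the diff; the read helper and main() are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Same selection idiom as V3D_GMP_STATUS(ver) and friends in v3d_regs.h. */
#define GMP_STATUS(ver)	(((ver) >= 71) ? 0x00600 : 0x00800)
#define GMP_CFG(ver)	(((ver) >= 71) ? 0x00604 : 0x00804)

/* Hypothetical register read helper standing in for V3D_CORE_READ(). */
static uint32_t core_read(uint32_t offset)
{
	printf("read offset 0x%05x\n", offset);
	return 0;
}

int main(void)
{
	int ver = 71;	/* 7.1 hardware selects the relocated register block */

	core_read(GMP_STATUS(ver));
	core_read(GMP_CFG(ver));
	return 0;
}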
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 038e1ae589c7..54015ad765c7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -18,12 +18,17 @@
* semaphores to interlock between them.
*/
+#include <linux/sched/clock.h>
#include <linux/kthread.h>
+#include <drm/drm_syncobj.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
+#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
+
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
@@ -54,6 +59,12 @@ to_csd_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_csd_job, base.base);
}
+static struct v3d_cpu_job *
+to_cpu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_cpu_job, base.base);
+}
+
static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
@@ -63,6 +74,28 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
}
static void
+v3d_cpu_job_free(struct drm_sched_job *sched_job)
+{
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+
+ if (timestamp_query->queries) {
+ for (int i = 0; i < timestamp_query->count; i++)
+ drm_syncobj_put(timestamp_query->queries[i].syncobj);
+ kvfree(timestamp_query->queries);
+ }
+
+ if (performance_query->queries) {
+ for (int i = 0; i < performance_query->count; i++)
+ drm_syncobj_put(performance_query->queries[i].syncobj);
+ kvfree(performance_query->queries);
+ }
+
+ v3d_job_cleanup(&job->base);
+}
+
+static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
if (job->perfmon != v3d->active_perfmon)
@@ -76,6 +109,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
@@ -107,6 +141,9 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
job->start, job->end);
+ file->start_ns[V3D_BIN] = local_clock();
+ v3d->queue[V3D_BIN].start_ns = file->start_ns[V3D_BIN];
+
v3d_switch_perfmon(v3d, &job->base);
/* Set the current and end address of the control list.
@@ -131,6 +168,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -158,6 +196,9 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
job->start, job->end);
+ file->start_ns[V3D_RENDER] = local_clock();
+ v3d->queue[V3D_RENDER].start_ns = file->start_ns[V3D_RENDER];
+
v3d_switch_perfmon(v3d, &job->base);
/* XXX: Set the QCFG */
@@ -176,6 +217,7 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_tfu_job *job = to_tfu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -190,20 +232,25 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
- V3D_WRITE(V3D_TFU_IIA, job->args.iia);
- V3D_WRITE(V3D_TFU_IIS, job->args.iis);
- V3D_WRITE(V3D_TFU_ICA, job->args.ica);
- V3D_WRITE(V3D_TFU_IUA, job->args.iua);
- V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
- V3D_WRITE(V3D_TFU_IOS, job->args.ios);
- V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
- if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
- V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
- V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
- V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
+ file->start_ns[V3D_TFU] = local_clock();
+ v3d->queue[V3D_TFU].start_ns = file->start_ns[V3D_TFU];
+
+ V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
+ V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
+ V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
+ V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
+ V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
+ if (v3d->ver >= 71)
+ V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
+ V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
+ V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
+ if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
+ V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
+ V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
}
/* ICFG kicks off the job. */
- V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+ V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);
return fence;
}
@@ -213,9 +260,10 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- int i;
+ int i, csd_cfg0_reg, csd_cfg_reg_count;
v3d->csd_job = job;
@@ -231,24 +279,314 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
+ file->start_ns[V3D_CSD] = local_clock();
+ v3d->queue[V3D_CSD].start_ns = file->start_ns[V3D_CSD];
+
v3d_switch_perfmon(v3d, &job->base);
- for (i = 1; i <= 6; i++)
- V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
+ csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
+ csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
+ for (i = 1; i <= csd_cfg_reg_count; i++)
+ V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
/* CFG0 write kicks off the job. */
- V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
+ V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);
return fence;
}
+static void
+v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
+{
+ struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
+ struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
+ u32 *wg_counts;
+
+ v3d_get_bo_vaddr(bo);
+ v3d_get_bo_vaddr(indirect);
+
+ wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);
+
+ if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
+ return;
+
+ args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+ for (int i = 0; i < 3; i++) {
+ /* 0xffffffff indicates that the uniform rewrite is not needed */
+ if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
+ u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
+ ((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
+ }
+ }
+
+ v3d_put_bo_vaddr(indirect);
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_timestamp_query(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ u8 *value_addr;
+
+ v3d_get_bo_vaddr(bo);
+
+ for (int i = 0; i < timestamp_query->count; i++) {
+ value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
+ *((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;
+
+ drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
+ job->base.done_fence);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_timestamp_query *queries = timestamp_query->queries;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ u8 *value_addr;
+
+ v3d_get_bo_vaddr(bo);
+
+ for (int i = 0; i < timestamp_query->count; i++) {
+ value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
+ *((u64 *)value_addr) = 0;
+
+ drm_syncobj_replace_fence(queries[i].syncobj, NULL);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+{
+ if (do_64bit) {
+ u64 *dst64 = (u64 *)dst;
+
+ dst64[idx] = value;
+ } else {
+ u32 *dst32 = (u32 *)dst;
+
+ dst32[idx] = (u32)value;
+ }
+}
+
+static void
+v3d_copy_query_results(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_timestamp_query *queries = timestamp_query->queries;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct dma_fence *fence;
+ u8 *query_addr;
+ bool available, write_result;
+ u8 *data;
+ int i;
+
+ v3d_get_bo_vaddr(bo);
+ v3d_get_bo_vaddr(timestamp);
+
+ data = ((u8 *)bo->vaddr) + copy->offset;
+
+ for (i = 0; i < timestamp_query->count; i++) {
+ fence = drm_syncobj_fence_get(queries[i].syncobj);
+ available = fence ? dma_fence_is_signaled(fence) : false;
+
+ write_result = available || copy->do_partial;
+ if (write_result) {
+ query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
+ write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
+ }
+
+ if (copy->availability_bit)
+ write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);
+
+ data += copy->stride;
+
+ dma_fence_put(fence);
+ }
+
+ v3d_put_bo_vaddr(timestamp);
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_reset_performance_queries(struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_perfmon *perfmon;
+
+ for (int i = 0; i < performance_query->count; i++) {
+ for (int j = 0; j < performance_query->nperfmons; j++) {
+ perfmon = v3d_perfmon_find(v3d_priv,
+ performance_query->queries[i].kperfmon_ids[j]);
+ if (!perfmon) {
+ DRM_DEBUG("Failed to find perfmon.");
+ continue;
+ }
+
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));
+
+ v3d_perfmon_put(perfmon);
+ }
+
+ drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
+ }
+}
+
+static void
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_perfmon *perfmon;
+ u64 counter_values[V3D_PERFCNT_NUM];
+
+ for (int i = 0; i < performance_query->nperfmons; i++) {
+ perfmon = v3d_perfmon_find(v3d_priv,
+ performance_query->queries[query].kperfmon_ids[i]);
+ if (!perfmon) {
+ DRM_DEBUG("Failed to find perfmon.");
+ continue;
+ }
+
+ v3d_perfmon_stop(v3d, perfmon, true);
+
+ memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
+ perfmon->ncounters * sizeof(u64));
+
+ v3d_perfmon_put(perfmon);
+ }
+
+ for (int i = 0; i < performance_query->ncounters; i++)
+ write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
+}
+
+static void
+v3d_copy_performance_query(struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct dma_fence *fence;
+ bool available, write_result;
+ u8 *data;
+
+ v3d_get_bo_vaddr(bo);
+
+ data = ((u8 *)bo->vaddr) + copy->offset;
+
+ for (int i = 0; i < performance_query->count; i++) {
+ fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
+ available = fence ? dma_fence_is_signaled(fence) : false;
+
+ write_result = available || copy->do_partial;
+ if (write_result)
+ v3d_write_performance_query_result(job, data, i);
+
+ if (copy->availability_bit)
+ write_to_buffer(data, performance_query->ncounters,
+ copy->do_64bit, available ? 1u : 0u);
+
+ data += copy->stride;
+
+ dma_fence_put(fence);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static const v3d_cpu_job_fn cpu_job_function[] = {
+ [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
+ [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
+ [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
+ [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
+ [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
+ [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
+};
+
+static struct dma_fence *
+v3d_cpu_job_run(struct drm_sched_job *sched_job)
+{
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
+ u64 runtime;
+
+ v3d->cpu_job = job;
+
+ if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
+ DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
+ return NULL;
+ }
+
+ file->start_ns[V3D_CPU] = local_clock();
+ v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU];
+
+ trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);
+
+ cpu_job_function[job->job_type](job);
+
+ trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
+
+ runtime = local_clock() - file->start_ns[V3D_CPU];
+
+ file->enabled_ns[V3D_CPU] += runtime;
+ v3d->queue[V3D_CPU].enabled_ns += runtime;
+
+ file->jobs_sent[V3D_CPU]++;
+ v3d->queue[V3D_CPU].jobs_sent++;
+
+ file->start_ns[V3D_CPU] = 0;
+ v3d->queue[V3D_CPU].start_ns = 0;
+
+ return NULL;
+}
+
static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_dev *v3d = job->v3d;
+ struct v3d_file_priv *file = job->file->driver_priv;
+ u64 runtime;
+
+ file->start_ns[V3D_CACHE_CLEAN] = local_clock();
+ v3d->queue[V3D_CACHE_CLEAN].start_ns = file->start_ns[V3D_CACHE_CLEAN];
v3d_clean_caches(v3d);
+ runtime = local_clock() - file->start_ns[V3D_CACHE_CLEAN];
+
+ file->enabled_ns[V3D_CACHE_CLEAN] += runtime;
+ v3d->queue[V3D_CACHE_CLEAN].enabled_ns += runtime;
+
+ file->jobs_sent[V3D_CACHE_CLEAN]++;
+ v3d->queue[V3D_CACHE_CLEAN].jobs_sent++;
+
+ file->start_ns[V3D_CACHE_CLEAN] = 0;
+ v3d->queue[V3D_CACHE_CLEAN].start_ns = 0;
+
return NULL;
}
@@ -336,7 +674,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);
+ u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
/* If we've made progress, skip reset and let the timer get
* rearmed.
@@ -379,6 +717,12 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.free_job = v3d_sched_job_free
};
+static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
+ .run_job = v3d_cpu_job_run,
+ .timedout_job = v3d_generic_job_timedout,
+ .free_job = v3d_cpu_job_free
+};
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
@@ -388,7 +732,7 @@ v3d_sched_init(struct v3d_dev *v3d)
int ret;
ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
- &v3d_bin_sched_ops,
+ &v3d_bin_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -397,7 +741,7 @@ v3d_sched_init(struct v3d_dev *v3d)
return ret;
ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
- &v3d_render_sched_ops,
+ &v3d_render_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -406,7 +750,7 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
- &v3d_tfu_sched_ops,
+ &v3d_tfu_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -416,7 +760,7 @@ v3d_sched_init(struct v3d_dev *v3d)
if (v3d_has_csd(v3d)) {
ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
- &v3d_csd_sched_ops,
+ &v3d_csd_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -425,7 +769,7 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
- &v3d_cache_clean_sched_ops,
+ &v3d_cache_clean_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -434,6 +778,15 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
}
+ ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
+ &v3d_cpu_sched_ops, NULL,
+ DRM_SCHED_PRIORITY_COUNT,
+ 1, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms), NULL,
+ NULL, "v3d_cpu", v3d->drm.dev);
+ if (ret)
+ goto fail;
+
return 0;
fail:
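v3d_cpu_job_run() above dispatches through a function-pointer table indexed by the job type, with a bounds check before the call so an unknown type is rejected rather than read past the array. A stripped-down sketch of that dispatch pattern (the job types and handlers below are placeholders, not the driver's):

#include <stdio.h>

enum cpu_job_type {
	CPU_JOB_TYPE_INDIRECT_CSD = 1,
	CPU_JOB_TYPE_TIMESTAMP_QUERY,
};

struct cpu_job {
	enum cpu_job_type job_type;
};

typedef void (*cpu_job_fn)(struct cpu_job *);

static void run_indirect_csd(struct cpu_job *job)	{ (void)job; printf("indirect CSD\n"); }
static void run_timestamp_query(struct cpu_job *job)	{ (void)job; printf("timestamp query\n"); }

/* Index 0 is intentionally unused, mirroring the 1-based enum in the diff. */
static const cpu_job_fn cpu_job_function[] = {
	[CPU_JOB_TYPE_INDIRECT_CSD]    = run_indirect_csd,
	[CPU_JOB_TYPE_TIMESTAMP_QUERY] = run_timestamp_query,
};

static int run_cpu_job(struct cpu_job *job)
{
	/* Reject types outside the table (and unset slots) before dispatching. */
	if (job->job_type >= sizeof(cpu_job_function) / sizeof(cpu_job_function[0]) ||
	    !cpu_job_function[job->job_type])
		return -1;

	cpu_job_function[job->job_type](job);
	return 0;
}

int main(void)
{
	struct cpu_job job = { .job_type = CPU_JOB_TYPE_TIMESTAMP_QUERY };

	return run_cpu_job(&job);
}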
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
new file mode 100644
index 000000000000..fcff41dd2315
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -0,0 +1,1320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014-2018 Broadcom
+ * Copyright (C) 2023 Raspberry Pi
+ */
+
+#include <drm/drm_syncobj.h>
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+#include "v3d_trace.h"
+
+/* Takes the reservation lock on all the BOs being referenced, so that
+ * at queue submit time we can update the reservations.
+ *
+ * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
+ * (all of which are on exec->unref_list). They're entirely private
+ * to v3d, so we don't attach dma-buf fences to them.
+ */
+static int
+v3d_lock_bo_reservations(struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i, ret;
+
+ ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < job->bo_count; i++) {
+ ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+ if (ret)
+ goto fail;
+
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+ return ret;
+}
+
+/**
+ * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @job: V3D job being set up
+ * @bo_handles: GEM handles
+ * @bo_count: Number of GEM handles passed in
+ *
+ * The command validator needs to reference BOs by their index within
+ * the submitted job's BO list. This does the validation of the job's
+ * BO list and reference counting for the lifetime of the job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at v3d_exec_cleanup() time.
+ */
+static int
+v3d_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct v3d_job *job,
+ u64 bo_handles,
+ u32 bo_count)
+{
+ job->bo_count = bo_count;
+
+ if (!job->bo_count) {
+ /* See comment on bo_index for why we have to check
+ * this.
+ */
+ DRM_DEBUG("Rendering requires BOs\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)bo_handles,
+ job->bo_count, &job->bo);
+}
+
+static void
+v3d_job_free(struct kref *ref)
+{
+ struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+ int i;
+
+ if (job->bo) {
+ for (i = 0; i < job->bo_count; i++)
+ drm_gem_object_put(job->bo[i]);
+ kvfree(job->bo);
+ }
+
+ dma_fence_put(job->irq_fence);
+ dma_fence_put(job->done_fence);
+
+ if (job->perfmon)
+ v3d_perfmon_put(job->perfmon);
+
+ kfree(job);
+}
+
+static void
+v3d_render_job_free(struct kref *ref)
+{
+ struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
+ base.refcount);
+ struct v3d_bo *bo, *save;
+
+ list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
+ drm_gem_object_put(&bo->base.base);
+ }
+
+ v3d_job_free(ref);
+}
+
+void v3d_job_cleanup(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ drm_sched_job_cleanup(&job->base);
+ v3d_job_put(job);
+}
+
+void v3d_job_put(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ kref_put(&job->refcount, job->free);
+}
+
+static int
+v3d_job_allocate(void **container, size_t size)
+{
+ *container = kcalloc(1, size, GFP_KERNEL);
+ if (!*container) {
+ DRM_ERROR("Cannot allocate memory for V3D job.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ struct v3d_job *job, void (*free)(struct kref *ref),
+ u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int ret, i;
+
+ job->v3d = v3d;
+ job->free = free;
+ job->file = file_priv;
+
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ 1, v3d_priv);
+ if (ret)
+ return ret;
+
+ if (has_multisync) {
+ if (se->in_sync_count && se->wait_stage == queue) {
+ struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+ for (i = 0; i < se->in_sync_count; i++) {
+ struct drm_v3d_sem in;
+
+ if (copy_from_user(&in, handle++, sizeof(in))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy wait dep handle.\n");
+ goto fail_deps;
+ }
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
+
+ // TODO: Investigate why this was filtered out for the IOCTL.
+ if (ret && ret != -ENOENT)
+ goto fail_deps;
+ }
+ }
+ } else {
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
+
+ // TODO: Investigate why this was filtered out for the IOCTL.
+ if (ret && ret != -ENOENT)
+ goto fail_deps;
+ }
+
+ kref_init(&job->refcount);
+
+ return 0;
+
+fail_deps:
+ drm_sched_job_cleanup(&job->base);
+ return ret;
+}
+
+static void
+v3d_push_job(struct v3d_job *job)
+{
+ drm_sched_job_arm(&job->base);
+
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ /* put by scheduler job completion */
+ kref_get(&job->refcount);
+
+ drm_sched_entity_push_job(&job->base);
+}
+
+static void
+v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
+ struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx,
+ u32 out_sync,
+ struct v3d_submit_ext *se,
+ struct dma_fence *done_fence)
+{
+ struct drm_syncobj *sync_out;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int i;
+
+ for (i = 0; i < job->bo_count; i++) {
+ /* XXX: Use shared fences for read-only objects. */
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+
+ /* Update the return sync object for the job */
+ /* If it only supports a single signal semaphore */
+ if (!has_multisync) {
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ return;
+ }
+
+ /* If multiple semaphores extension is supported */
+ if (se->out_sync_count) {
+ for (i = 0; i < se->out_sync_count; i++) {
+ drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+ done_fence);
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ }
+ kvfree(se->out_syncs);
+ }
+}
+
+static int
+v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
+ struct v3d_dev *v3d,
+ struct drm_v3d_submit_csd *args,
+ struct v3d_csd_job **job,
+ struct v3d_job **clean_job,
+ struct v3d_submit_ext *se,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int ret;
+
+ ret = v3d_job_allocate((void *)job, sizeof(**job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &(*job)->base,
+ v3d_job_free, args->in_sync, se, V3D_CSD);
+ if (ret)
+ return ret;
+
+ ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, *clean_job,
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+ if (ret)
+ return ret;
+
+ (*job)->args = *args;
+
+ ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ return ret;
+
+ return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
+}
+
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+ unsigned int i;
+
+ if (!(se && se->out_sync_count))
+ return;
+
+ for (i = 0; i < se->out_sync_count; i++)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+ struct v3d_submit_ext *se,
+ u32 count, u64 handles)
+{
+ struct drm_v3d_sem __user *post_deps;
+ int i, ret;
+
+ if (!count)
+ return 0;
+
+ se->out_syncs = (struct v3d_submit_outsync *)
+ kvmalloc_array(count,
+ sizeof(struct v3d_submit_outsync),
+ GFP_KERNEL);
+ if (!se->out_syncs)
+ return -ENOMEM;
+
+ post_deps = u64_to_user_ptr(handles);
+
+ for (i = 0; i < count; i++) {
+ struct drm_v3d_sem out;
+
+ if (copy_from_user(&out, post_deps++, sizeof(out))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy post dep handles\n");
+ goto fail;
+ }
+
+ se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+ out.handle);
+ if (!se->out_syncs[i].syncobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+ se->out_sync_count = count;
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+
+ return ret;
+}
+
+/* Get data for synchronization with multiple binary semaphores. Parses the
+ * syncobjs to be signaled when the job completes (out_syncs).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_submit_ext *se)
+{
+ struct drm_v3d_multi_sync multisync;
+ int ret;
+
+ if (se->in_sync_count || se->out_sync_count) {
+ DRM_DEBUG("Two multisync extensions were added to the same job.");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&multisync, ext, sizeof(multisync)))
+ return -EFAULT;
+
+ if (multisync.pad)
+ return -EINVAL;
+
+ ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
+ multisync.out_syncs);
+ if (ret)
+ return ret;
+
+ se->in_sync_count = multisync.in_sync_count;
+ se->in_syncs = multisync.in_syncs;
+ se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+ se->wait_stage = multisync.wait_stage;
+
+ return 0;
+}
+
+/* Get data for the indirect CSD job submission. */
+static int
+v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct v3d_dev *v3d = v3d_priv->v3d;
+ struct drm_v3d_indirect_csd indirect_csd;
+ struct v3d_indirect_csd_info *info;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ info = &job->indirect_csd;
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
+ return -EFAULT;
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
+ return -EINVAL;
+ }
+
+ job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
+ info->offset = indirect_csd.offset;
+ info->wg_size = indirect_csd.wg_size;
+ memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
+ sizeof(indirect_csd.wg_uniform_offsets));
+
+ info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
+
+ return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
+ &info->job, &info->clean_job,
+ NULL, &info->acquire_ctx);
+}
+
+/* Get data for the query timestamp job submission. */
+static int
+v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_timestamp_query timestamp;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
+ return -EFAULT;
+
+ if (timestamp.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(timestamp.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ offsets = u64_to_user_ptr(timestamp.offsets);
+ syncs = u64_to_user_ptr(timestamp.syncs);
+
+ for (int i = 0; i < timestamp.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = timestamp.count;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ struct drm_v3d_reset_timestamp_query reset;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(reset.syncs);
+
+ for (int i = 0; i < reset.count; i++) {
+ u32 sync;
+
+ job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = reset.count;
+
+ return 0;
+}
+
+/* Get data for the copy timestamp query results job submission. */
+static int
+v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_copy_timestamp_query copy;
+ int i;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&copy, ext, sizeof(copy)))
+ return -EFAULT;
+
+ if (copy.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ offsets = u64_to_user_ptr(copy.offsets);
+ syncs = u64_to_user_ptr(copy.syncs);
+
+ for (i = 0; i < copy.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = copy.count;
+
+ job->copy.do_64bit = copy.do_64bit;
+ job->copy.do_partial = copy.do_partial;
+ job->copy.availability_bit = copy.availability_bit;
+ job->copy.offset = copy.offset;
+ job->copy.stride = copy.stride;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_reset_performance_query reset;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!job->performance_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(reset.syncs);
+ kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
+
+ for (int i = 0; i < reset.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (int j = 0; j < reset.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
+ }
+ job->performance_query.count = reset.count;
+ job->performance_query.nperfmons = reset.nperfmons;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_copy_performance_query copy;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&copy, ext, sizeof(copy)))
+ return -EFAULT;
+
+ if (copy.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!job->performance_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(copy.syncs);
+ kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+
+ for (int i = 0; i < copy.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (int j = 0; j < copy.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
+ }
+ job->performance_query.count = copy.count;
+ job->performance_query.nperfmons = copy.nperfmons;
+ job->performance_query.ncounters = copy.ncounters;
+
+ job->copy.do_64bit = copy.do_64bit;
+ job->copy.do_partial = copy.do_partial;
+ job->copy.availability_bit = copy.availability_bit;
+ job->copy.offset = copy.offset;
+ job->copy.stride = copy.stride;
+
+ return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+ u64 ext_handles,
+ struct v3d_submit_ext *se,
+ struct v3d_cpu_job *job)
+{
+ struct drm_v3d_extension __user *user_ext;
+ int ret;
+
+ user_ext = u64_to_user_ptr(ext_handles);
+ while (user_ext) {
+ struct drm_v3d_extension ext;
+
+ if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+ DRM_DEBUG("Failed to copy submit extension\n");
+ return -EFAULT;
+ }
+
+ switch (ext.id) {
+ case DRM_V3D_EXT_ID_MULTI_SYNC:
+ ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
+ break;
+ case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
+ ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
+ ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
+ ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ user_ext = u64_to_user_ptr(ext.next);
+ }
+
+ return 0;
+}
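/* A brief userspace-side sketch of the extension chain that the loop above
 * walks, using the multisync case as the example. The struct, field, and id
 * names are the uAPI ones referenced in this file; the embedded header being
 * a leading member called "base", and how the chain is finally attached to a
 * submit ioctl, are illustrative assumptions rather than part of this patch.
 */
#include <stdint.h>
#include <string.h>
#include <drm/v3d_drm.h>

static void example_fill_multisync(struct drm_v3d_multi_sync *ms,
				   uint64_t in_syncs, uint32_t in_count,
				   uint64_t out_syncs, uint32_t out_count,
				   uint32_t wait_stage)
{
	memset(ms, 0, sizeof(*ms));
	ms->base.id = DRM_V3D_EXT_ID_MULTI_SYNC;	/* selects the case above */
	ms->base.next = 0;				/* last (only) link in the chain */
	ms->in_syncs = in_syncs;			/* user pointer to struct drm_v3d_sem[] */
	ms->in_sync_count = in_count;
	ms->out_syncs = out_syncs;			/* user pointer to struct drm_v3d_sem[] */
	ms->out_sync_count = out_count;
	ms->wait_stage = wait_stage;			/* v3d_queue whose wait deps apply */
	/* A submit then sets DRM_V3D_SUBMIT_EXTENSION in its flags and points
	 * its extensions field at &ms->base.
	 */
}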
+
+/**
+ * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * This is the main entrypoint for userspace to submit a 3D frame to
+ * the GPU. Userspace provides the binner command list (if
+ * applicable), and the kernel sets up the render command list to draw
+ * to the framebuffer described in the ioctl, using the command lists
+ * that the 3D engine's binner will produce.
+ */
+int
+v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_cl *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_bin_job *bin = NULL;
+ struct v3d_render_job *render = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags &&
+ args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+ DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_allocate((void *)&render, sizeof(*render));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &render->base,
+ v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+ if (ret)
+ goto fail;
+
+ render->start = args->rcl_start;
+ render->end = args->rcl_end;
+ INIT_LIST_HEAD(&render->unref_list);
+
+ if (args->bcl_start != args->bcl_end) {
+ ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, &bin->base,
+ v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+ if (ret)
+ goto fail;
+
+ bin->start = args->bcl_start;
+ bin->end = args->bcl_end;
+ bin->qma = args->qma;
+ bin->qms = args->qms;
+ bin->qts = args->qts;
+ bin->render = render;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, clean_job,
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail;
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ if (args->perfmon_id) {
+ render->base.perfmon = v3d_perfmon_find(v3d_priv,
+ args->perfmon_id);
+
+ if (!render->base.perfmon) {
+ ret = -ENOENT;
+ goto fail_perfmon;
+ }
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ if (bin) {
+ bin->base.perfmon = render->base.perfmon;
+ v3d_perfmon_get(bin->base.perfmon);
+ v3d_push_job(&bin->base);
+
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+ }
+
+ v3d_push_job(&render->base);
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
+ if (ret)
+ goto fail_unreserve;
+ clean_job->perfmon = render->base.perfmon;
+ v3d_perfmon_get(clean_job->perfmon);
+ v3d_push_job(clean_job);
+ }
+
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ last_job,
+ &acquire_ctx,
+ args->out_sync,
+ &se,
+ last_job->done_fence);
+
+ v3d_job_put(&bin->base);
+ v3d_job_put(&render->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
+fail:
+ v3d_job_cleanup((void *)bin);
+ v3d_job_cleanup((void *)render);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct drm_v3d_submit_tfu *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_tfu_job *job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_DEBUG("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_allocate((void *)&job, sizeof(*job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &job->base,
+ v3d_job_free, args->in_sync, &se, V3D_TFU);
+ if (ret)
+ goto fail;
+
+ job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+ sizeof(*job->base.bo), GFP_KERNEL);
+ if (!job->base.bo) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ job->args = *args;
+
+ for (job->base.bo_count = 0;
+ job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+ job->base.bo_count++) {
+ struct drm_gem_object *bo;
+
+ if (!args->bo_handles[job->base.bo_count])
+ break;
+
+ bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
+ if (!bo) {
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+ job->base.bo_count,
+ args->bo_handles[job->base.bo_count]);
+ ret = -ENOENT;
+ goto fail;
+ }
+ job->base.bo[job->base.bo_count] = bo;
+ }
+
+ ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&job->base);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &job->base, &acquire_ctx,
+ args->out_sync,
+ &se,
+ job->base.done_fence);
+
+ v3d_job_put(&job->base);
+
+ return 0;
+
+fail:
+ v3d_job_cleanup((void *)job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_csd *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_csd_job *job = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+ return -EINVAL;
+ }
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
+ &job, &clean_job, &se,
+ &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ if (args->perfmon_id) {
+ job->base.perfmon = v3d_perfmon_find(v3d_priv,
+ args->perfmon_id);
+ if (!job->base.perfmon) {
+ ret = -ENOENT;
+ goto fail_perfmon;
+ }
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&job->base);
+
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(clean_job);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &acquire_ctx,
+ args->out_sync,
+ &se,
+ clean_job->done_fence);
+
+ v3d_job_put(&job->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &acquire_ctx);
+fail:
+ v3d_job_cleanup((void *)job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
+static const unsigned int cpu_job_bo_handle_count[] = {
+ [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
+ [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
+ [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
+ [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
+ [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
+ [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
+};
+
+/**
+ * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace specifies the CPU job type and data required to perform its
+ * operations through the drm_v3d_extension struct.
+ */
+int
+v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct drm_v3d_submit_cpu *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_submit_ext *out_se = NULL;
+ struct v3d_cpu_job *cpu_job = NULL;
+ struct v3d_csd_job *csd_job = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("Invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
+ if (ret)
+ return ret;
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ goto fail;
+ }
+ }
+
+ /* Every CPU job must have a CPU job user extension */
+ if (!cpu_job->job_type) {
+ DRM_DEBUG("CPU job must have a CPU job user extension.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
+ DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
+
+ ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
+ v3d_job_free, 0, &se, V3D_CPU);
+ if (ret)
+ goto fail;
+
+ clean_job = cpu_job->indirect_csd.clean_job;
+ csd_job = cpu_job->indirect_csd.job;
+
+ if (args->bo_handle_count) {
+ ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&cpu_job->base);
+
+ switch (cpu_job->job_type) {
+ case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+ ret = drm_sched_job_add_dependency(&csd_job->base.base,
+ dma_fence_get(cpu_job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(&csd_job->base);
+
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(csd_job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(clean_job);
+
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&v3d->sched_lock);
+
+ out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &cpu_job->base,
+ &acquire_ctx, 0,
+ out_se, cpu_job->base.done_fence);
+
+ switch (cpu_job->job_type) {
+ case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &cpu_job->indirect_csd.acquire_ctx,
+ 0, &se, clean_job->done_fence);
+ break;
+ default:
+ break;
+ }
+
+ v3d_job_put(&cpu_job->base);
+ v3d_job_put(&csd_job->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+
+ drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
+ &acquire_ctx);
+
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &cpu_job->indirect_csd.acquire_ctx);
+
+fail:
+ v3d_job_cleanup((void *)cpu_job);
+ v3d_job_cleanup((void *)csd_job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+ kvfree(cpu_job->timestamp_query.queries);
+ kvfree(cpu_job->performance_query.queries);
+
+ return ret;
+}
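/* A minimal, hedged sketch of what a CPU-job submission from userspace could
 * look like for the ioctl above: one timestamp-query extension plus the single
 * BO that holds the query results. Struct, field, and id names are the uAPI
 * ones used in this file; the leading "base" header member, the
 * DRM_IOCTL_V3D_SUBMIT_CPU wrapper name, and the libdrm drmIoctl() call are
 * assumptions for illustration only.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/v3d_drm.h>

static int example_submit_timestamp_query(int fd, uint32_t bo_handle,
					   uint64_t offsets, uint64_t syncs,
					   uint32_t count)
{
	struct drm_v3d_timestamp_query ts;
	struct drm_v3d_submit_cpu submit;

	memset(&ts, 0, sizeof(ts));
	ts.base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY;
	ts.base.next = 0;		/* single extension in the chain */
	ts.offsets = offsets;		/* user pointer to u32 offsets[count] */
	ts.syncs = syncs;		/* user pointer to u32 syncobj handles[count] */
	ts.count = count;

	memset(&submit, 0, sizeof(submit));
	submit.flags = DRM_V3D_SUBMIT_EXTENSION;
	submit.extensions = (uintptr_t)&ts;
	submit.bo_handle_count = 1;	/* must match cpu_job_bo_handle_count[] */
	submit.bo_handles = (uintptr_t)&bo_handle;

	return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
}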
diff --git a/drivers/gpu/drm/v3d/v3d_sysfs.c b/drivers/gpu/drm/v3d/v3d_sysfs.c
new file mode 100644
index 000000000000..d106845ba890
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_sysfs.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Igalia S.L.
+ */
+
+#include <linux/sched/clock.h>
+#include <linux/sysfs.h>
+
+#include "v3d_drv.h"
+
+static ssize_t
+gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct v3d_dev *v3d = to_v3d_dev(drm);
+ enum v3d_queue queue;
+ u64 timestamp = local_clock();
+ u64 active_runtime;
+ ssize_t len = 0;
+
+ len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n");
+
+ for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
+ if (v3d->queue[queue].start_ns)
+ active_runtime = timestamp - v3d->queue[queue].start_ns;
+ else
+ active_runtime = 0;
+
+ /* Each line will display the queue name, timestamp, the number
+ * of jobs sent to that queue and the runtime, as can be seen here:
+ *
+ * queue timestamp jobs runtime
+ * bin 239043069420 22620 17438164056
+ * render 239043069420 22619 27284814161
+ * tfu 239043069420 8763 394592566
+ * csd 239043069420 3168 10787905530
+ * cache_clean 239043069420 6127 237375940
+ */
+ len += sysfs_emit_at(buf, len, "%s\t%llu\t%llu\t%llu\n",
+ v3d_queue_to_string(queue),
+ timestamp,
+ v3d->queue[queue].jobs_sent,
+ v3d->queue[queue].enabled_ns + active_runtime);
+ }
+
+ return len;
+}
+static DEVICE_ATTR_RO(gpu_stats);
+
+static struct attribute *v3d_sysfs_entries[] = {
+ &dev_attr_gpu_stats.attr,
+ NULL,
+};
+
+static struct attribute_group v3d_sysfs_attr_group = {
+ .attrs = v3d_sysfs_entries,
+};
+
+int
+v3d_sysfs_init(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &v3d_sysfs_attr_group);
+}
+
+void
+v3d_sysfs_destroy(struct device *dev)
+{
+ return sysfs_remove_group(&dev->kobj, &v3d_sysfs_attr_group);
+}
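/* Hedged sketch: consuming the gpu_stats attribute added above from
 * userspace. The column layout matches the comment in gpu_stats_show(); the
 * path below assumes the attribute is reachable through the DRM card's parent
 * device node, which may differ on a given system.
 */
#include <stdio.h>
#include <inttypes.h>

static void example_dump_gpu_stats(void)
{
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_stats", "r");
	char queue[32];
	uint64_t timestamp, jobs, runtime;

	if (!f)
		return;

	/* Skip the "queue\ttimestamp\tjobs\truntime" header line. */
	fscanf(f, "%*[^\n]\n");

	while (fscanf(f, "%31s %" SCNu64 " %" SCNu64 " %" SCNu64 "\n",
		      queue, &timestamp, &jobs, &runtime) == 4)
		printf("%s: %" PRIu64 " jobs, %" PRIu64 " ns active\n",
		       queue, jobs, runtime);

	fclose(f);
}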
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 7aa8dc356e54..5917b94148f5 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -225,6 +225,63 @@ TRACE_EVENT(v3d_submit_csd,
__entry->seqno)
);
+TRACE_EVENT(v3d_submit_cpu_ioctl,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_begin,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_end,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
TRACE_EVENT(v3d_cache_clean_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 047b95812334..cd9e66a06596 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -182,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops);
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
.fops = &vbox_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 341edd982cb3..9ff3bade9795 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -429,8 +429,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
VBOX_MOUSE_POINTER_ALPHA;
hgsmi_update_pointer_shape(vbox->guest_pool, flags,
- min_t(u32, max(fb->hot_x, 0), width),
- min_t(u32, max(fb->hot_y, 0), height),
+ min_t(u32, max(new_state->hotspot_x, 0), width),
+ min_t(u32, max(new_state->hotspot_y, 0), height),
width, height, vbox->cursor_data, data_size);
mutex_unlock(&vbox->hw_mutex);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 4334c7608408..f8e9abe647b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -177,7 +177,7 @@ static const struct drm_driver driver = {
* out via drm_device::driver_features:
*/
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
- DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 96365a772f77..bb7d86a0c6a1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -58,6 +58,9 @@
#define MAX_CAPSET_ID 63
#define MAX_RINGS 64
+/* See virtio_gpu_ctx_create. One additional character for NULL terminator. */
+#define DEBUG_NAME_MAX_LEN 65
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -274,6 +277,8 @@ struct virtio_gpu_fpriv {
uint64_t base_fence_ctx;
uint64_t ring_idx_mask;
struct mutex context_lock;
+ char debug_name[DEBUG_NAME_MAX_LEN];
+ bool explicit_debug_name;
};
/* virtgpu_ioctl.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b24b11f25197..e4f76f315550 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -42,12 +42,19 @@
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fpriv *vfpriv)
{
- char dbgname[TASK_COMM_LEN];
+ if (vfpriv->explicit_debug_name) {
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init,
+ strlen(vfpriv->debug_name),
+ vfpriv->debug_name);
+ } else {
+ char dbgname[TASK_COMM_LEN];
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- vfpriv->context_init, strlen(dbgname),
- dbgname);
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+ }
vfpriv->context_created = true;
}
@@ -107,6 +114,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
value = vgdev->capset_id_mask;
break;
+ case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -565,8 +575,8 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
int ret = 0;
- uint32_t num_params, i, param, value;
- uint64_t valid_ring_mask;
+ uint32_t num_params, i;
+ uint64_t valid_ring_mask, param, value;
size_t len;
struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -580,7 +590,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
return -EINVAL;
/* Number of unique parameters supported at this time. */
- if (num_params > 3)
+ if (num_params > 4)
return -EINVAL;
ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
@@ -642,6 +652,21 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
vfpriv->ring_idx_mask = value;
break;
+ case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
+ if (vfpriv->explicit_debug_name) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = strncpy_from_user(vfpriv->debug_name,
+ u64_to_user_ptr(value),
+ DEBUG_NAME_MAX_LEN - 1);
+ if (ret < 0)
+ goto out_unlock;
+
+ vfpriv->explicit_debug_name = true;
+ ret = 0;
+ break;
default:
ret = -EINVAL;
goto out_unlock;
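/* A hedged sketch of the userspace side of the new DEBUG_NAME parameter:
 * probe for support with the getparam added above, then pass a name at
 * context-init time. The drmIoctl()/DRM_IOCTL_VIRTGPU_* wrappers and struct
 * field names follow the existing virtio-gpu uAPI and are assumptions here,
 * not part of this patch.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

static int example_name_context(int fd, const char *name)
{
	struct drm_virtgpu_getparam gp;
	struct drm_virtgpu_context_set_param sp;
	struct drm_virtgpu_context_init init;
	int supported = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME;
	gp.value = (uintptr_t)&supported;
	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) || !supported)
		return 0;	/* kernel falls back to the task comm as before */

	memset(&sp, 0, sizeof(sp));
	sp.param = VIRTGPU_CONTEXT_PARAM_DEBUG_NAME;
	sp.value = (uintptr_t)name;	/* NUL-terminated string */

	memset(&init, 0, sizeof(init));
	init.num_params = 1;
	init.ctx_set_params = (uintptr_t)&sp;

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}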
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a000..a72a2dbda031 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
struct drm_crtc_state *crtc_state;
int ret;
@@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
return 0;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ new_plane_state->ignore_damage_clips = true;
+
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
@@ -323,16 +333,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
plane->state->crtc_x,
plane->state->crtc_y,
- plane->state->fb ? plane->state->fb->hot_x : 0,
- plane->state->fb ? plane->state->fb->hot_y : 0);
+ plane->state->hotspot_x,
+ plane->state->hotspot_y);
output->cursor.hdr.type =
cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
output->cursor.resource_id = cpu_to_le32(handle);
if (plane->state->fb) {
output->cursor.hot_x =
- cpu_to_le32(plane->state->fb->hot_x);
+ cpu_to_le32(plane->state->hotspot_x);
output->cursor.hot_y =
- cpu_to_le32(plane->state->fb->hot_y);
+ cpu_to_le32(plane->state->hotspot_y);
} else {
output->cursor.hot_x = cpu_to_le32(0);
output->cursor.hot_y = cpu_to_le32(0);
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index d7e63aa14663..bc724cbd5e3a 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -30,17 +30,25 @@ static const struct drm_connector_funcs vkms_wb_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int vkms_wb_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
- const struct drm_display_mode *mode = &crtc_state->mode;
+ const struct drm_display_mode *mode;
int ret;
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
+ if (!conn_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->mode;
+
fb = conn_state->writeback_job->fb;
if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
@@ -48,17 +56,13 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
- ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
+ ret = drm_atomic_helper_check_wb_connector_state(connector, state);
if (ret < 0)
return ret;
return 0;
}
-static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
- .atomic_check = vkms_wb_encoder_atomic_check,
-};
-
static int vkms_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -161,6 +165,7 @@ static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
.prepare_writeback_job = vkms_wb_prepare_job,
.cleanup_writeback_job = vkms_wb_cleanup_job,
.atomic_commit = vkms_wb_atomic_commit,
+ .atomic_check = vkms_wb_atomic_check,
};
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
@@ -171,7 +176,7 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
return drm_writeback_connector_init(&vkmsdev->drm, wb,
&vkms_wb_connector_funcs,
- &vkms_wb_encoder_helper_funcs,
+ NULL,
vkms_wb_formats,
ARRAY_SIZE(vkms_wb_formats),
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8b24ecf60e3e..d3e308fdfd5b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1611,7 +1611,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 818b7f109f53..5fd0ccaa0b41 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -768,13 +768,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
- hotspot_x = du->hotspot_x;
- hotspot_y = du->hotspot_y;
-
- if (new_state->fb) {
- hotspot_x += new_state->fb->hot_x;
- hotspot_y += new_state->fb->hot_y;
- }
+ hotspot_x = du->hotspot_x + new_state->hotspot_x;
+ hotspot_y = du->hotspot_y + new_state->hotspot_y;
du->cursor_surface = vps->surf;
du->cursor_bo = vps->bo;
@@ -837,10 +832,21 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state = NULL;
struct drm_framebuffer *new_fb = new_state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
int ret;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_fb != new_fb)
+ new_state->ignore_damage_clips = true;
+
if (new_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_state->crtc);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index a7f8611be6f4..db3bb4afbfc4 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -27,7 +27,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 3ca45975c686..d9e9829b2200 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -345,6 +345,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "AONE" },
{ "GANSS" },
{ "Hailuck" },
+ { "Jamesdonkey" },
+ { "A3R" },
};
static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index fd61dba88233..78cdfb8b9a7a 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -381,7 +381,7 @@ static int asus_raw_event(struct hid_device *hdev,
return 0;
}
-static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size)
+static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size)
{
unsigned char *dmabuf;
int ret;
@@ -404,7 +404,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
static int asus_kbd_init(struct hid_device *hdev)
{
- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
int ret;
@@ -418,7 +418,7 @@ static int asus_kbd_init(struct hid_device *hdev)
static int asus_kbd_get_functions(struct hid_device *hdev,
unsigned char *kbd_func)
{
- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
u8 *readbuf;
int ret;
@@ -449,7 +449,7 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
static int rog_nkey_led_init(struct hid_device *hdev)
{
- u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
+ const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
@@ -1000,6 +1000,24 @@ static int asus_start_multitouch(struct hid_device *hdev)
return 0;
}
+static int __maybe_unused asus_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (drvdata->kbd_backlight) {
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4,
+ drvdata->kbd_backlight->cdev.brightness };
+ ret = asus_kbd_set_report(hdev, buf, sizeof(buf));
+ if (ret < 0) {
+ hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret);
+ goto asus_resume_err;
+ }
+ }
+
+asus_resume_err:
+ return ret;
+}
+
static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
@@ -1294,6 +1312,7 @@ static struct hid_driver asus_driver = {
.input_configured = asus_input_configured,
#ifdef CONFIG_PM
.reset_resume = asus_reset_resume,
+ .resume = asus_resume,
#endif
.event = asus_event,
.raw_event = asus_raw_event
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8992e3c1e769..e0181218ad85 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -702,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
* Free a device structure, all reports, and all fields.
*/
-static void hid_device_release(struct device *dev)
+void hiddev_free(struct kref *ref)
{
- struct hid_device *hid = to_hid_device(dev);
+ struct hid_device *hid = container_of(ref, struct hid_device, ref);
hid_close_report(hid);
kfree(hid->dev_rdesc);
kfree(hid);
}
+static void hid_device_release(struct device *dev)
+{
+ struct hid_device *hid = to_hid_device(dev);
+
+ kref_put(&hid->ref, hiddev_free);
+}
+
/*
* Fetch a report description item from the data stream. We support long
* items, though they are not used yet.
@@ -2846,6 +2853,7 @@ struct hid_device *hid_allocate_device(void)
spin_lock_init(&hdev->debug_list_lock);
sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock);
+ kref_init(&hdev->ref);
hid_bpf_device_init(hdev);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index e7ef1ea107c9..7dd83ec74f8a 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1135,6 +1135,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
list->hdev = (struct hid_device *) inode->i_private;
+ kref_get(&list->hdev->ref);
file->private_data = list;
mutex_init(&list->read_mutex);
@@ -1227,6 +1228,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
kfifo_free(&list->hid_debug_fifo);
+
+ kref_put(&list->hdev->ref, hiddev_free);
kfree(list);
return 0;
diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
index 558eb08c19ef..281b3a7187ce 100644
--- a/drivers/hid/hid-glorious.c
+++ b/drivers/hid/hid-glorious.c
@@ -21,6 +21,10 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
* Glorious Model O and O- specify the const flag in the consumer input
* report descriptor, which leads to inputs being ignored. Fix this
* by patching the descriptor.
+ *
+ * Glorious Model I incorrectly specifies the Usage Minimum for its
+ * keyboard HID report, causing keycodes to be misinterpreted.
+ * Fix this by setting Usage Minimum to 0 in that report.
*/
static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
@@ -32,6 +36,10 @@ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[85] = rdesc[113] = rdesc[141] = \
HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
}
+ if (*rsize == 156 && rdesc[41] == 1) {
+ hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n");
+ rdesc[41] = 0;
+ }
return rdesc;
}
@@ -44,6 +52,8 @@ static void glorious_update_name(struct hid_device *hdev)
model = "Model O"; break;
case USB_DEVICE_ID_GLORIOUS_MODEL_D:
model = "Model D"; break;
+ case USB_DEVICE_ID_GLORIOUS_MODEL_I:
+ model = "Model I"; break;
}
snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
@@ -66,10 +76,12 @@ static int glorious_probe(struct hid_device *hdev,
}
static const struct hid_device_id glorious_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
USB_DEVICE_ID_GLORIOUS_MODEL_O) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
USB_DEVICE_ID_GLORIOUS_MODEL_D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW,
+ USB_DEVICE_ID_GLORIOUS_MODEL_I) },
{ }
};
MODULE_DEVICE_TABLE(hid, glorious_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f7973ccd84a2..c6e4e0d1f214 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -511,10 +511,6 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
-#define USB_VENDOR_ID_GLORIOUS 0x258a
-#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
-#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
-
#define I2C_VENDOR_ID_GOODIX 0x27c6
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -745,6 +741,9 @@
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
+#define USB_VENDOR_ID_LAVIEW 0x22D4
+#define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503
+
#define USB_VENDOR_ID_LCPOWER 0x1241
#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
@@ -869,7 +868,6 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
-#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
@@ -1160,6 +1158,10 @@
#define USB_VENDOR_ID_SIGMATEL 0x066F
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+#define USB_VENDOR_ID_SINOWEALTH 0x258a
+#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
+#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
+
#define USB_VENDOR_ID_SIS_TOUCH 0x0457
#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 8afe3be683ba..e6a8b6d8eab7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1695,12 +1695,11 @@ static int logi_dj_raw_event(struct hid_device *hdev,
}
/*
* Mouse-only receivers send unnumbered mouse data. The 27 MHz
- * receiver uses 6 byte packets, the nano receiver 8 bytes,
- * the lightspeed receiver (Pro X Superlight) 13 bytes.
+ * receiver uses 6 byte packets, the nano receiver 8 bytes.
*/
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
- size <= 13){
- u8 mouse_report[14];
+ size <= 8) {
+ u8 mouse_report[9];
/* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE;
@@ -1984,10 +1983,6 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
- { /* Logitech lightspeed receiver (0xc547) */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
- .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 72883e0ce757..aef0785c91cc 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -1142,6 +1142,8 @@ static int mcp2221_probe(struct hid_device *hdev,
if (ret)
return ret;
+ hid_device_io_start(hdev);
+
/* Set I2C bus clock diviser */
if (i2c_clk_freq > 400)
i2c_clk_freq = 400;
@@ -1157,12 +1159,12 @@ static int mcp2221_probe(struct hid_device *hdev,
snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
"MCP2221 usb-i2c bridge");
+ i2c_set_adapdata(&mcp->adapter, mcp);
ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter);
if (ret) {
hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret);
return ret;
}
- i2c_set_adapdata(&mcp->adapter, mcp);
#if IS_REACHABLE(CONFIG_GPIOLIB)
/* Setup GPIO chip */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e098cc7b3944..fd5b0637dad6 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2046,6 +2046,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+ /* HONOR GLO-GXXX panel */
+ { .driver_data = MT_CLS_VTL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 0x347d, 0x7853) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index a4dccdcda26f..d7dddd99d325 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -505,6 +505,7 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
dev_err(dev, "can't get a free page for framebuffer\n");
goto err_nomem;
}
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = fbdata->bitmap;
info->fix.smem_start = (unsigned long)fbdata->bitmap;
memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5a48fcaa32f0..ea472923fab0 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index affcfb243f0f..35f762872b8a 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -63,7 +63,7 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct dw_i2c_dev *dev = context;
- *val = readl_relaxed(dev->base + reg);
+ *val = readl(dev->base + reg);
return 0;
}
@@ -72,7 +72,7 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct dw_i2c_dev *dev = context;
- writel_relaxed(val, dev->base + reg);
+ writel(val, dev->base + reg);
return 0;
}
@@ -81,7 +81,7 @@ static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val)
{
struct dw_i2c_dev *dev = context;
- *val = swab32(readl_relaxed(dev->base + reg));
+ *val = swab32(readl(dev->base + reg));
return 0;
}
@@ -90,7 +90,7 @@ static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val)
{
struct dw_i2c_dev *dev = context;
- writel_relaxed(swab32(val), dev->base + reg);
+ writel(swab32(val), dev->base + reg);
return 0;
}
@@ -99,8 +99,8 @@ static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val)
{
struct dw_i2c_dev *dev = context;
- *val = readw_relaxed(dev->base + reg) |
- (readw_relaxed(dev->base + reg + 2) << 16);
+ *val = readw(dev->base + reg) |
+ (readw(dev->base + reg + 2) << 16);
return 0;
}
@@ -109,8 +109,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val)
{
struct dw_i2c_dev *dev = context;
- writew_relaxed(val, dev->base + reg);
- writew_relaxed(val >> 16, dev->base + reg + 2);
+ writew(val, dev->base + reg);
+ writew(val >> 16, dev->base + reg + 2);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 041a76f71a49..e106af83cef4 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -771,8 +771,8 @@ static int ocores_i2c_resume(struct device *dev)
return ocores_init(dev, i2c);
}
-static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm,
- ocores_i2c_suspend, ocores_i2c_resume);
+static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm,
+ ocores_i2c_suspend, ocores_i2c_resume);
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 1d7648242749..76f79b68cef8 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -265,6 +265,9 @@ struct pxa_i2c {
u32 hs_mask;
struct i2c_bus_recovery_info recovery;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_default;
+ struct pinctrl_state *pinctrl_recovery;
};
#define _IBMR(i2c) ((i2c)->reg_ibmr)
@@ -1299,12 +1302,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
*/
gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
+
+ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
}
static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
{
struct pxa_i2c *i2c = adap->algo_data;
- struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
u32 isr;
/*
@@ -1318,7 +1322,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
i2c_pxa_do_reset(i2c);
}
- WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
+ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
readl(_IBMR(i2c)), readl(_ISR(i2c)));
@@ -1340,20 +1344,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
return 0;
- bri->pinctrl = devm_pinctrl_get(dev);
- if (PTR_ERR(bri->pinctrl) == -ENODEV) {
- bri->pinctrl = NULL;
+ i2c->pinctrl = devm_pinctrl_get(dev);
+ if (PTR_ERR(i2c->pinctrl) == -ENODEV)
+ i2c->pinctrl = NULL;
+ if (IS_ERR(i2c->pinctrl))
+ return PTR_ERR(i2c->pinctrl);
+
+ if (!i2c->pinctrl)
+ return 0;
+
+ i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
+
+ if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
+ dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
+ PTR_ERR(i2c->pinctrl_default),
+ PTR_ERR(i2c->pinctrl_recovery));
+ return 0;
+ }
+
+ /*
+ * Claiming GPIOs can influence the pinmux state, and may glitch the
+ * I2C bus. Do this carefully.
+ */
+ bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+ if (IS_ERR(bri->scl_gpiod)) {
+ dev_info(dev, "missing scl gpio recovery information: %pe\n",
+ bri->scl_gpiod);
+ return 0;
+ }
+
+ /*
+ * We have SCL. Pull SCL low and wait a bit so that SDA glitches
+ * have no effect.
+ */
+ gpiod_direction_output(bri->scl_gpiod, 0);
+ udelay(10);
+ bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
+
+ /* Wait a bit in case of a SDA glitch, and then release SCL. */
+ udelay(10);
+ gpiod_direction_output(bri->scl_gpiod, 1);
+
+ if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(bri->sda_gpiod)) {
+ dev_info(dev, "missing sda gpio recovery information: %pe\n",
+ bri->sda_gpiod);
return 0;
}
- if (IS_ERR(bri->pinctrl))
- return PTR_ERR(bri->pinctrl);
bri->prepare_recovery = i2c_pxa_prepare_recovery;
bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
+ bri->recover_bus = i2c_generic_scl_recovery;
i2c->adap.bus_recovery_info = bri;
- return 0;
+ /*
+ * Claiming GPIOs can change the pinmux state, which confuses the
+ * pinctrl since pinctrl's idea of the current setting is unaffected
+ * by the pinmux change caused by claiming the GPIO. Work around that
+ * by switching pinctrl to the GPIO state here. We do it this way to
+ * avoid glitching the I2C bus.
+ */
+ pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
+
+ return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
}
static int i2c_pxa_probe(struct platform_device *dev)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index a8c89df1a997..9a7a74239eab 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2379,12 +2379,12 @@ retry_baser:
break;
}
+ if (!shr)
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+
its_write_baser(its, baser, val);
tmp = baser->val;
- if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
- tmp &= ~GITS_BASER_SHAREABILITY_MASK;
-
if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
/*
* Shareability didn't stick. Just use
@@ -2394,10 +2394,9 @@ retry_baser:
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
- if (!shr) {
+ if (!shr)
cache = GITS_BASER_nC;
- gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
- }
+
goto retry_baser;
}
@@ -2609,6 +2608,11 @@ static int its_alloc_tables(struct its_node *its)
/* erratum 24313: ignore memory access type */
cache = GITS_BASER_nCnB;
+ if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
+ cache = GITS_BASER_nC;
+ shr = 0;
+ }
+
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
struct its_baser *baser = its->tables + i;
u64 val = its_read_baser(its, baser);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 05be59ae21b2..6ae2329052c9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -265,6 +265,7 @@ struct bcache_device {
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
int nr_stripes;
+#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ae5cbb55861f..de3019972b35 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1000,6 +1000,9 @@ err:
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
+ *
+ * Note: Only an error code or a btree pointer will be returned; it is
+ * unnecessary for callers to check for a NULL pointer.
*/
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write,
@@ -1111,6 +1114,10 @@ retry:
mutex_unlock(&b->c->bucket_lock);
}
+/*
+ * Only an error code or a btree pointer will be returned; it is unnecessary
+ * for callers to check for a NULL pointer.
+ */
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int level, bool wait,
struct btree *parent)
@@ -1368,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl);
- while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1532,6 +1539,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return 0;
n = btree_node_alloc_replacement(replace, NULL);
+ if (IS_ERR(n))
+ return 0;
/* recheck reserve after allocating replacement node */
if (btree_check_reserve(b, NULL)) {
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8bd899766372..bfe1685dbae5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->stripe_size)
d->stripe_size = 1 << 31;
+ else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
+ d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!n || n > max_stripes) {
@@ -2016,7 +2018,7 @@ static int run_cache_set(struct cache_set *c)
c->root = bch_btree_node_get(c, NULL, k,
j->btree_level,
true, NULL);
- if (IS_ERR_OR_NULL(c->root))
+ if (IS_ERR(c->root))
goto err;
list_del_init(&c->root->list);
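The bcache hunk above enforces a 4 MiB floor on stripe_size by rounding BCH_MIN_STRIPE_SZ up to a multiple of the device's stripe size. A minimal userspace sketch of that roundup arithmetic, with an illustrative stripe size (not taken from the patch):

#include <stdio.h>

#define SECTOR_SHIFT 9
#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)   /* 8192 sectors */

/* roundup(x, y): the smallest multiple of y that is >= x */
static unsigned long roundup_ul(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned long stripe_size = 3000;	/* sectors, below the 4 MiB floor */

	if (stripe_size < BCH_MIN_STRIPE_SZ)
		stripe_size = roundup_ul(BCH_MIN_STRIPE_SZ, stripe_size);
	printf("stripe_size=%lu sectors\n", stripe_size);	/* prints 9000 */
	return 0;
}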
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 45d8af755de6..a438efb66069 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1104,7 +1104,7 @@ SHOW(__bch_cache)
sum += INITIAL_PRIO - cached[i];
if (n)
- do_div(sum, n);
+ sum = div64_u64(sum, n);
for (i = 0; i < ARRAY_SIZE(q); i++)
q[i] = INITIAL_PRIO - cached[n * (i + 1) /
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 24c049067f61..3accfdaee6b1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -913,7 +913,7 @@ static int bch_dirty_init_thread(void *arg)
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
- cur_idx = prev_idx = 0;
+ prev_idx = 0;
bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
@@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
void bch_sectors_dirty_init(struct bcache_device *d)
{
int i;
+ struct btree *b = NULL;
struct bkey *k = NULL;
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
+retry_lock:
+ b = c->root;
+ rw_lock(0, b, b->level);
+ if (b != c->root) {
+ rw_unlock(0, b);
+ goto retry_lock;
+ }
+
/* Just count root keys if no leaf node */
- rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
for_each_key_filter(&c->root->keys,
- k, &iter, bch_ptr_invalid)
+ k, &iter, bch_ptr_invalid) {
+ if (KEY_INODE(k) != op.inode)
+ continue;
sectors_dirty_init_fn(&op.op, c->root, k);
+ }
- rw_unlock(0, c->root);
+ rw_unlock(0, b);
return;
}
@@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
if (atomic_read(&state.enough))
break;
+ atomic_inc(&state.started);
state.infos[i].state = &state;
state.infos[i].thread =
kthread_run(bch_dirty_init_thread, &state.infos[i],
"bch_dirtcnt[%d]", i);
if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
+ atomic_dec(&state.started);
for (--i; i >= 0; i--)
kthread_stop(state.infos[i].thread);
goto out;
}
- atomic_inc(&state.started);
}
out:
/* Must wait for all threads to stop. */
wait_event(state.wait, atomic_read(&state.started) == 0);
- rw_unlock(0, c->root);
+ rw_unlock(0, b);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 62eb27639c9b..f03d7dba270c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -254,7 +254,7 @@ enum evict_result {
typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
+static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{
unsigned long tested = 0;
struct list_head *h = lru->cursor;
@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
h = h->next;
- cond_resched();
+ if (!no_sleep)
+ cond_resched();
}
return NULL;
@@ -382,7 +383,10 @@ struct dm_buffer {
*/
struct buffer_tree {
- struct rw_semaphore lock;
+ union {
+ struct rw_semaphore lock;
+ rwlock_t spinlock;
+ } u;
struct rb_root root;
} ____cacheline_aligned_in_smp;
@@ -393,9 +397,12 @@ struct dm_buffer_cache {
* on the locks.
*/
unsigned int num_locks;
+ bool no_sleep;
struct buffer_tree trees[];
};
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
return dm_hash_locks_index(block, num_locks);
@@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
- down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+ read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ else
+ down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
- up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+ read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ else
+ up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
- down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+ write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ else
+ down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
- up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+ write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ else
+ up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
/*
@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
static void __lh_lock(struct lock_history *lh, unsigned int index)
{
- if (lh->write)
- down_write(&lh->cache->trees[index].lock);
- else
- down_read(&lh->cache->trees[index].lock);
+ if (lh->write) {
+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+ write_lock_bh(&lh->cache->trees[index].u.spinlock);
+ else
+ down_write(&lh->cache->trees[index].u.lock);
+ } else {
+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+ read_lock_bh(&lh->cache->trees[index].u.spinlock);
+ else
+ down_read(&lh->cache->trees[index].u.lock);
+ }
}
static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
- if (lh->write)
- up_write(&lh->cache->trees[index].lock);
- else
- up_read(&lh->cache->trees[index].lock);
+ if (lh->write) {
+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+ write_unlock_bh(&lh->cache->trees[index].u.spinlock);
+ else
+ up_write(&lh->cache->trees[index].u.lock);
+ } else {
+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+ read_unlock_bh(&lh->cache->trees[index].u.spinlock);
+ else
+ up_read(&lh->cache->trees[index].u.lock);
+ }
}
/*
@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
return le_to_buffer(le);
}
-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
+static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
unsigned int i;
bc->num_locks = num_locks;
+ bc->no_sleep = no_sleep;
for (i = 0; i < bc->num_locks; i++) {
- init_rwsem(&bc->trees[i].lock);
+ if (no_sleep)
+ rwlock_init(&bc->trees[i].u.spinlock);
+ else
+ init_rwsem(&bc->trees[i].u.lock);
bc->trees[i].root = RB_ROOT;
}
@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
struct lru_entry *le;
struct dm_buffer *b;
- le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
+ le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
if (!le)
return NULL;
@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
while (true) {
- le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
+ le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
if (!le)
break;
@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
{
unsigned int i;
+ BUG_ON(bc->no_sleep);
for (i = 0; i < bc->num_locks; i++) {
- down_write(&bc->trees[i].lock);
+ down_write(&bc->trees[i].u.lock);
__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
- up_write(&bc->trees[i].lock);
+ up_write(&bc->trees[i].u.lock);
}
}
@@ -979,8 +1017,6 @@ struct dm_bufio_client {
struct dm_buffer_cache cache; /* must be last member */
};
-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
-
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
if (need_submit)
submit_io(b, REQ_OP_READ, read_endio);
- wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+ if (nf != NF_GET) /* we already tested this condition above */
+ wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
if (b->read_error) {
int error = blk_status_to_errno(b->read_error);
@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
r = -ENOMEM;
goto bad_client;
}
- cache_init(&c->cache, num_locks);
+ cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
c->bdev = bdev;
c->block_size = block_size;
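The dm-bufio hunks above place a sleeping rw_semaphore and a non-sleeping rwlock in a union and select one flavor per client at cache_init() time. The sketch below is only a userspace analogy of that shape using pthreads: the kernel static key is modeled as a plain flag, the names are illustrative, and a pthread spinlock has no reader/writer distinction, so the semantics are not a match for the kernel locks.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct buffer_tree {
	union {
		pthread_rwlock_t lock;		/* "sleeping" flavor */
		pthread_spinlock_t spinlock;	/* non-sleeping flavor */
	} u;
};

struct buffer_cache {
	bool no_sleep;
	struct buffer_tree tree;
};

/* The flavor is fixed at init time and never changes afterwards, so every
 * lock/unlock helper just branches on the same flag. */
static void cache_init(struct buffer_cache *bc, bool no_sleep)
{
	bc->no_sleep = no_sleep;
	if (no_sleep)
		pthread_spin_init(&bc->tree.u.spinlock, PTHREAD_PROCESS_PRIVATE);
	else
		pthread_rwlock_init(&bc->tree.u.lock, NULL);
}

static void cache_read_lock(struct buffer_cache *bc)
{
	if (bc->no_sleep)
		pthread_spin_lock(&bc->tree.u.spinlock);
	else
		pthread_rwlock_rdlock(&bc->tree.u.lock);
}

static void cache_read_unlock(struct buffer_cache *bc)
{
	if (bc->no_sleep)
		pthread_spin_unlock(&bc->tree.u.spinlock);
	else
		pthread_rwlock_unlock(&bc->tree.u.lock);
}

int main(void)
{
	struct buffer_cache bc;

	cache_init(&bc, true);
	cache_read_lock(&bc);
	puts("locked via the non-sleeping flavor");
	cache_read_unlock(&bc);
	return 0;
}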
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6de107aff331..2ae8560b6a14 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1673,7 +1673,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned int remaining_size;
- unsigned int order = MAX_ORDER - 1;
+ unsigned int order = MAX_ORDER;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index efd510984e25..5eabdb06c649 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -33,7 +33,7 @@ struct delay_c {
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
struct task_struct *worker;
- atomic_t may_delay;
+ bool may_delay;
struct delay_class read;
struct delay_class write;
@@ -73,39 +73,6 @@ static inline bool delay_is_fast(struct delay_c *dc)
return !!dc->worker;
}
-static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all)
-{
- struct dm_delay_info *delayed, *next;
-
- mutex_lock(&delayed_bios_lock);
- list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
- if (flush_all || time_after_eq(jiffies, delayed->expires)) {
- struct bio *bio = dm_bio_from_per_bio_data(delayed,
- sizeof(struct dm_delay_info));
- list_del(&delayed->list);
- dm_submit_bio_remap(bio, NULL);
- delayed->class->ops--;
- }
- }
- mutex_unlock(&delayed_bios_lock);
-}
-
-static int flush_worker_fn(void *data)
-{
- struct delay_c *dc = data;
-
- while (1) {
- flush_delayed_bios_fast(dc, false);
- if (unlikely(list_empty(&dc->delayed_bios))) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- } else
- cond_resched();
- }
-
- return 0;
-}
-
static void flush_bios(struct bio *bio)
{
struct bio *n;
@@ -118,36 +85,61 @@ static void flush_bios(struct bio *bio)
}
}
-static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all)
+static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
{
struct dm_delay_info *delayed, *next;
+ struct bio_list flush_bio_list;
unsigned long next_expires = 0;
- unsigned long start_timer = 0;
- struct bio_list flush_bios = { };
+ bool start_timer = false;
+ bio_list_init(&flush_bio_list);
mutex_lock(&delayed_bios_lock);
list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
+ cond_resched();
if (flush_all || time_after_eq(jiffies, delayed->expires)) {
struct bio *bio = dm_bio_from_per_bio_data(delayed,
sizeof(struct dm_delay_info));
list_del(&delayed->list);
- bio_list_add(&flush_bios, bio);
+ bio_list_add(&flush_bio_list, bio);
delayed->class->ops--;
continue;
}
- if (!start_timer) {
- start_timer = 1;
- next_expires = delayed->expires;
- } else
- next_expires = min(next_expires, delayed->expires);
+ if (!delay_is_fast(dc)) {
+ if (!start_timer) {
+ start_timer = true;
+ next_expires = delayed->expires;
+ } else {
+ next_expires = min(next_expires, delayed->expires);
+ }
+ }
}
mutex_unlock(&delayed_bios_lock);
if (start_timer)
queue_timeout(dc, next_expires);
- return bio_list_get(&flush_bios);
+ flush_bios(bio_list_get(&flush_bio_list));
+}
+
+static int flush_worker_fn(void *data)
+{
+ struct delay_c *dc = data;
+
+ while (!kthread_should_stop()) {
+ flush_delayed_bios(dc, false);
+ mutex_lock(&delayed_bios_lock);
+ if (unlikely(list_empty(&dc->delayed_bios))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ mutex_unlock(&delayed_bios_lock);
+ schedule();
+ } else {
+ mutex_unlock(&delayed_bios_lock);
+ cond_resched();
+ }
+ }
+
+ return 0;
}
static void flush_expired_bios(struct work_struct *work)
@@ -155,10 +147,7 @@ static void flush_expired_bios(struct work_struct *work)
struct delay_c *dc;
dc = container_of(work, struct delay_c, flush_expired_bios);
- if (delay_is_fast(dc))
- flush_delayed_bios_fast(dc, false);
- else
- flush_bios(flush_delayed_bios(dc, false));
+ flush_delayed_bios(dc, false);
}
static void delay_dtr(struct dm_target *ti)
@@ -177,8 +166,7 @@ static void delay_dtr(struct dm_target *ti)
if (dc->worker)
kthread_stop(dc->worker);
- if (!delay_is_fast(dc))
- mutex_destroy(&dc->timer_lock);
+ mutex_destroy(&dc->timer_lock);
kfree(dc);
}
@@ -236,7 +224,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = dc;
INIT_LIST_HEAD(&dc->delayed_bios);
- atomic_set(&dc->may_delay, 1);
+ mutex_init(&dc->timer_lock);
+ dc->may_delay = true;
dc->argc = argc;
ret = delay_class_ctr(ti, &dc->read, argv);
@@ -282,12 +271,12 @@ out:
"dm-delay-flush-worker");
if (IS_ERR(dc->worker)) {
ret = PTR_ERR(dc->worker);
+ dc->worker = NULL;
goto bad;
}
} else {
timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
- mutex_init(&dc->timer_lock);
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
ret = -EINVAL;
@@ -312,7 +301,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
struct dm_delay_info *delayed;
unsigned long expires = 0;
- if (!c->delay || !atomic_read(&dc->may_delay))
+ if (!c->delay)
return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@@ -321,6 +310,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
mutex_lock(&delayed_bios_lock);
+ if (unlikely(!dc->may_delay)) {
+ mutex_unlock(&delayed_bios_lock);
+ return DM_MAPIO_REMAPPED;
+ }
c->ops++;
list_add_tail(&delayed->list, &dc->delayed_bios);
mutex_unlock(&delayed_bios_lock);
@@ -337,21 +330,20 @@ static void delay_presuspend(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- atomic_set(&dc->may_delay, 0);
+ mutex_lock(&delayed_bios_lock);
+ dc->may_delay = false;
+ mutex_unlock(&delayed_bios_lock);
- if (delay_is_fast(dc))
- flush_delayed_bios_fast(dc, true);
- else {
+ if (!delay_is_fast(dc))
del_timer_sync(&dc->delay_timer);
- flush_bios(flush_delayed_bios(dc, true));
- }
+ flush_delayed_bios(dc, true);
}
static void delay_resume(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- atomic_set(&dc->may_delay, 1);
+ dc->may_delay = true;
}
static int delay_map(struct dm_target *ti, struct bio *bio)
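The reworked flush_worker_fn above checks the delayed-bios list under the mutex and decides to sleep before dropping it, so a wakeup cannot be lost between the check and the sleep. A small pthread sketch of the same "check under the lock, then wait" shape (a condition variable stands in for set_current_state()/schedule(); names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool have_work;
static bool stop;

/* The emptiness check and the decision to sleep happen under one lock,
 * so a signal cannot slip in between them. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		if (!have_work) {
			pthread_cond_wait(&cond, &lock);
			continue;
		}
		have_work = false;
		pthread_mutex_unlock(&lock);
		puts("flushed delayed work");
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	have_work = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}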
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 3ef9f018da60..2099c755119e 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -185,7 +185,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
{
if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io))))
+ verity_io_real_digest(v, io), true)))
return 0;
return memcmp(verity_io_real_digest(v, io), want_digest,
@@ -386,7 +386,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
/* Always re-validate the corrected block against the expected hash */
r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io));
+ verity_io_real_digest(v, io), true);
if (unlikely(r < 0))
return r;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 26adcfea0302..e115fcfe723c 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
- struct crypto_wait *wait)
+ struct crypto_wait *wait, bool may_sleep)
{
int r;
ahash_request_set_tfm(req, v->tfm);
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, (void *)wait);
+ ahash_request_set_callback(req,
+ may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
+ crypto_req_done, (void *)wait);
crypto_init_wait(wait);
r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
- DMERR("crypto_ahash_init failed: %d", r);
+ if (r != -ENOMEM)
+ DMERR("crypto_ahash_init failed: %d", r);
return r;
}
@@ -179,12 +180,12 @@ out:
}
int verity_hash(struct dm_verity *v, struct ahash_request *req,
- const u8 *data, size_t len, u8 *digest)
+ const u8 *data, size_t len, u8 *digest, bool may_sleep)
{
int r;
struct crypto_wait wait;
- r = verity_hash_init(v, req, &wait);
+ r = verity_hash_init(v, req, &wait, may_sleep);
if (unlikely(r < 0))
goto out;
@@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
r = verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->hash_dev_block_bits,
- verity_io_real_digest(v, io));
+ verity_io_real_digest(v, io), !io->in_tasklet);
if (unlikely(r < 0))
goto release_ret_r;
@@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &wait);
+ r = verity_hash_init(v, req, &wait, !io->in_tasklet);
if (unlikely(r < 0))
return r;
@@ -652,7 +653,7 @@ static void verity_tasklet(unsigned long data)
io->in_tasklet = true;
err = verity_verify_io(io);
- if (err == -EAGAIN) {
+ if (err == -EAGAIN || err == -ENOMEM) {
/* fallback to retrying with work-queue */
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
@@ -1033,7 +1034,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
goto out;
r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
- v->zero_digest);
+ v->zero_digest, true);
out:
kfree(req);
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 2f555b420367..f96f4e281ee4 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -128,7 +128,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len));
extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
- const u8 *data, size_t len, u8 *digest);
+ const u8 *data, size_t len, u8 *digest, bool may_sleep);
extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4ee4593c874a..c94373d64f2c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8666,7 +8666,8 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- orig_bio->bi_status = bio->bi_status;
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
if (md_io_clone->start_time)
bio_end_io_acct(orig_bio, md_io_clone->start_time);
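The md hunk above propagates a clone's status to the original bio only when the clone failed and no earlier failure was already recorded. A tiny standalone sketch of that "latch the first error" pattern (plain ints stand in for blk_status_t):

#include <stdio.h>

static void latch_first_error(int clone_status, int *orig_status)
{
	if (clone_status && !*orig_status)
		*orig_status = clone_status;
}

int main(void)
{
	int orig = 0;

	latch_first_error(0, &orig);	/* success: nothing latched */
	latch_first_error(-5, &orig);	/* first failure wins */
	latch_first_error(-7, &orig);	/* later failures are ignored */
	printf("orig_status=%d\n", orig);	/* prints -5 */
	return 0;
}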
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 9be52101bc4f..2498f9079b75 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -48,9 +48,7 @@ config VIDEO_IVTV_ALSA
config VIDEO_FB_IVTV
tristate "Conexant cx23415 framebuffer support"
depends on VIDEO_IVTV && FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This is a framebuffer driver for the Conexant cx23415 MPEG
encoder/decoder.
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 23c8c094e791..410477e3e621 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -927,17 +927,17 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops ivtvfb_ops = {
.owner = THIS_MODULE,
+ .fb_read = fb_io_read,
.fb_write = ivtvfb_write,
.fb_check_var = ivtvfb_check_var,
.fb_set_par = ivtvfb_set_par,
.fb_setcolreg = ivtvfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_cursor = NULL,
.fb_ioctl = ivtvfb_ioctl,
.fb_pan_display = ivtvfb_pan_display,
.fb_blank = ivtvfb_blank,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
/* Restore hardware after firmware restart */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 51d47eda1c87..8e6cc0e133b7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1500,6 +1500,10 @@ done:
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
+ bool was_up = !!(bond_dev->flags & IFF_UP);
+
+ dev_close(bond_dev);
+
bond_dev->header_ops = slave_dev->header_ops;
bond_dev->type = slave_dev->type;
@@ -1514,6 +1518,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
}
+ if (was_up)
+ dev_open(bond_dev, NULL);
}
/* On bonding slaves other than the currently active slave, suppress
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index 045fe133f6ee..5beadabc2136 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
}
queue_work(pdsc->wq, &qcq->work);
- pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
+ pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index f3a7deda9972..e35d3e7006bf 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -15,7 +15,7 @@
#define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
#define PDSC_WATCHDOG_SECS 5
-#define PDSC_QUEUE_NAME_MAX_SZ 32
+#define PDSC_QUEUE_NAME_MAX_SZ 16
#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index 7c1b965d61a9..31940b857e0e 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -261,10 +261,14 @@ static int pdsc_identify(struct pdsc *pdsc)
struct pds_core_drv_identity drv = {};
size_t sz;
int err;
+ int n;
drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
- snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
- "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+ /* Catching the return quiets a Wformat-truncation complaint */
+ n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+ if (n > sizeof(drv.driver_ver_str))
+ dev_dbg(pdsc->dev, "release name truncated, don't care\n");
/* Next let's get some info about the device
* We use the devcmd_lock at this level in order to
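The pds_core hunk above silences a -Wformat-truncation warning by capturing snprintf's return value. A minimal userspace sketch of the pattern (function and buffer names are illustrative, not from the driver); note that snprintf strictly reports truncation when the return value is greater than or equal to the buffer size, and a negative return signals an encoding error:

#include <stdio.h>

static void build_version_string(char *buf, size_t sz,
				 const char *drv, const char *rel)
{
	/* snprintf returns the length it wanted to write; comparing that
	 * against the buffer size detects truncation without tripping
	 * -Wformat-truncation. */
	int n = snprintf(buf, sz, "%s %s", drv, rel);

	if (n < 0 || (size_t)n >= sz)
		fprintf(stderr, "version string truncated (wanted %d bytes)\n", n);
}

int main(void)
{
	char ver[16];

	build_version_string(ver, sizeof(ver), "pds_core", "6.7.0-rc4-longname");
	printf("%s\n", ver);
	return 0;
}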
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 57f88c8b37de..e9948ea5bbcd 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct pds_core_fw_list_info fw_list;
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
- char buf[16];
+ char buf[32];
int listlen;
int err;
int i;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 614c0278419b..6b73648b3779 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_channel *channel;
+ unsigned int i;
queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
+
+ if (!pdata->tx_usecs)
+ return;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring || channel->tx_timer_active)
+ break;
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6e83ff59172a..32fab5e77246 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = pdata->phy.address;
- cmd->base.autoneg = pdata->phy.autoneg;
- cmd->base.speed = pdata->phy.speed;
- cmd->base.duplex = pdata->phy.duplex;
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.autoneg = pdata->phy.autoneg;
cmd->base.port = PORT_NONE;
XGBE_LM_COPY(cmd, supported, lks, supported);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 32d2c6fac652..4a2dc705b528 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
- xgbe_set_mode(pdata, mode);
+ /* Force the mode change for SFI in Fixed PHY config.
+ * Fixed PHY configs need PLL to be enabled while doing mode set.
+ * When the SFP module isn't connected during boot, driver assumes
+ * AN is ON and attempts autonegotiation. However, if the connected
+ * SFP comes up in Fixed PHY config, the link will not come up as
+ * PLL isn't enabled while the initial mode set command is issued.
+ * So, force the mode change for SFI in Fixed PHY configuration to
+ * fix link issues.
+ */
+ if (mode == XGBE_MODE_SFI)
+ xgbe_change_mode(pdata, mode);
+ else
+ xgbe_set_mode(pdata, mode);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1dee27349367..48b6191efa56 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6889,7 +6889,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
desc_idx, *post_ptr);
drop_it_no_recycle:
/* Other statistics kept track of by card. */
- tp->rx_dropped++;
+ tnapi->rx_dropped++;
goto next_pkt;
}
@@ -7918,8 +7918,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
segs = skb_gso_segment(skb, tp->dev->features &
~(NETIF_F_TSO | NETIF_F_TSO6));
- if (IS_ERR(segs) || !segs)
+ if (IS_ERR(segs) || !segs) {
+ tnapi->tx_dropped++;
goto tg3_tso_bug_end;
+ }
skb_list_walk_safe(segs, seg, next) {
skb_mark_not_on_list(seg);
@@ -8190,7 +8192,7 @@ dma_error:
drop:
dev_kfree_skb_any(skb);
drop_nofree:
- tp->tx_dropped++;
+ tnapi->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -9405,7 +9407,7 @@ static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
- int err;
+ int err, i;
tg3_stop_fw(tp);
@@ -9426,6 +9428,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
/* And make sure the next sample is new data */
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->rx_dropped = 0;
+ tnapi->tx_dropped = 0;
+ }
}
return err;
@@ -11975,6 +11984,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ int i;
stats->rx_packets = old_stats->rx_packets +
get_stat64(&hw_stats->rx_ucast_packets) +
@@ -12021,8 +12033,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = old_stats->rx_missed_errors +
get_stat64(&hw_stats->rx_discards);
- stats->rx_dropped = tp->rx_dropped;
- stats->tx_dropped = tp->tx_dropped;
+ /* Aggregate per-queue counters. The per-queue counters are updated
+ * by a single writer, race-free. The result computed by this loop
+ * might not be 100% accurate (counters can be updated in the middle of
+ * the loop) but the next tg3_get_nstats() will recompute the current
+ * value so it is acceptable.
+ *
+ * Note that these counters wrap around at 4G on 32bit machines.
+ */
+ rx_dropped = (unsigned long)(old_stats->rx_dropped);
+ tx_dropped = (unsigned long)(old_stats->tx_dropped);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ rx_dropped += tnapi->rx_dropped;
+ tx_dropped += tnapi->tx_dropped;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
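The comment in the tg3 hunk above describes summing per-queue drop counters that each have a single writer, accepting a slightly stale total. A small self-contained sketch of that aggregation (struct and field names are illustrative):

#include <stdio.h>

#define NQUEUES 4

struct queue_stats {
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

/* Each queue's counters have one writer, so reading them without a lock
 * only risks a slightly stale sum; the next aggregation recomputes it.
 * On 32-bit machines "unsigned long" wraps at 4G, as the comment notes. */
static void aggregate(const struct queue_stats *q, int n,
		      unsigned long *rx, unsigned long *tx)
{
	unsigned long r = 0, t = 0;

	for (int i = 0; i < n; i++) {
		r += q[i].rx_dropped;
		t += q[i].tx_dropped;
	}
	*rx = r;
	*tx = t;
}

int main(void)
{
	struct queue_stats q[NQUEUES] = { {3, 1}, {0, 0}, {7, 2}, {1, 0} };
	unsigned long rx, tx;

	aggregate(q, NQUEUES, &rx, &tx);
	printf("rx_dropped=%lu tx_dropped=%lu\n", rx, tx);
	return 0;
}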
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ae5c01bd1110..5016475e5005 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3018,6 +3018,7 @@ struct tg3_napi {
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
+ unsigned long rx_dropped;
u32 tx_prod ____cacheline_aligned;
u32 tx_cons;
@@ -3026,6 +3027,7 @@ struct tg3_napi {
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
struct tg3_tx_ring_info *tx_buffers;
+ unsigned long tx_dropped;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
@@ -3220,8 +3222,6 @@ struct tg3 {
/* begin "everything else" cacheline(s) section */
- unsigned long rx_dropped;
- unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats_prev;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5423fe26b4ef..78287cfcbf63 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
.val = CONFIG0_MAXLEN_1536,
},
{
- .max_l3_len = 1542,
- .val = CONFIG0_MAXLEN_1542,
+ .max_l3_len = 1548,
+ .val = CONFIG0_MAXLEN_1548,
},
{
.max_l3_len = 9212,
@@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
dma_addr_t mapping;
unsigned short mtu;
void *buffer;
+ int ret;
mtu = ETH_HLEN;
mtu += netdev->mtu;
@@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word3 |= mtu;
}
- if (skb->ip_summed != CHECKSUM_NONE) {
+ if (skb->len >= ETH_FRAME_LEN) {
+ /* Hardware offloaded checksumming isn't working on frames
+ * bigger than 1514 bytes. A hypothesis about this is that the
+ * checksum buffer is only 1518 bytes, so when the frames get
+ * bigger they get truncated, or the last few bytes get
+ * overwritten by the FCS.
+ *
+ * Just use software checksumming and bypass on bigger frames.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = skb_checksum_help(skb);
+ if (ret)
+ return ret;
+ }
+ word1 |= TSS_BYPASS_BIT;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;
+ /* We do not switch off the checksumming on non-TCP/UDP
+ * frames: as tests have shown, the checksumming engine
+ * is smart enough to see that a frame is not actually TCP
+ * or UDP and then just pass it through without any changes
+ * to the frame.
+ */
if (skb->protocol == htons(ETH_P_IP)) {
word1 |= TSS_IP_CHKSUM_BIT;
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
@@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static netdev_features_t gmac_fix_features(struct net_device *netdev,
- netdev_features_t features)
-{
- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
- features &= ~GMAC_OFFLOAD_FEATURES;
-
- return features;
-}
-
static int gmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
.ndo_set_mac_address = gmac_set_mac_address,
.ndo_get_stats64 = gmac_get_stats64,
.ndo_change_mtu = gmac_change_mtu,
- .ndo_fix_features = gmac_fix_features,
.ndo_set_features = gmac_set_features,
};
@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
- /* We can handle jumbo frames up to 10236 bytes so, let's accept
- * payloads of 10236 bytes minus VLAN and ethernet header
+ /* We can receive jumbo frames up to 10236 bytes but only
+ * transmit 2047 bytes, so let's accept payloads of 2047
+ * bytes minus VLAN and ethernet header
*/
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
+ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll);
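The gemini hunk above checksums large frames in software and sets the bypass bit, keeping the hardware offload only for shorter TCP/UDP frames. A condensed sketch of that decision; the flag constants here are illustrative placeholders, not the driver's actual bit values:

#include <stdio.h>
#include <stdbool.h>

#define ETH_FRAME_LEN	1514
#define TSS_BYPASS_BIT	0x1
#define TSS_IP_CHKSUM	0x2

/* Frames of ETH_FRAME_LEN bytes or more bypass the checksum engine
 * (software must checksum them first); shorter frames that still need
 * a checksum keep the hardware offload bit. */
static unsigned int pick_csum_flags(unsigned int len, bool needs_csum,
				    bool *do_sw_csum)
{
	*do_sw_csum = false;
	if (len >= ETH_FRAME_LEN) {
		*do_sw_csum = needs_csum;
		return TSS_BYPASS_BIT;
	}
	return needs_csum ? TSS_IP_CHKSUM : 0;
}

int main(void)
{
	bool sw;

	printf("flags=0x%x sw=%d\n", pick_csum_flags(1514, true, &sw), sw);
	printf("flags=0x%x sw=%d\n", pick_csum_flags(600, true, &sw), sw);
	return 0;
}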
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
index 9fdf77d5eb37..24bb989981f2 100644
--- a/drivers/net/ethernet/cortina/gemini.h
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
#define SOF_BIT 0x80000000
#define EOF_BIT 0x40000000
#define EOFIE_BIT BIT(29)
-#define MTU_SIZE_BIT_MASK 0x1fff
+#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
/* GMAC Tx Descriptor */
struct gmac_txdesc {
@@ -787,7 +787,7 @@ union gmac_config0 {
#define CONFIG0_MAXLEN_1536 0
#define CONFIG0_MAXLEN_1518 1
#define CONFIG0_MAXLEN_1522 2
-#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_1548 3
#define CONFIG0_MAXLEN_9k 4 /* 9212 */
#define CONFIG0_MAXLEN_10k 5 /* 10236 */
#define CONFIG0_MAXLEN_1518__6 6
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 276f996f95dc..2d42e733837b 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->tx) {
if (block->tx->q_num < priv->tx_cfg.num_queues)
reschedule |= gve_tx_poll(block, budget);
- else
+ else if (budget)
reschedule |= gve_xdp_poll(block, budget);
}
+ if (!budget)
+ return 0;
+
if (block->rx) {
work_done = gve_rx_poll(block, budget);
reschedule |= work_done == budget;
@@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ if (!budget)
+ return 0;
+
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
reschedule |= work_done == budget;
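The gve hunks above make the poll handlers honor a zero budget by doing only TX completion work and returning 0, since NAPI poll can be invoked with budget 0 (e.g. from netpoll) and must not process RX in that case. A sketch of that control flow (the function is illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative poll routine: budget == 0 means "TX cleanup only". */
static int napi_poll_sketch(int budget, bool has_tx, bool has_rx)
{
	int work_done = 0;

	if (has_tx)
		printf("cleaned tx completions\n");	/* not bounded by budget */

	if (!budget)
		return 0;	/* no RX processing, report zero work */

	if (has_rx) {
		work_done = budget;	/* pretend the whole budget was used */
		printf("cleaned %d rx descriptors\n", work_done);
	}
	return work_done;
}

int main(void)
{
	napi_poll_sketch(0, true, true);	/* zero-budget path */
	napi_poll_sketch(64, true, true);	/* normal NAPI path */
	return 0;
}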
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index e84a066aa1a4..73655347902d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
feat = block->napi.dev->features;
- /* If budget is 0, do all the work */
- if (budget == 0)
- budget = INT_MAX;
-
if (budget > 0)
work_done = gve_clean_rx_done(rx, budget, feat);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 6957a865cff3..9f6ffc4a54f0 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
bool repoll;
u32 to_do;
- /* If budget is 0, do all the work */
- if (budget == 0)
- budget = INT_MAX;
-
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 0b138635bafa..c083d1d10767 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -503,11 +503,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
}
sprintf(result[j++], "%d", i);
- sprintf(result[j++], "%s", dim_state_str[dim->state]);
+ sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
sprintf(result[j++], "%u", dim->profile_ix);
- sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+ sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
sprintf(result[j++], "%s",
- dim_tune_stat_str[dim->tune_state]);
+ dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
sprintf(result[j++], "%u", dim->steps_left);
sprintf(result[j++], "%u", dim->steps_right);
sprintf(result[j++], "%u", dim->tired);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 06117502001f..b618797a7e8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
struct hns3_nic_priv *priv = netdev_priv(netdev);
char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = priv->ae_handle;
- u8 mac_addr_temp[ETH_ALEN];
+ u8 mac_addr_temp[ETH_ALEN] = {0};
int ret = 0;
if (h->ae_algo->ops->get_mac_addr)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 66e5807903a0..5ea9e59569ef 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
int wait_cnt);
+static int hclge_update_port_info(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -3041,6 +3042,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (state != hdev->hw.mac.link) {
hdev->hw.mac.link = state;
+ if (state == HCLGE_LINK_STATUS_UP)
+ hclge_update_port_info(hdev);
+
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
if (rclient && rclient->ops->link_status_change)
@@ -10025,8 +10029,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- mutex_lock(&hdev->vport_lock);
-
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
if (is_write_tbl && vlan->hd_tbl_status)
@@ -10041,8 +10043,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
break;
}
}
-
- mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@@ -10451,11 +10451,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
* handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
+ mutex_lock(&hdev->vport_lock);
if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ mutex_unlock(&hdev->vport_lock);
return -EBUSY;
+ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
}
+ mutex_unlock(&hdev->vport_lock);
/* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
@@ -10470,17 +10475,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
}
if (!ret) {
- if (!is_kill)
+ if (!is_kill) {
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
- else if (is_kill && vlan_id != 0)
+ } else if (is_kill && vlan_id != 0) {
+ mutex_lock(&hdev->vport_lock);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ mutex_unlock(&hdev->vport_lock);
+ }
} else if (is_kill) {
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with stack
*/
+ mutex_lock(&hdev->vport_lock);
set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ mutex_unlock(&hdev->vport_lock);
}
hclge_set_vport_vlan_fltr_change(vport);
@@ -10520,6 +10530,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
int i, ret, sync_cnt = 0;
u16 vlan_id;
+ mutex_lock(&hdev->vport_lock);
/* start from vport 1 for PF is always alive */
for (i = 0; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
@@ -10530,21 +10541,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
true);
- if (ret && ret != -EINVAL)
+ if (ret && ret != -EINVAL) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
hclge_set_vport_vlan_fltr_change(vport);
sync_cnt++;
- if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
}
}
+ mutex_unlock(&hdev->vport_lock);
hclge_sync_vlan_fltr_state(hdev);
}
@@ -11651,6 +11667,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_msi_irq_uninit;
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
if (hnae3_dev_phy_imp_supported(hdev))
ret = hclge_update_tp_port_info(hdev);
else
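The hclge hunks above park VLAN IDs whose hardware deletion failed (or was deferred during reset) in a bitmap and retry them later from the sync path. A minimal standalone sketch of that deferred-retry bitmap; the bit helpers and the fake hardware call are illustrative, not kernel APIs:

#include <stdio.h>
#include <stdbool.h>

#define VLAN_N_VID 4096
#define WORD_BITS (8 * sizeof(unsigned long))
#define BITMAP_WORDS ((VLAN_N_VID + WORD_BITS - 1) / WORD_BITS)

static unsigned long vlan_del_fail_bmap[BITMAP_WORDS];

static void set_bit_(unsigned int n, unsigned long *map)
{ map[n / WORD_BITS] |= 1UL << (n % WORD_BITS); }
static void clear_bit_(unsigned int n, unsigned long *map)
{ map[n / WORD_BITS] &= ~(1UL << (n % WORD_BITS)); }
static bool test_bit_(unsigned int n, const unsigned long *map)
{ return map[n / WORD_BITS] & (1UL << (n % WORD_BITS)); }

/* Pretend the hardware delete fails for VLAN 100 on the first attempt. */
static int hw_delete_vlan(unsigned int vid, int attempt)
{ return (vid == 100 && attempt == 0) ? -1 : 0; }

int main(void)
{
	unsigned int vids[] = { 10, 100, 200 };

	/* First pass: failures are parked in the bitmap instead of being lost */
	for (unsigned int i = 0; i < 3; i++)
		if (hw_delete_vlan(vids[i], 0))
			set_bit_(vids[i], vlan_del_fail_bmap);

	/* Later sync pass: retry everything still marked in the bitmap */
	for (unsigned int vid = 0; vid < VLAN_N_VID; vid++) {
		if (!test_bit_(vid, vlan_del_fail_bmap))
			continue;
		if (!hw_delete_vlan(vid, 1))
			clear_bit_(vid, vlan_del_fail_bmap);
	}
	printf("vlan 100 still pending: %d\n", test_bit_(100, vlan_del_fail_bmap));
	return 0;
}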
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a4d68fb216fb..0aa9beefd1c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
+ } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
}
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
int ret, sync_cnt = 0;
u16 vlan_id;
+ if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
+ return;
+
+ rtnl_lock();
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
while (vlan_id != VLAN_N_VID) {
ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
vlan_id, true);
if (ret)
- return;
+ break;
clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
sync_cnt++;
if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
- return;
+ break;
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
}
+ rtnl_unlock();
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
+static void hclgevf_reset_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
+ hclgevf_reset_task_schedule(hdev);
+}
+
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
+#define HCLGEVF_RESET_DELAY 5
+
enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
@@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
- hclgevf_reset_task_schedule(hdev);
+ mod_timer(&hdev->reset_timer,
+ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
break;
case HCLGEVF_VECTOR0_EVENT_MBX:
hclgevf_mbx_handler(hdev);
@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 81c16b8c8da2..a73f2bf3a56a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -219,6 +219,7 @@ struct hclgevf_dev {
enum hnae3_reset_type reset_level;
unsigned long reset_pending;
enum hnae3_reset_type reset_type;
+ struct timer_list reset_timer;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index bbf7b14079de..85c2a634c8f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
i++;
}
+ /* ensure additional_info will be seen after received_resp */
+ smp_rmb();
+
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
resp->resp_status = hclgevf_resp_to_errno(resp_status);
memcpy(resp->additional_info, req->msg.resp_data,
HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
+
+ /* ensure additional_info will be seen before setting received_resp */
+ smp_wmb();
+
if (match_id) {
/* If match_id is not zero, it means PF support match_id.
* if the match_id is right, VF get the right response, or
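The two barriers added in the hclgevf mailbox hunks pair a write barrier (publish additional_info before received_resp) on the responder side with a read barrier on the polling side. The same publish/consume ordering expressed with C11 release/acquire atomics; this is a userspace sketch of the ordering rule, not driver code:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static char additional_info[32];
static atomic_int received_resp;

static void *producer(void *arg)
{
	(void)arg;
	strcpy(additional_info, "resp payload");
	/* release: additional_info is visible before received_resp flips */
	atomic_store_explicit(&received_resp, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* acquire: once received_resp is observed, additional_info is valid */
	while (!atomic_load_explicit(&received_resp, memory_order_acquire))
		;
	printf("%s\n", additional_info);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}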
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 08d7edccfb8d..3f99eb198245 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3844,7 +3844,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
int aq_ret = 0;
- int i, ret;
+ int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = -EINVAL;
@@ -3868,8 +3868,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
- if (!cfilter)
- return -ENOMEM;
+ if (!cfilter) {
+ aq_ret = -ENOMEM;
+ goto err_out;
+ }
/* parse destination mac address */
for (i = 0; i < ETH_ALEN; i++)
@@ -3917,13 +3919,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
/* Adding cloud filter programmed as TC filter */
if (tcf.dst_port)
- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
else
- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
- if (ret) {
+ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
- vf->vf_id, ERR_PTR(ret),
+ vf->vf_id, ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index cfb1580f5850..8b7504a9df31 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1479,14 +1479,14 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
}
/**
- * ice_download_pkg
+ * ice_download_pkg_with_sig_seg
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to package header
*
* Handles the download of a complete package.
*/
static enum ice_ddp_state
-ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
@@ -1519,6 +1519,103 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
state = ice_post_dwnld_pkg_actions(hw);
ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state;
+ struct ice_buf_hdr *bh;
+ int status;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_without_sig_seg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package without a signature segment.
+ */
+static enum ice_ddp_state
+ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ le32_to_cpu(ice_seg->hdr.seg_type),
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state
+ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ struct ice_seg *ice_seg)
+{
+ enum ice_ddp_state state;
+
+ if (hw->pkg_has_signing_seg)
+ state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
+ else
+ state = ice_download_pkg_without_sig_seg(hw, ice_seg);
+
ice_post_pkg_dwnld_vlan_mode_cfg(hw);
return state;
@@ -2083,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- state = ice_download_pkg(hw, pkg);
+ state = ice_download_pkg(hw, pkg, seg);
if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT,
"package previously loaded - no work.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 835c419ccc74..86b180cb32a0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -815,12 +815,6 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
- if (prio > ICE_DPLL_PRIO_MAX) {
- NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d",
- ICE_DPLL_PRIO_MAX);
- return -EINVAL;
- }
-
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@@ -1756,6 +1750,7 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ ice_dpll_update_state(pf, d, true);
ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
if (ret) {
dpll_device_put(d->dpll);
@@ -1796,8 +1791,6 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
struct kthread_worker *kworker;
- ice_dpll_update_state(pf, &d->eec, true);
- ice_dpll_update_state(pf, &d->pps, true);
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
kworker = kthread_create_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
@@ -1830,6 +1823,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
int num_pins, i, ret = -EINVAL;
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
+ unsigned long caps;
u8 freq_supp_num;
bool input;
@@ -1849,6 +1843,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
}
for (i = 0; i < num_pins; i++) {
+ caps = 0;
pins[i].idx = i;
pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
@@ -1861,8 +1856,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
&dp->input_prio[i]);
if (ret)
return ret;
- pins[i].prop.capabilities |=
- DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
+ caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
pins[i].prop.phase_range.min =
pf->dplls.input_phase_adj_max;
pins[i].prop.phase_range.max =
@@ -1872,9 +1867,11 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pf->dplls.output_phase_adj_max;
pins[i].prop.phase_range.max =
-pf->dplls.output_phase_adj_max;
+ ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
+ if (ret)
+ return ret;
}
- pins[i].prop.capabilities |=
- DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index bb32b6d88373..93172e93995b 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -6,7 +6,6 @@
#include "ice.h"
-#define ICE_DPLL_PRIO_MAX 0xF
#define ICE_DPLL_RCLK_NUM_MAX 4
/** ice_dpll_pin - store info about pins
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 6607fa6fe556..fb9c93f37e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7401,15 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- /* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_cfg_timestamp(pf, false);
- else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
- /* for E82x PHC owner always need to have interrupts */
- ice_ptp_cfg_timestamp(pf, true);
- }
-
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -7461,6 +7452,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
+
+ /* Restore timestamp mode settings after VSI rebuild */
+ ice_ptp_restore_timestamp_mode(pf);
return;
err_vsi_rebuild:
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1eddcbe89b0c..71f405f8a6fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -256,48 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
- * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamp interrupt is enabled or disabled
+ * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
+ * @pf: Board private structure
+ *
+ * Program the device to respond appropriately to the Tx timestamp interrupt
+ * cause.
*/
-static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
+static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
+ struct ice_hw *hw = &pf->hw;
+ bool enable;
u32 val;
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* React to interrupts across all quads. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+ enable = true;
+ break;
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* Do not react to interrupts on any quad. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ enable = false;
+ break;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ default:
+ enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
+ break;
+ }
+
/* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
+ val = rd32(hw, PFINT_OICR_ENA);
+ if (enable)
val |= PFINT_OICR_TSYN_TX_M;
else
val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
-}
-
-/**
- * ice_set_tx_tstamp - Enable or disable Tx timestamping
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamps are enabled or disabled
- */
-static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
-{
- struct ice_vsi *vsi;
- u16 i;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return;
-
- /* Set the timestamp enable flag for all the Tx rings */
- ice_for_each_txq(vsi, i) {
- if (!vsi->tx_rings[i])
- continue;
- vsi->tx_rings[i]->ptp_tx = on;
- }
-
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_configure_tx_tstamp(pf, on);
-
- pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ wr32(hw, PFINT_OICR_ENA, val);
}
/**
@@ -311,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
u16 i;
vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ if (!vsi || !vsi->rx_rings)
return;
/* Set the timestamp flag for all the Rx rings */
@@ -320,23 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+}
+
+/**
+ * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
+ * @pf: Board private structure
+ *
+ * Called during preparation for reset to temporarily disable timestamping on
+ * the device. Called during remove to disable timestamping while cleaning up
+ * driver resources.
+ */
+static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ val = rd32(hw, PFINT_OICR_ENA);
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(hw, PFINT_OICR_ENA, val);
- pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
- HWTSTAMP_FILTER_NONE;
+ ice_set_rx_tstamp(pf, false);
}
/**
- * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
* @pf: Board private structure
- * @ena: bool value to enable or disable time stamp
*
- * This function will configure timestamping during PTP initialization
- * and deinitialization
+ * Called at the end of rebuild to restore timestamp configuration after
+ * a device reset.
*/
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
- ice_set_tx_tstamp(pf, ena);
- ice_set_rx_tstamp(pf, ena);
+ struct ice_hw *hw = &pf->hw;
+ bool enable_rx;
+
+ ice_ptp_cfg_tx_interrupt(pf);
+
+ enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, enable_rx);
+
+ /* Trigger an immediate software interrupt to ensure that timestamps
+ * which occurred during reset are handled now.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
}
/**
@@ -2037,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- ice_set_tx_tstamp(pf, false);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ON:
- ice_set_tx_tstamp(pf, true);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
break;
default:
return -ERANGE;
@@ -2048,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- ice_set_rx_tstamp(pf, false);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
@@ -2064,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- ice_set_rx_tstamp(pf, true);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
+ /* Immediately update the device timestamping mode */
+ ice_ptp_restore_timestamp_mode(pf);
+
return 0;
}
@@ -2737,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
clear_bit(ICE_FLAG_PTP, pf->flags);
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
kthread_cancel_delayed_work_sync(&ptp->work);
@@ -2803,15 +2827,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) {
- /* The clock owner for this device type handles the timestamp
- * interrupt for all ports.
- */
- ice_ptp_configure_tx_tstamp(pf, true);
-
- /* React on all quads interrupts for E82x */
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
-
+ if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
err = ice_ptp_tx_ena_intr(pf, true, itr);
if (err)
@@ -2881,13 +2897,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E822:
- /* Non-owner PFs don't react to any interrupts on E82x,
- * neither on own quad nor on others
- */
- if (!ice_ptp_pf_handles_tx_interrupt(pf)) {
- ice_ptp_configure_tx_tstamp(pf, false);
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
- }
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
@@ -3032,6 +3041,9 @@ void ice_ptp_init(struct ice_pf *pf)
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
+ /* Configure initial Tx interrupt settings */
+ ice_ptp_cfg_tx_interrupt(pf);
+
set_bit(ICE_FLAG_PTP, pf->flags);
err = ice_ptp_init_work(pf, ptp);
if (err)
@@ -3067,7 +3079,7 @@ void ice_ptp_release(struct ice_pf *pf)
return;
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
ice_ptp_remove_auxbus_device(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 8f6f94392756..06a330867fc9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -292,7 +292,7 @@ int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -317,8 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
-static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-
+static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { }
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 6d573908de7a..a00b55e14aac 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -3961,3 +3961,57 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
return ret;
}
+
+/**
+ * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
+ * @hw: pointer to the hw struct
+ * @pin_id: id of a pin
+ * @caps: capabilities to modify
+ *
+ * Return:
+ * * 0 - success, state capabilities were modified
+ * * negative - failure, capabilities were not modified
+ */
+int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
+ unsigned long *caps)
+{
+ bool can_change = true;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
+ can_change = false;
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
+ can_change = false;
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
+ pin_id == ZL_OUT2)
+ can_change = false;
+ else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
+ pin_id == SI_OUT1)
+ can_change = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (can_change)
+ *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ else
+ *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 36aeeef99ec0..cf76701566c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -282,6 +282,8 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
void ice_ptp_init_phy_model(struct ice_hw *hw);
+int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
+ unsigned long *caps);
#define PFTSYN_SEM_BYTES 4
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 52d0a126eb61..9e97ea863068 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
- if (!tx_ring->ptp_tx)
- return;
-
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 166413fc33f4..daf7b9dbb143 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -380,7 +380,6 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 90817136808d..29aac327574d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
if (sset == ETH_SS_STATS) {
+ struct mvneta_port *pp = netdev_priv(netdev);
int i;
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
- page_pool_ethtool_stats_get_strings(data);
+ if (!pp->bm_priv) {
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
+ }
}
}
@@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
struct page_pool_stats stats = {};
int i;
- for (i = 0; i < rxq_number; i++)
- page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+ for (i = 0; i < rxq_number; i++) {
+ if (pp->rxqs[i].page_pool)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+ }
page_pool_ethtool_stats_get(data, &stats);
}
@@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
- mvneta_ethtool_pp_stats(pp, data);
+ if (!pp->bm_priv)
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics) +
- page_pool_ethtool_stats_get_count();
+ if (sset == ETH_SS_STATS) {
+ int count = ARRAY_SIZE(mvneta_statistics);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->bm_priv)
+ count += page_pool_ethtool_stats_get_count();
+
+ return count;
+ }
return -EOPNOTSUPP;
}
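
The mvneta changes above keep the three ethtool stats callbacks consistent: whatever predicate decides to include page_pool statistics must be applied identically in get_sset_count(), get_strings() and get_stats(), otherwise names and values misalign. A sketch of the shared decision (illustrative helper, not driver code):

	/* page_pool stats only exist when the hardware buffer manager is not used */
	static bool demo_want_page_pool_stats(const struct mvneta_port *pp)
	{
		return !pp->bm_priv;
	}
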
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 4762dbea64a1..97a71e9b8563 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
struct ethhdr *eth_hdr;
bool new = false;
int err = 0;
+ u64 vf_num;
u32 ring;
if (!flow_cfg->max_flows) {
@@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
- if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ /* The number of queues on a VF can be greater or less than
+ * the number on its PF, so there is no need to check the queue
+ * count when the PF is installing a rule for one of its VFs.
+ * Below is the expected vf_num value based on the ethtool command.
+ *
+ * e.g.
+ * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
+ * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
+ * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
+ * vf_num:vf_idx+1
+ */
+ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
+ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
if (fsp->location >= otx2_get_maxflows(flow_cfg))
@@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
flow_cfg->nr_flows++;
}
+ if (flow->is_vf)
+ netdev_info(pfvf->netdev,
+ "Make sure that VF's queue number is within its queue limit\n");
return 0;
}
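
A small sketch of how the 64-bit ring_cookie from ethtool -U decomposes into a ring index and a VF number, matching the vf_num values listed in the comment above; the two accessors are the existing helpers from include/uapi/linux/ethtool.h, while the wrapping function is illustrative only:

	#include <linux/types.h>
	#include <linux/printk.h>
	#include <linux/ethtool.h>

	static void demo_ring_cookie_split(u64 ring_cookie)
	{
		u64 ring = ethtool_get_flow_spec_ring(ring_cookie);	/* bits 31:0  */
		u64 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);	/* bits 39:32 */

		/* vf == 0 targets the PF itself; vf == vf_idx + 1 targets that VF */
		pr_info("ring %llu vf %llu\n", ring, vf);
	}
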
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 91b99fd70361..ba95ac913274 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1934,6 +1934,8 @@ int otx2_stop(struct net_device *netdev)
/* Clear RSS enable flag */
rss = &pf->hw.rss_info;
rss->enable = false;
+ if (!netif_is_rxfh_configured(netdev))
+ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Cleanup Queue IRQ */
vec = pci_irq_vector(pf->pdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index bb11e644d24f..af3928eddafd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
+ u8 *md_buff,
+ u8 *md_buff_sz,
int budget)
{
struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
@@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
- mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
+ md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
- struct mlx5_cqwq *cqwq = &cq->wq;
+ int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
+ u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
+ u8 metadata_buff_sz = 0;
+ struct mlx5_cqwq *cqwq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
+ cqwq = &cq->wq;
+
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
return false;
@@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
do {
mlx5_cqwq_pop(cqwq);
- mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
+ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
+ metadata_buff, &metadata_buff_sz, napi_budget);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
mlx5_cqwq_update_db_record(cqwq);
@@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
/* ensure cq space is freed before enabling more cqes */
wmb();
+ while (metadata_buff_sz > 0)
+ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
+ metadata_buff[--metadata_buff_sz]);
+
mlx5e_txqsq_wake(&ptpsq->txqsq);
return work_done == budget;
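
The PTP CQ polling change above collects metadata ids in an on-stack buffer while consuming CQEs and only returns them to the freelist after the barrier, so producers cannot reuse the entries before the CQ space is actually free. A minimal sketch of that batching pattern (all demo_* helpers are hypothetical):

	#include <linux/types.h>
	#include <asm/barrier.h>

	#define DEMO_BUDGET 16

	bool demo_have_completion(void);	/* hypothetical */
	u8 demo_pop_completion_id(void);	/* hypothetical */
	void demo_release_id(u8 id);		/* hypothetical */

	static void demo_poll(void)
	{
		u8 ids[DEMO_BUDGET];
		u8 n = 0;

		while (n < DEMO_BUDGET && demo_have_completion())
			ids[n++] = demo_pop_completion_id();	/* consume CQEs first */

		wmb();			/* CQ space freed before the ids are recycled */

		while (n > 0)
			demo_release_id(ids[--n]);	/* producers may now reuse them */
	}
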
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index fea8c0a5fe89..4358798d6ce1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -492,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
{
- char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_icosq *icosq = rq->icosq;
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {};
+ char icosq_str[32] = {};
err_ctx.ctx = rq;
err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
@@ -505,7 +505,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
if (icosq)
snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
snprintf(err_str, sizeof(err_str),
- "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
+ "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 00a04fdd756f..668da5c70e63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
- e->encap_size = ipv4_encap_size;
- e->encap_header = encap_header;
-
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);
@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto free_encap;
- e->encap_size = ipv4_encap_size;
- kfree(e->encap_header);
- e->encap_header = encap_header;
-
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto release_neigh;
+ goto free_encap;
}
memset(&reformat_params, 0, sizeof(reformat_params));
@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
goto free_encap;
}
+ e->encap_size = ipv4_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);
@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
- e->encap_size = ipv6_encap_size;
- e->encap_header = encap_header;
-
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);
@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto free_encap;
- e->encap_size = ipv6_encap_size;
- kfree(e->encap_header);
- e->encap_header = encap_header;
-
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto release_neigh;
+ goto free_encap;
}
memset(&reformat_params, 0, sizeof(reformat_params));
@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
+ e->encap_size = ipv6_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 215261a69255..792a0ea544cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ int count;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%04d (%.16s)",
- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
- mdev->board_id);
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+ if (count == sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
+
strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
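
The drvinfo change above relies on snprintf()'s return value, which is the length the full output would have had, to detect truncation and retry with a shorter format. The general idiom, sketched with made-up version numbers (the >= check is the slightly broader form of the comparison used in the hunk):

	#include <stdio.h>

	static void demo_fw_version(char *buf, size_t len)
	{
		int n = snprintf(buf, len, "%d.%d.%04d (%.16s)", 22, 36, 1010, "demo");

		if (n >= (int)len)	/* output would not fit: retry with the short form */
			snprintf(buf, len, "%d.%d.%04d", 22, 36, 1010);
	}
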
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 693e55b010d9..3ab682bbcf86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ int count;
strscpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%04d (%.16s)",
- fw_rev_maj(mdev), fw_rev_min(mdev),
- fw_rev_sub(mdev), mdev->board_id);
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+ if (count == sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
}
static const struct counter_desc sw_rep_stats_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9a5a5c2c7da9..7ca9e5b86778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3147,7 +3147,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
- OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
+ OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -3158,21 +3158,31 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
-static unsigned long mask_to_le(unsigned long mask, int size)
+static u32 mask_field_get(void *mask, struct mlx5_fields *f)
{
- __be32 mask_be32;
- __be16 mask_be16;
-
- if (size == 32) {
- mask_be32 = (__force __be32)(mask);
- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (size == 16) {
- mask_be32 = (__force __be32)(mask);
- mask_be16 = *(__be16 *)&mask_be32;
- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+ switch (f->field_bsize) {
+ case 32:
+ return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
+ case 16:
+ return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
+ default:
+ return *(u8 *)mask & (u8)f->field_mask;
}
+}
- return mask;
+static void mask_field_clear(void *mask, struct mlx5_fields *f)
+{
+ switch (f->field_bsize) {
+ case 32:
+ *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
+ break;
+ case 16:
+ *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
+ break;
+ default:
+ *(u8 *)mask &= ~(u8)f->field_mask;
+ break;
+ }
}
static int offload_pedit_fields(struct mlx5e_priv *priv,
@@ -3184,11 +3194,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
- u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
- unsigned long mask, field_mask;
+ void *s_masks_p, *a_masks_p;
int i, first, last, next_z;
struct mlx5_fields *f;
+ unsigned long mask;
+ u32 s_mask, a_mask;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
@@ -3204,15 +3215,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
bool skip;
f = &fields[i];
- /* avoid seeing bits set from previous iterations */
- s_mask = 0;
- a_mask = 0;
-
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
- s_mask = *s_masks_p & f->field_mask;
- a_mask = *a_masks_p & f->field_mask;
+ s_mask = mask_field_get(s_masks_p, f);
+ a_mask = mask_field_get(a_masks_p, f);
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
@@ -3239,22 +3246,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
- *s_masks_p &= ~f->field_mask;
+ mask_field_clear(s_masks_p, f);
} else {
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
- if ((*(u32 *)vals_p & f->field_mask) == 0)
+ if (!mask_field_get(vals_p, f))
skip = true;
/* clear to denote we consumed this field */
- *a_masks_p &= ~f->field_mask;
+ mask_field_clear(a_masks_p, f);
}
if (skip)
continue;
- mask = mask_to_le(mask, f->field_bsize);
-
first = find_first_bit(&mask, f->field_bsize);
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
last = find_last_bit(&mask, f->field_bsize);
@@ -3281,10 +3286,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
+ unsigned long field_mask = f->field_mask;
int start;
- field_mask = mask_to_le(f->field_mask, f->field_bsize);
-
/* if field is bit sized it can start not from first bit */
start = find_first_bit(&field_mask, f->field_bsize);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index d41435c22ce5..f0b506e562df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
mlx5e_skb_cb_hwtstamp_init(skb);
- mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
netif_tx_stop_queue(sq->txq);
@@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- dev_kfree_skb_any(skb);
if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
be32_to_cpu(eseg->flow_table_metadata));
+ dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ea0405e0a43f..40a6cb052a2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq *irq;
+ int cpu;
irq = xa_load(&table->comp_irqs, vecidx);
if (!irq)
return;
+ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+ cpumask_clear_cpu(cpu, &table->used_cpus);
xa_erase(&table->comp_irqs, vecidx);
mlx5_irq_affinity_irq_release(dev, irq);
}
@@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
- irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
- if (IS_ERR(irq)) {
- /* In case SF irq pool does not exist, fallback to the PF irqs*/
- if (PTR_ERR(irq) == -ENOENT)
- return comp_irq_request_pci(dev, vecidx);
+ /* In case the SF irq pool does not exist, fall back to the PF irqs */
+ if (!mlx5_irq_pool_is_sf_pool(pool))
+ return comp_irq_request_pci(dev, vecidx);
+ af_desc.is_managed = 1;
+ cpumask_copy(&af_desc.mask, cpu_online_mask);
+ cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
+ irq = mlx5_irq_affinity_request(pool, &af_desc);
+ if (IS_ERR(irq))
return PTR_ERR(irq);
- }
+
+ cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b296ac52a439..88236e75fd90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -984,7 +984,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
+ if (rep->vport == MLX5_VPORT_UPLINK &&
+ on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 047d5fed5f89..612e666ec263 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
}
-
-/**
- * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQ.
- * @used_cpus: cpumask of bounded cpus by the device
- * @vecidx: vector index to request an IRQ for.
- *
- * Each IRQ is bounded to at most 1 CPU.
- * This function is requesting an IRQ according to the default assignment.
- * The default assignment policy is:
- * - request the least loaded IRQ which is not bound to any
- * CPU of the previous IRQs requested.
- *
- * On success, this function updates used_cpus mask and returns an irq pointer.
- * In case of an error, an appropriate error pointer is returned.
- */
-struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
- struct cpumask *used_cpus, u16 vecidx)
-{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- struct irq_affinity_desc af_desc = {};
- struct mlx5_irq *irq;
-
- if (!mlx5_irq_pool_is_sf_pool(pool))
- return ERR_PTR(-ENOENT);
-
- af_desc.is_managed = 1;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
- irq = mlx5_irq_affinity_request(pool, &af_desc);
-
- if (IS_ERR(irq))
- return irq;
-
- cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
-
- return irq;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index aa29f09e8356..0c83ef174275 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
- return mlx5_ptp_adjtime(ptp, delta);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ return mlx5_ptp_adjtime_real_time(mdev, delta);
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 653648216730..4dcf995cb1a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -28,7 +28,7 @@
struct mlx5_irq {
struct atomic_notifier_head nh;
cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
+ char name[MLX5_MAX_IRQ_FORMATTED_NAME];
struct mlx5_irq_pool *pool;
int refcount;
struct msi_map map;
@@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
- "%s@pci:%s", name, pci_name(dev->pdev));
+ snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
+ MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
&irq->nh);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index d3a77a0ab848..c4d377f8df30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -7,6 +7,9 @@
#include <linux/mlx5/driver.h>
#define MLX5_MAX_IRQ_NAME (32)
+#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
+#define MLX5_MAX_IRQ_FORMATTED_NAME \
+ (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
/* max irq_index is 2047, so four chars */
#define MLX5_MAX_IRQ_IDX_CHARS (4)
#define MLX5_EQ_REFS_PER_IRQ (2)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6ea88a581804..e3ec559369fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -57,7 +57,8 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+ return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 4e8527a724f5..6fa06ba2d346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
u32 cqn;
u32 pdn;
u32 max_send_wr;
- u32 max_send_sge;
struct mlx5_uars_page *uar;
u8 isolate_vl_tc:1;
};
@@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
-static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
-{
- return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
- sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
-}
-
-/* We calculate for specific RC QP with the required functionality */
-static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
-{
- int update_arg_size;
- int inl_size = 0;
- int tot_size;
- int size;
-
- update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
-
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
- inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
- DR_STE_SIZE, 16);
-
- size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
-
- size = max(size, update_arg_size);
-
- tot_size = max(size, inl_size);
-
- return ALIGN(tot_size, MLX5_SEND_WQE_BB);
-}
-
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
@@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
- int wqe_size;
int inlen;
void *qpc;
void *in;
@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
if (err)
goto err_in;
dr_qp->uar = attr->uar;
- wqe_size = dr_qp_calc_rc_send_wqe(attr);
- dr_qp->max_inline_data = min(wqe_size -
- (sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_inline_seg)),
- (2 * MLX5_SEND_WQE_BB -
- (sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_inline_seg))));
return dr_qp;
@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
MLX5_SEND_WQE_DS;
}
-static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
- struct dr_data_seg *data_seg, void *wqe)
-{
- int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_inline_seg);
- struct mlx5_wqe_inline_seg *seg;
- int left_space;
- int inl = 0;
- void *addr;
- int len;
- int idx;
-
- seg = wqe;
- wqe += sizeof(*seg);
- addr = (void *)(unsigned long)(data_seg->addr);
- len = data_seg->length;
- inl += len;
- left_space = MLX5_SEND_WQE_BB - inline_header_size;
-
- if (likely(len > left_space)) {
- memcpy(wqe, addr, left_space);
- len -= left_space;
- addr += left_space;
- idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
- wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
- }
-
- memcpy(wqe, addr, len);
-
- if (likely(inl)) {
- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
- return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
- MLX5_SEND_WQE_DS);
- } else {
- return 0;
- }
-}
-
static void
-dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
- struct mlx5_wqe_ctrl_seg *wq_ctrl,
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
u64 remote_addr,
u32 rkey,
struct dr_data_seg *data_seg,
@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
wq_raddr->reserved = 0;
wq_dseg = (void *)(wq_raddr + 1);
- /* WQE ctrl segment + WQE remote addr segment */
- *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
- if (data_seg->send_flags & IB_SEND_INLINE) {
- *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
- } else {
- wq_dseg->byte_count = cpu_to_be32(data_seg->length);
- wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
- wq_dseg->addr = cpu_to_be64(data_seg->addr);
- *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
- }
+ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+ wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
+ sizeof(*wq_dseg) + /* WQE data segment */
+ sizeof(*wq_raddr)) / /* WQE remote addr segment */
+ MLX5_SEND_WQE_DS;
}
static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
switch (opcode) {
case MLX5_OPCODE_RDMA_READ:
case MLX5_OPCODE_RDMA_WRITE:
- dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
+ dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
rkey, data_seg, &size);
break;
case MLX5_OPCODE_FLOW_TBL_ACCESS:
@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
else
- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
+ send_info->write.send_flags = 0;
}
static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
}
send_ring->pending_wqe++;
- if (!send_info->write.lkey)
- send_info->write.send_flags |= IB_SEND_INLINE;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
- else
- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
send_ring->pending_wqe++;
send_info->read.length = send_info->write.length;
@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
send_info->read.lkey = send_ring->sync_mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
- send_info->read.send_flags |= IB_SEND_SIGNALED;
+ send_info->read.send_flags = IB_SEND_SIGNALED;
else
- send_info->read.send_flags &= ~IB_SEND_SIGNALED;
+ send_info->read.send_flags = 0;
}
static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->cq->qp = dmn->send_ring->qp;
dmn->info.max_send_wr = QUEUE_SIZE;
- init_attr.max_send_sge = 1;
dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
DR_STE_SIZE);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0c76c162b8a9..295366a85c63 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -624,6 +624,7 @@ struct rtl8169_private {
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
+ unsigned dash_enabled:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -1253,14 +1254,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
return r8168ep_ocp_read(tp, 0x128) & BIT(0);
}
-static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
+static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
+{
+ switch (tp->dash_type) {
+ case RTL_DASH_DP:
+ return r8168dp_check_dash(tp);
+ case RTL_DASH_EP:
+ return r8168ep_check_dash(tp);
+ default:
+ return false;
+ }
+}
+
+static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
+ return RTL_DASH_DP;
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
- return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
+ return RTL_DASH_EP;
default:
return RTL_DASH_NONE;
}
@@ -1453,7 +1466,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
- if (tp->dash_type == RTL_DASH_NONE) {
+ if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, !wolopts);
tp->dev->wol_enabled = wolopts ? 1 : 0;
}
@@ -2512,7 +2525,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
- if (tp->dash_type != RTL_DASH_NONE)
+ if (tp->dash_enabled)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
@@ -2586,9 +2599,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode &= ~AcceptMulticast;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
- tp->mac_version == RTL_GIGA_MAC_VER_48) {
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
/* accept all multicasts */
} else if (netdev_mc_empty(dev)) {
rx_mode &= ~AcceptMulticast;
@@ -4648,10 +4659,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl8169_cleanup(tp);
rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
+
+ if (tp->dash_type != RTL_DASH_NONE)
+ rtl8168_driver_stop(tp);
}
static void rtl8169_up(struct rtl8169_private *tp)
{
+ if (tp->dash_type != RTL_DASH_NONE)
+ rtl8168_driver_start(tp);
+
pci_set_master(tp->pci_dev);
phy_init_hw(tp->phydev);
phy_resume(tp->phydev);
@@ -4869,7 +4886,7 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
- if (tp->dash_type != RTL_DASH_NONE)
+ if (tp->dash_enabled)
return -EBUSY;
if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
@@ -4895,8 +4912,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF &&
- tp->dash_type == RTL_DASH_NONE) {
+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -5254,7 +5270,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
tp->aspm_manageable = !rc;
- tp->dash_type = rtl_check_dash(tp);
+ tp->dash_type = rtl_get_dash_type(tp);
+ tp->dash_enabled = rtl_dash_is_enabled(tp);
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
@@ -5325,7 +5342,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
- if (tp->dash_type == RTL_DASH_NONE) {
+ if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, true);
} else {
rtl_set_d3_pll_down(tp, false);
@@ -5365,7 +5382,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"ok" : "ko");
if (tp->dash_type != RTL_DASH_NONE) {
- netdev_info(dev, "DASH enabled\n");
+ netdev_info(dev, "DASH %s\n",
+ tp->dash_enabled ? "enabled" : "disabled");
rtl8168_driver_start(tp);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index a2b9e289aa36..85dcda51df05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -280,7 +280,7 @@ config DWMAC_INTEL
config DWMAC_LOONGSON
tristate "Loongson PCI DWMAC support"
default MACH_LOONGSON64
- depends on STMMAC_ETH && PCI
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
depends on COMMON_CLK
help
This selects the LOONGSON PCI bus support for the stmmac driver,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3e50fd53a617..2afb2bd25977 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5293,6 +5293,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5328,10 +5329,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
buf2_len = 0;
entry = next_entry;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 6c4b64227ac8..411898a4f38c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -2063,7 +2063,7 @@ static int prueth_probe(struct platform_device *pdev)
&prueth->shram);
if (ret) {
dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
- pruss_put(prueth->pruss);
+ goto put_pruss;
}
prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
@@ -2105,10 +2105,7 @@ static int prueth_probe(struct platform_device *pdev)
prueth->iep1 = icss_iep_get_idx(np, 1);
if (IS_ERR(prueth->iep1)) {
ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
- icss_iep_put(prueth->iep0);
- prueth->iep0 = NULL;
- prueth->iep1 = NULL;
- goto free_pool;
+ goto put_iep0;
}
if (prueth->pdata.quirk_10m_link_issue) {
@@ -2205,6 +2202,12 @@ netdev_exit:
exit_iep:
if (prueth->pdata.quirk_10m_link_issue)
icss_iep_exit_fw(prueth->iep1);
+ icss_iep_put(prueth->iep1);
+
+put_iep0:
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
free_pool:
gen_pool_free(prueth->sram_pool,
@@ -2212,6 +2215,8 @@ free_pool:
put_mem:
pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+put_pruss:
pruss_put(prueth->pruss);
put_cores:
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index a3c5de9d547a..533e912af089 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1769,10 +1769,12 @@ int wx_sw_init(struct wx *wx)
wx->subsystem_device_id = pdev->subsystem_device;
} else {
err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
- if (!err)
- wx->subsystem_device_id = swab16((u16)ssid);
+ if (err < 0) {
+ wx_err(wx, "read of internal subsystem device id failed\n");
+ return err;
+ }
- return err;
+ wx->subsystem_device_id = swab16((u16)ssid);
}
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 3d43f808c86b..8db804543e66 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx)
/* PCI config space info */
err = wx_sw_init(wx);
- if (err < 0) {
- wx_err(wx, "read of internal subsystem device id failed\n");
+ if (err < 0)
return err;
- }
/* mac type, phy type , oem type */
ngbe_init_type_code(wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 70f0b5c01dac..526250102db2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -364,10 +364,8 @@ static int txgbe_sw_init(struct wx *wx)
/* PCI config space info */
err = wx_sw_init(wx);
- if (err < 0) {
- wx_err(wx, "read of internal subsystem device id failed\n");
+ if (err < 0)
return err;
- }
txgbe_init_type_code(wx);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 82d0d44b2b02..bf6e33990490 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -822,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
/* Tx Full Checksum Offload Enabled */
cur_p->app0 |= 2;
- } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
+ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
csum_start_off = skb_transport_offset(skb);
csum_index_off = csum_start_off + skb->csum_offset;
/* Tx Partial Checksum Offload Enabled */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3ba3c8fb28a5..706ea5263e87 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2206,9 +2206,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
- /* set slave flag before open to prevent IPv6 addrconf */
- vf_netdev->flags |= IFF_SLAVE;
-
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2315,16 +2312,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
}
- /* Fallback path to check synthetic vf with
- * help of mac addr
+ /* Fallback path to check synthetic vf with help of mac addr.
+ * Because this function can be called before vf_netdev is
+ * initialized (NETDEV_POST_INIT), when its perm_addr has not yet been
+ * copied from dev_addr, also try to match against its dev_addr.
+ * Note: On Hyper-V and Azure, it's not possible to set a MAC address
+ * on a VF that matches the MAC of an unrelated NETVSC device.
*/
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
- if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
- netdev_notice(vf_netdev,
- "falling back to mac addr based matching\n");
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
+ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
return ndev;
- }
}
netdev_notice(vf_netdev,
@@ -2332,6 +2331,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return NULL;
}
+static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+
+ ndev = get_netvsc_byslot(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ /* set slave flag before open to prevent IPv6 addrconf */
+ vf_netdev->flags |= IFF_SLAVE;
+ return NOTIFY_DONE;
+}
+
static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
@@ -2531,15 +2543,6 @@ static int netvsc_probe(struct hv_device *dev,
goto devinfo_failed;
}
- nvdev = rndis_filter_device_add(dev, device_info);
- if (IS_ERR(nvdev)) {
- ret = PTR_ERR(nvdev);
- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- goto rndis_failed;
- }
-
- eth_hw_addr_set(net, device_info->mac_adr);
-
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* all subchannels to show up, but that may not happen because
@@ -2547,9 +2550,23 @@ static int netvsc_probe(struct hv_device *dev,
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs forever.
+ *
+ * The rtnl lock also needs to be held before rndis_filter_device_add()
+ * which advertises the nvsp_2_vsc_capability / sriov bit and triggers
+ * VF NIC offering and registering. If the VF NIC finishes register_netdev()
+ * earlier, it may cause a name-based config failure.
*/
rtnl_lock();
+ nvdev = rndis_filter_device_add(dev, device_info);
+ if (IS_ERR(nvdev)) {
+ ret = PTR_ERR(nvdev);
+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+ goto rndis_failed;
+ }
+
+ eth_hw_addr_set(net, device_info->mac_adr);
+
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
@@ -2586,9 +2603,9 @@ static int netvsc_probe(struct hv_device *dev,
return 0;
register_failed:
- rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
+ rtnl_unlock();
netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
@@ -2753,6 +2770,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
switch (event) {
+ case NETDEV_POST_INIT:
+ return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
case NETDEV_UNREGISTER:
@@ -2788,12 +2807,17 @@ static int __init netvsc_drv_init(void)
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
- return ret;
+ goto err_vmbus_reg;
- register_netdevice_notifier(&netvsc_netdev_notifier);
return 0;
+
+err_vmbus_reg:
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
+ return ret;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
index d7b81a36d673..145eb0bd096d 100644
--- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
+++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
@@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
- [R_LENGTH] = GENMASK(19, 0),
+ [R_LENGTH] = GENMASK(23, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 21e9cac73121..2d5b021b4ea6 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
return addr;
}
-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
@@ -453,13 +453,11 @@ out:
}
#if IS_ENABLED(CONFIG_IPV6)
-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+
+static noinline_for_stack int
+ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- struct net_device *dev = skb->dev;
- struct net *net = dev_net(dev);
- struct dst_entry *dst;
- int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
.flowi6_oif = dev->ifindex,
.daddr = ip6h->daddr,
@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
.flowi6_mark = skb->mark,
.flowi6_proto = ip6h->nexthdr,
};
+ struct dst_entry *dst;
+ int err;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst->error) {
- ret = dst->error;
+ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
+ err = dst->error;
+ if (err) {
dst_release(dst);
- goto err;
+ return err;
}
skb_dst_set(skb, dst);
+ return 0;
+}
+
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ int err, ret = NET_XMIT_DROP;
+
+ err = ipvlan_route_v6_outbound(dev, skb);
+ if (unlikely(err)) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return err;
+ }
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
- err = ip6_local_out(net, skb->sk, skb);
+ err = ip6_local_out(dev_net(dev), skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
- goto out;
-err:
- DEV_STATS_INC(dev, tx_errors);
- kfree_skb(skb);
-out:
return ret;
}
#else
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 02bd201bc7e5..c8da94af4161 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
+ if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
dev_set_promiscuity(lowerdev,
dev->flags & IFF_PROMISC ? 1 : -1);
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 5a0f86f38f09..97bd6705c241 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -7,6 +7,7 @@
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/netkit.h>
#include <net/dst.h>
@@ -68,6 +69,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
const struct bpf_mprog_entry *entry;
struct net_device *peer;
+ int len = skb->len;
rcu_read_lock();
peer = rcu_dereference(nk->peer);
@@ -85,15 +87,22 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
case NETKIT_PASS:
skb->protocol = eth_type_trans(skb, skb->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
- __netif_rx(skb);
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
+ dev_sw_netstats_tx_add(dev, 1, len);
+ dev_sw_netstats_rx_add(peer, len);
+ } else {
+ goto drop_stats;
+ }
break;
case NETKIT_REDIRECT:
+ dev_sw_netstats_tx_add(dev, 1, len);
skb_do_redirect(skb);
break;
case NETKIT_DROP:
default:
drop:
kfree_skb(skb);
+drop_stats:
dev_core_stats_tx_dropped_inc(dev);
ret_dev = NET_XMIT_DROP;
break;
@@ -169,11 +178,18 @@ out:
rcu_read_unlock();
}
-static struct net_device *netkit_peer_dev(struct net_device *dev)
+INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
return rcu_dereference(netkit_priv(dev)->peer);
}
+static void netkit_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ dev_fetch_sw_netstats(stats, dev->tstats);
+ stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+}
+
static void netkit_uninit(struct net_device *dev);
static const struct net_device_ops netkit_netdev_ops = {
@@ -184,6 +200,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_set_rx_headroom = netkit_set_headroom,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
+ .ndo_get_stats64 = netkit_get_stats,
.ndo_uninit = netkit_uninit,
.ndo_features_check = passthru_features_check,
};
@@ -218,6 +235,7 @@ static void netkit_setup(struct net_device *dev)
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->flags |= IFF_NOARP;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index ebcdffdf4f0e..52d05ce4a281 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -453,6 +453,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, (int __user *) argp))
break;
+ if (val > U16_MAX) {
+ err = -EINVAL;
+ break;
+ }
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
@@ -687,7 +691,7 @@ ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
/* strip address/control field if present */
p = skb->data;
- if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
/* chop off address/control */
if (skb->len < 3)
goto err;
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index a017e9de2119..7b8afa589a53 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 pkt_count = 0;
u64 desc_hdr = 0;
u16 vlan_tag = 0;
- u32 skb_len = 0;
+ u32 skb_len;
if (!skb)
goto err;
- if (skb->len == 0)
+ skb_len = skb->len;
+ if (skb_len < sizeof(desc_hdr))
goto err;
- skb_len = skb->len;
/* RX Descriptor Header */
- skb_trim(skb, skb->len - sizeof(desc_hdr));
+ skb_trim(skb, skb_len - sizeof(desc_hdr));
desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
/* Check these packets */
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index aff39bf3161d..4ea0e155bb0d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- msleep(200);
+ msleep(500);
*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
- msleep(100);
+ msleep(200);
/* Ethernet PHY Auto Detach*/
ax88179_auto_detach(dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 344af3c5c836..e2e181378f41 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1289,6 +1289,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */
{QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9980517ed8b0..57efb3454c57 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev,
data[tx_idx + j] += *(u64 *)(base + offset);
}
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
- pp_idx = tx_idx + VETH_TQ_STATS_LEN;
}
+ pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
page_pool_stats:
veth_get_page_pool_stats(dev, &data[pp_idx]);
@@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
if (!use_napi)
- dev_lstats_add(dev, length);
+ dev_sw_netstats_tx_add(dev, 1, length);
else
__veth_xdp_flush(rq);
} else {
@@ -387,14 +387,6 @@ drop:
return ret;
}
-static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
-{
- struct veth_priv *priv = netdev_priv(dev);
-
- dev_lstats_read(dev, packets, bytes);
- return atomic64_read(&priv->dropped);
-}
-
static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev,
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct veth_stats rx;
- u64 packets, bytes;
- tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
- tot->tx_bytes = bytes;
- tot->tx_packets = packets;
+ tot->tx_dropped = atomic64_read(&priv->dropped);
+ dev_fetch_sw_netstats(tot, dev->tstats);
veth_stats_rx(&rx, dev);
tot->tx_dropped += rx.xdp_tx_err;
tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
- tot->rx_bytes = rx.xdp_bytes;
- tot->rx_packets = rx.xdp_packets;
+ tot->rx_bytes += rx.xdp_bytes;
+ tot->rx_packets += rx.xdp_packets;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- veth_stats_tx(peer, &packets, &bytes);
- tot->rx_bytes += bytes;
- tot->rx_packets += packets;
+ struct rtnl_link_stats64 tot_peer = {};
+
+ dev_fetch_sw_netstats(&tot_peer, peer->tstats);
+ tot->rx_bytes += tot_peer.tx_bytes;
+ tot->rx_packets += tot_peer.tx_packets;
veth_stats_rx(&rx, peer);
tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
@@ -1506,25 +1498,12 @@ static void veth_free_queues(struct net_device *dev)
static int veth_dev_init(struct net_device *dev)
{
- int err;
-
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
-
- err = veth_alloc_queues(dev);
- if (err) {
- free_percpu(dev->lstats);
- return err;
- }
-
- return 0;
+ return veth_alloc_queues(dev);
}
static void veth_dev_free(struct net_device *dev)
{
veth_free_queues(dev);
- free_percpu(dev->lstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1796,6 +1775,7 @@ static void veth_setup(struct net_device *dev)
NETIF_F_HW_VLAN_STAG_RX);
dev->needs_free_netdev = true;
dev->priv_destructor = veth_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->max_mtu = ETH_MAX_MTU;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index db766941b78f..bb95ce43cd97 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -121,22 +121,12 @@ struct net_vrf {
int ifindex;
};
-struct pcpu_dstats {
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_drps;
- u64 rx_pkts;
- u64 rx_bytes;
- u64 rx_drps;
- struct u64_stats_sync syncp;
-};
-
static void vrf_rx_stats(struct net_device *dev, int len)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->rx_pkts++;
+ dstats->rx_packets++;
dstats->rx_bytes += len;
u64_stats_update_end(&dstats->syncp);
}
@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
do {
start = u64_stats_fetch_begin(&dstats->syncp);
tbytes = dstats->tx_bytes;
- tpkts = dstats->tx_pkts;
- tdrops = dstats->tx_drps;
+ tpkts = dstats->tx_packets;
+ tdrops = dstats->tx_drops;
rbytes = dstats->rx_bytes;
- rpkts = dstats->rx_pkts;
+ rpkts = dstats->rx_packets;
} while (u64_stats_fetch_retry(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
- this_cpu_inc(dev->dstats->rx_drps);
+ this_cpu_inc(dev->dstats->rx_drops);
return NETDEV_TX_OK;
}
@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->tx_pkts++;
+ dstats->tx_packets++;
dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
- this_cpu_inc(dev->dstats->tx_drps);
+ this_cpu_inc(dev->dstats->tx_drops);
}
return ret;
@@ -1174,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev)
vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);
-
- free_percpu(dev->dstats);
- dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
- if (!dev->dstats)
- goto out_nomem;
-
/* create the default dst which points back to us */
if (vrf_rtable_create(dev) != 0)
- goto out_stats;
+ goto out_nomem;
if (vrf_rt6_create(dev) != 0)
goto out_rth;
@@ -1203,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev)
out_rth:
vrf_rtable_release(dev, vrf);
-out_stats:
- free_percpu(dev->dstats);
- dev->dstats = NULL;
out_nomem:
return -ENOMEM;
}
@@ -1704,6 +1684,8 @@ static void vrf_setup(struct net_device *dev)
dev->min_mtu = IPV6_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU;
dev->mtu = dev->max_mtu;
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 258dcc103921..deb9636b0ecf 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
*/
while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
- ++dev->stats.tx_dropped;
+ DEV_STATS_INC(dev, tx_dropped);
}
skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
@@ -228,7 +228,7 @@ err_icmp:
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
- ++dev->stats.tx_errors;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
return ret;
}
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 0b3f0c843550..a176653c8861 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -416,20 +416,20 @@ dishonest_packet_peer:
net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
dev->name, skb, peer->internal_id,
&peer->endpoint.addr);
- ++dev->stats.rx_errors;
- ++dev->stats.rx_frame_errors;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_frame_errors);
goto packet_processed;
dishonest_packet_type:
net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
- ++dev->stats.rx_errors;
- ++dev->stats.rx_frame_errors;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_frame_errors);
goto packet_processed;
dishonest_packet_size:
net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
- ++dev->stats.rx_errors;
- ++dev->stats.rx_length_errors;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_length_errors);
goto packet_processed;
packet_processed:
dev_kfree_skb(skb);
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 95c853b59e1d..0d48e0f4a1ba 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -333,7 +333,8 @@ err:
void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
spin_lock_bh(&peer->staged_packet_queue.lock);
- peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+ DEV_STATS_ADD(peer->device->dev, tx_dropped,
+ peer->staged_packet_queue.qlen);
__skb_queue_purge(&peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
}
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index b027be0b0b6f..590b038e449e 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -26,10 +26,14 @@ struct virtual_nci_dev {
struct mutex mtx;
struct sk_buff *send_buff;
struct wait_queue_head wq;
+ bool running;
};
static int virtual_nci_open(struct nci_dev *ndev)
{
+ struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
+
+ vdev->running = true;
return 0;
}
@@ -40,6 +44,7 @@ static int virtual_nci_close(struct nci_dev *ndev)
mutex_lock(&vdev->mtx);
kfree_skb(vdev->send_buff);
vdev->send_buff = NULL;
+ vdev->running = false;
mutex_unlock(&vdev->mtx);
return 0;
@@ -50,7 +55,7 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
mutex_lock(&vdev->mtx);
- if (vdev->send_buff) {
+ if (vdev->send_buff || !vdev->running) {
mutex_unlock(&vdev->mtx);
kfree_skb(skb);
return -1;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 48328e36e93b..72c0525c75f5 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -757,12 +757,11 @@ static void nvme_queue_auth_work(struct work_struct *work)
__func__, chap->qid);
mutex_lock(&ctrl->dhchap_auth_mutex);
ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
if (ret) {
- mutex_unlock(&ctrl->dhchap_auth_mutex);
chap->error = ret;
goto fail2;
}
- mutex_unlock(&ctrl->dhchap_auth_mutex);
/* DH-HMAC-CHAP Step 3: send reply */
dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@@ -839,6 +838,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
}
fail2:
+ if (chap->status == 0)
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
__func__, chap->qid, chap->status);
tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 88b54cdcbd68..46a4c9c5ea96 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -482,7 +482,6 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
- nvme_stop_keep_alive(ctrl);
if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
nvme_cancel_request, ctrl);
@@ -1814,16 +1813,18 @@ set_pi:
return ret;
}
-static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
- if (nvme_init_ms(ns, id))
- return;
+ ret = nvme_init_ms(ns, id);
+ if (ret)
+ return ret;
ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return;
+ return 0;
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
@@ -1832,7 +1833,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return;
+ return 0;
ns->features |= NVME_NS_EXT_LBAS;
@@ -1859,6 +1860,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
else
ns->features |= NVME_NS_METADATA_SUPPORTED;
}
+ return 0;
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -2032,7 +2034,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
- nvme_configure_metadata(ns, id);
+ ret = nvme_configure_metadata(ns, id);
+ if (ret < 0) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id);
@@ -4348,6 +4354,7 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
nvme_auth_stop(ctrl);
+ nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 4673ead69c5f..aa88606a44c4 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -667,8 +667,10 @@ static const match_table_t opt_tokens = {
#endif
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+#ifdef CONFIG_NVME_HOST_AUTH
{ NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
{ NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
+#endif
#ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_TLS, "tls" },
#endif
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 49c3e46eaa1e..9f9a3b35dc64 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2530,12 +2530,6 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* clean up the admin queue. Same thing as above.
*/
nvme_quiesce_admin_queue(&ctrl->ctrl);
-
- /*
- * Open-coding nvme_cancel_admin_tagset() as fc
- * is not using nvme_cancel_request().
- */
- nvme_stop_keep_alive(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -3138,11 +3132,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
nvme_unquiesce_admin_queue(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
- if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
- ret = -EIO;
if (ret)
goto out_disconnect_admin_queue;
-
+ if (test_bit(ASSOC_FAILED, &ctrl->flags)) {
+ ret = -EIO;
+ goto out_stop_keep_alive;
+ }
/* sanity checks */
/* FC-NVME does not have other data in the capsule */
@@ -3150,7 +3145,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_disconnect_admin_queue;
+ goto out_stop_keep_alive;
}
/* FC-NVME supports normal SGL Data Block Descriptors */
@@ -3158,7 +3153,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_disconnect_admin_queue;
+ goto out_stop_keep_alive;
}
if (opts->queue_size > ctrl->ctrl.maxcmd) {
@@ -3205,6 +3200,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
out_term_aen_ops:
nvme_fc_term_aen_ops(ctrl);
+out_stop_keep_alive:
+ nvme_stop_keep_alive(&ctrl->ctrl);
out_disconnect_admin_queue:
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a7fea4cbacd7..6d178d555920 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1080,6 +1080,7 @@ destroy_io:
nvme_rdma_free_io_queues(ctrl);
}
destroy_admin:
+ nvme_stop_keep_alive(&ctrl->ctrl);
nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 89661a9cf850..d79811cfa0ce 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -36,11 +36,11 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
-#ifdef CONFIG_NVME_TCP_TLS
/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
+#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
@@ -161,10 +161,8 @@ struct nvme_tcp_queue {
struct ahash_request *snd_hash;
__le32 exp_ddgst;
__le32 recv_ddgst;
-#ifdef CONFIG_NVME_TCP_TLS
struct completion tls_complete;
int tls_err;
-#endif
struct page_frag_cache pf_cache;
void (*state_change)(struct sock *);
@@ -207,6 +205,14 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
+{
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+ return 0;
+
+ return ctrl->opts->tls;
+}
+
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
u32 queue_idx = nvme_tcp_queue_id(queue);
@@ -1412,7 +1418,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
- if (queue->ctrl->ctrl.opts->tls) {
+ if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
@@ -1424,7 +1430,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
ret = -ENOTCONN;
- if (queue->ctrl->ctrl.opts->tls) {
+ if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
@@ -1548,7 +1554,6 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
-#ifdef CONFIG_NVME_TCP_TLS
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
{
struct nvme_tcp_queue *queue = data;
@@ -1625,14 +1630,6 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
}
return ret;
}
-#else
-static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
- struct nvme_tcp_queue *queue,
- key_serial_t pskid)
-{
- return -EPROTONOSUPPORT;
-}
-#endif
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
key_serial_t pskid)
@@ -1759,7 +1756,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
}
/* If PSKs are configured try to start TLS */
- if (pskid) {
+ if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
@@ -1916,7 +1913,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret;
key_serial_t pskid = 0;
- if (ctrl->opts->tls) {
+ if (nvme_tcp_tls(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
else
@@ -1949,7 +1946,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (ctrl->opts->tls && !ctrl->tls_key) {
+ if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
@@ -2237,6 +2234,7 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
+ nvme_stop_keep_alive(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
return ret;
}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 31633da9427c..e1ebc73f3e5e 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -4,6 +4,8 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select NVME_KEYRING if NVME_TARGET_TCP_TLS
+ select KEYS if NVME_TARGET_TCP_TLS
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
@@ -87,9 +89,7 @@ config NVME_TARGET_TCP
config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP
- select NVME_KEYRING
select NET_HANDSHAKE
- select KEYS
help
Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 9eed6e6765ea..e307a044b1a1 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1893,7 +1893,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
return ERR_PTR(-ENOMEM);
}
- if (nvme_keyring_id()) {
+ if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
port->keyring = key_lookup(nvme_keyring_id());
if (IS_ERR(port->keyring)) {
pr_warn("NVMe keyring not available, disabling TLS\n");
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 43b5bd8bb6a5..d8da840a1c0e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
if (status)
@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
le16_to_cpu(d->cntlid), req);
if (!ctrl) {
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 92b74d0b8686..4cc27856aa8f 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1854,6 +1854,8 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
}
return ret;
}
+#else
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
#endif
static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1911,9 +1913,9 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
mutex_unlock(&nvmet_tcp_queue_mutex);
-#ifdef CONFIG_NVME_TARGET_TCP_TLS
INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
nvmet_tcp_tls_handshake_timeout);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
struct sock *sk = queue->sock->sk;
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
index 539d8920c202..bb0d92461b08 100644
--- a/drivers/parisc/power.c
+++ b/drivers/parisc/power.c
@@ -176,7 +176,7 @@ static struct notifier_block parisc_panic_block = {
static int qemu_power_off(struct sys_off_data *data)
{
/* this turns the system off via SeaBIOS */
- *(int *)data->cb_data = 0;
+ gsc_writel(0, (unsigned long) data->cb_data);
pdc_soft_power_button(1);
return NOTIFY_DONE;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 787354b849c7..4cef568231bf 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -87,7 +87,6 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
-source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 868a220ed0f6..fb3dc9de6111 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -26,7 +26,6 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
- realtek/ \
renesas/ \
rockchip/ \
samsung/ \
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index d891058b7c39..846f8c99547f 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -63,7 +63,7 @@ config PHY_QCOM_QMP_COMBO
depends on DRM || DRM=n
select GENERIC_PHY
select MFD_SYSCON
- select DRM_PANEL_BRIDGE if DRM
+ select DRM_AUX_BRIDGE if DRM_BRIDGE
help
Enable this to support the QMP Combo PHY transceiver that is used
with USB3 and DisplayPort controllers on Qualcomm chips.
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 9c87845c78ec..f6c727249104 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -21,7 +21,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/typec_mux.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
@@ -1419,8 +1419,6 @@ struct qmp_combo {
struct clk_hw dp_link_hw;
struct clk_hw dp_pixel_hw;
- struct drm_bridge bridge;
-
struct typec_switch_dev *sw;
enum typec_orientation orientation;
};
@@ -3191,44 +3189,6 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
}
#endif
-#if IS_ENABLED(CONFIG_DRM)
-static int qmp_combo_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct qmp_combo *qmp = container_of(bridge, struct qmp_combo, bridge);
- struct drm_bridge *next_bridge;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
- return -EINVAL;
-
- next_bridge = devm_drm_of_get_bridge(qmp->dev, qmp->dev->of_node, 0, 0);
- if (IS_ERR(next_bridge)) {
- dev_err(qmp->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
- return PTR_ERR(next_bridge);
- }
-
- return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs qmp_combo_bridge_funcs = {
- .attach = qmp_combo_bridge_attach,
-};
-
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
- qmp->bridge.funcs = &qmp_combo_bridge_funcs;
- qmp->bridge.of_node = qmp->dev->of_node;
-
- return devm_drm_bridge_add(qmp->dev, &qmp->bridge);
-}
-#else
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
- return 0;
-}
-#endif
-
static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np)
{
struct device *dev = qmp->dev;
@@ -3440,7 +3400,7 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_combo_dp_register_bridge(qmp);
+ ret = drm_aux_bridge_register(dev);
if (ret)
return ret;
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
deleted file mode 100644
index 75ac7e7c31ae..000000000000
--- a/drivers/phy/realtek/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Phy drivers for Realtek platforms
-#
-
-if ARCH_REALTEK || COMPILE_TEST
-
-config PHY_RTK_RTD_USB2PHY
- tristate "Realtek RTD USB2 PHY Transceiver Driver"
- depends on USB_SUPPORT
- select GENERIC_PHY
- select USB_PHY
- select USB_COMMON
- help
- Enable this to support Realtek SoC USB2 phy transceiver.
- The DHC (digital home center) RTD series SoCs used the Synopsys
- DWC3 USB IP. This driver will do the PHY initialization
- of the parameters.
-
-config PHY_RTK_RTD_USB3PHY
- tristate "Realtek RTD USB3 PHY Transceiver Driver"
- depends on USB_SUPPORT
- select GENERIC_PHY
- select USB_PHY
- select USB_COMMON
- help
- Enable this to support Realtek SoC USB3 phy transceiver.
- The DHC (digital home center) RTD series SoCs used the Synopsys
- DWC3 USB IP. This driver will do the PHY initialization
- of the parameters.
-
-endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
deleted file mode 100644
index ed7b47ff8a26..000000000000
--- a/drivers/phy/realtek/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
-obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
deleted file mode 100644
index 0a6426285c67..000000000000
--- a/drivers/phy/realtek/phy-rtk-usb2.c
+++ /dev/null
@@ -1,1325 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * phy-rtk-usb2.c RTK usb2.0 PHY driver
- *
- * Copyright (C) 2023 Realtek Semiconductor Corporation
- *
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/regmap.h>
-#include <linux/sys_soc.h>
-#include <linux/mfd/syscon.h>
-#include <linux/phy/phy.h>
-#include <linux/usb.h>
-#include <linux/usb/phy.h>
-#include <linux/usb/hcd.h>
-
-/* GUSB2PHYACCn register */
-#define PHY_NEW_REG_REQ BIT(25)
-#define PHY_VSTS_BUSY BIT(23)
-#define PHY_VCTRL_SHIFT 8
-#define PHY_REG_DATA_MASK 0xff
-
-#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
-#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
-
-#define EFUS_USB_DC_CAL_RATE 2
-#define EFUS_USB_DC_CAL_MAX 7
-
-#define EFUS_USB_DC_DIS_RATE 1
-#define EFUS_USB_DC_DIS_MAX 7
-
-#define MAX_PHY_DATA_SIZE 20
-#define OFFEST_PHY_READ 0x20
-
-#define MAX_USB_PHY_NUM 4
-#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
-#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
-#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
-
-#define SET_PAGE_OFFSET 0xf4
-#define SET_PAGE_0 0x9b
-#define SET_PAGE_1 0xbb
-#define SET_PAGE_2 0xdb
-
-#define PAGE_START 0xe0
-#define PAGE0_0XE4 0xe4
-#define PAGE0_0XE6 0xe6
-#define PAGE0_0XE7 0xe7
-#define PAGE1_0XE0 0xe0
-#define PAGE1_0XE2 0xe2
-
-#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
-#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
-#define DEFAULT_DC_DRIVING_VALUE (0x8)
-#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
-#define HS_CLK_SELECT BIT(6)
-
-struct phy_reg {
- void __iomem *reg_wrap_vstatus;
- void __iomem *reg_gusb2phyacc0;
- int vstatus_index;
-};
-
-struct phy_data {
- u8 addr;
- u8 data;
-};
-
-struct phy_cfg {
- int page0_size;
- struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
- int page1_size;
- struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
- int page2_size;
- struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
-
- int num_phy;
-
- bool check_efuse;
- int check_efuse_version;
-#define CHECK_EFUSE_V1 1
-#define CHECK_EFUSE_V2 2
- int efuse_dc_driving_rate;
- int efuse_dc_disconnect_rate;
- int dc_driving_mask;
- int dc_disconnect_mask;
- bool usb_dc_disconnect_at_page0;
- int driving_updated_for_dev_dis;
-
- bool do_toggle;
- bool do_toggle_driving;
- bool use_default_parameter;
- bool is_double_sensitivity_mode;
-};
-
-struct phy_parameter {
- struct phy_reg phy_reg;
-
- /* Get from efuse */
- s8 efuse_usb_dc_cal;
- s8 efuse_usb_dc_dis;
-
- /* Get from dts */
- bool inverse_hstx_sync_clock;
- u32 driving_level;
- s32 driving_level_compensate;
- s32 disconnection_compensate;
-};
-
-struct rtk_phy {
- struct usb_phy phy;
- struct device *dev;
-
- struct phy_cfg *phy_cfg;
- int num_phy;
- struct phy_parameter *phy_parameter;
-
- struct dentry *debug_dir;
-};
-
-/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ,,, 0xF7 to 15 */
-static inline int page_addr_to_array_index(u8 addr)
-{
- return (int)((((addr) - PAGE_START) & 0x7) +
- ((((addr) - PAGE_START) & 0x10) >> 1));
-}
-
-static inline u8 array_index_to_page_addr(int index)
-{
- return ((((index) + PAGE_START) & 0x7) +
- ((((index) & 0x8) << 1) + PAGE_START));
-}
-
-#define PHY_IO_TIMEOUT_USEC (50000)
-#define PHY_IO_DELAY_US (100)
-
-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
-{
- int ret;
- unsigned int val;
-
- ret = read_poll_timeout(readl, val, ((val & mask) == result),
- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
- if (ret) {
- pr_err("%s can't program USB phy\n", __func__);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
-{
- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
- unsigned int val;
- int ret = 0;
-
- addr -= OFFEST_PHY_READ;
-
- /* polling until VBusy == 0 */
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return (char)ret;
-
- /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return (char)ret;
-
- /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return (char)ret;
-
- val = readl(reg_gusb2phyacc0);
-
- return (char)(val & PHY_REG_DATA_MASK);
-}
-
-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
-{
- unsigned int val;
- void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
- int shift_bits = phy_reg->vstatus_index * 8;
- int ret = 0;
-
- /* write data to VStatusOut2 (data output to phy) */
- writel((u32)data << shift_bits, reg_wrap_vstatus);
-
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return ret;
-
- /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
-
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return ret;
-
- /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
-
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
-{
- switch (page) {
- case 0:
- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
- case 1:
- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
- case 2:
- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
- default:
- pr_err("%s error page=%d\n", __func__, page);
- }
-
- return -EINVAL;
-}
-
-static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
- struct phy_parameter *phy_parameter, u8 data)
-{
- u8 ret;
- s32 val;
- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
- int offset = 4;
-
- val = (s32)((data >> offset) & dc_disconnect_mask)
- + phy_parameter->efuse_usb_dc_dis
- + phy_parameter->disconnection_compensate;
-
- if (val > dc_disconnect_mask)
- val = dc_disconnect_mask;
- else if (val < 0)
- val = 0;
-
- ret = (data & (~(dc_disconnect_mask << offset))) |
- (val & dc_disconnect_mask) << offset;
-
- return ret;
-}
-
-/* updated disconnect level at page0 */
-static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, bool update)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_data *phy_data_page;
- struct phy_data *phy_data;
- u8 addr, data;
- int offset = 4;
- s32 dc_disconnect_mask;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_reg = &phy_parameter->phy_reg;
-
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- i = page_addr_to_array_index(PAGE0_0XE4);
- phy_data = phy_data_page + i;
- if (!phy_data->addr) {
- phy_data->addr = PAGE0_0XE4;
- phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
- }
-
- addr = phy_data->addr;
- data = phy_data->data;
- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-
- if (update)
- data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
- else
- data = (data & ~(dc_disconnect_mask << offset)) |
- (DEFAULT_DC_DISCONNECTION_VALUE << offset);
-
- if (rtk_phy_write(phy_reg, addr, data))
- dev_err(rtk_phy->dev,
- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
-}
-
-static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
- struct phy_parameter *phy_parameter, u8 data)
-{
- u8 ret;
- s32 val;
- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- val = (s32)(data & dc_disconnect_mask)
- + phy_parameter->efuse_usb_dc_dis
- + phy_parameter->disconnection_compensate;
- } else { /* for CHECK_EFUSE_V2 or no efuse */
- if (phy_parameter->efuse_usb_dc_dis)
- val = (s32)(phy_parameter->efuse_usb_dc_dis +
- phy_parameter->disconnection_compensate);
- else
- val = (s32)((data & dc_disconnect_mask) +
- phy_parameter->disconnection_compensate);
- }
-
- if (val > dc_disconnect_mask)
- val = dc_disconnect_mask;
- else if (val < 0)
- val = 0;
-
- ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
-
- return ret;
-}
-
-/* updated disconnect level at page1 */
-static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, bool update)
-{
- struct phy_cfg *phy_cfg;
- struct phy_data *phy_data_page;
- struct phy_data *phy_data;
- struct phy_reg *phy_reg;
- u8 addr, data;
- s32 dc_disconnect_mask;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_reg = &phy_parameter->phy_reg;
-
- /* Set page 1 */
- phy_data_page = phy_cfg->page1;
- rtk_phy_set_page(phy_reg, 1);
-
- i = page_addr_to_array_index(PAGE1_0XE2);
- phy_data = phy_data_page + i;
- if (!phy_data->addr) {
- phy_data->addr = PAGE1_0XE2;
- phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
- }
-
- addr = phy_data->addr;
- data = phy_data->data;
- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-
- if (update)
- data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
- else
- data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
-
- if (rtk_phy_write(phy_reg, addr, data))
- dev_err(rtk_phy->dev,
- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
-}
-
-static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, bool update)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
-
- if (phy_cfg->usb_dc_disconnect_at_page0)
- update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
- else
- update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
-}
-
-static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
- struct phy_parameter *phy_parameter, u8 data)
-{
- s32 driving_level_compensate = phy_parameter->driving_level_compensate;
- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
- s32 val;
- u8 ret;
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- val = (s32)(data & dc_driving_mask) + driving_level_compensate
- + phy_parameter->efuse_usb_dc_cal;
- } else { /* for CHECK_EFUSE_V2 or no efuse */
- if (phy_parameter->efuse_usb_dc_cal)
- val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
- + driving_level_compensate);
- else
- val = (s32)(data & dc_driving_mask);
- }
-
- if (val > dc_driving_mask)
- val = dc_driving_mask;
- else if (val < 0)
- val = 0;
-
- ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
-
- return ret;
-}
-
-static void update_dc_driving_level(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
-
- phy_reg = &phy_parameter->phy_reg;
- phy_cfg = rtk_phy->phy_cfg;
- if (!phy_cfg->page0[4].addr) {
- rtk_phy_set_page(phy_reg, 0);
- phy_cfg->page0[4].addr = PAGE0_0XE4;
- phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
- }
-
- if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
- u32 dc_driving_mask;
- u8 driving_level;
- u8 data;
-
- data = phy_cfg->page0[4].data;
- dc_driving_mask = phy_cfg->dc_driving_mask;
- driving_level = data & dc_driving_mask;
-
- dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
- __func__, driving_level, phy_parameter->driving_level);
-
- phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
- (phy_parameter->driving_level & dc_driving_mask);
- }
-
- phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
- phy_parameter,
- phy_cfg->page0[4].data);
-}
-
-static void update_hs_clk_select(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_reg = &phy_parameter->phy_reg;
-
- if (phy_parameter->inverse_hstx_sync_clock) {
- if (!phy_cfg->page0[6].addr) {
- rtk_phy_set_page(phy_reg, 0);
- phy_cfg->page0[6].addr = PAGE0_0XE6;
- phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
- }
-
- phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
- }
-}
-
-static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
- int index, bool connect)
-{
- struct phy_parameter *phy_parameter;
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_data *phy_data_page;
- u8 addr, data;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (!phy_cfg->do_toggle)
- goto out;
-
- if (phy_cfg->is_double_sensitivity_mode)
- goto do_toggle_driving;
-
- /* Set page 0 */
- rtk_phy_set_page(phy_reg, 0);
-
- addr = PAGE0_0XE7;
- data = rtk_phy_read(phy_reg, addr);
-
- if (connect)
- rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
- else
- rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
-
-do_toggle_driving:
-
- if (!phy_cfg->do_toggle_driving)
- goto do_toggle;
-
- /* Page 0 addr 0xE4 driving capability */
-
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- i = page_addr_to_array_index(PAGE0_0XE4);
- addr = phy_data_page[i].addr;
- data = phy_data_page[i].data;
-
- if (connect) {
- rtk_phy_write(phy_reg, addr, data);
- } else {
- u8 value;
- s32 tmp;
- s32 driving_updated =
- phy_cfg->driving_updated_for_dev_dis;
- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
-
- tmp = (s32)(data & dc_driving_mask) + driving_updated;
-
- if (tmp > dc_driving_mask)
- tmp = dc_driving_mask;
- else if (tmp < 0)
- tmp = 0;
-
- value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
-
- rtk_phy_write(phy_reg, addr, value);
- }
-
-do_toggle:
- /* restore dc disconnect level before toggle */
- update_dc_disconnect_level(rtk_phy, phy_parameter, false);
-
- /* Set page 1 */
- rtk_phy_set_page(phy_reg, 1);
-
- addr = PAGE1_0XE0;
- data = rtk_phy_read(phy_reg, addr);
-
- rtk_phy_write(phy_reg, addr, data &
- (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
- mdelay(1);
- rtk_phy_write(phy_reg, addr, data |
- (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
-
- /* update dc disconnect level after toggle */
- update_dc_disconnect_level(rtk_phy, phy_parameter, true);
-
-out:
- return;
-}
-
-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
-{
- struct phy_parameter *phy_parameter;
- struct phy_cfg *phy_cfg;
- struct phy_data *phy_data_page;
- struct phy_reg *phy_reg;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (phy_cfg->use_default_parameter) {
- dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
- __func__, index);
- goto do_toggle;
- }
-
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- for (i = 0; i < phy_cfg->page0_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = phy_data->addr;
- u8 data = phy_data->data;
-
- if (!addr)
- continue;
-
- if (rtk_phy_write(phy_reg, addr, data)) {
- dev_err(rtk_phy->dev,
- "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
- return -EINVAL;
- }
- }
-
- /* Set page 1 */
- phy_data_page = phy_cfg->page1;
- rtk_phy_set_page(phy_reg, 1);
-
- for (i = 0; i < phy_cfg->page1_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = phy_data->addr;
- u8 data = phy_data->data;
-
- if (!addr)
- continue;
-
- if (rtk_phy_write(phy_reg, addr, data)) {
- dev_err(rtk_phy->dev,
- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
- return -EINVAL;
- }
- }
-
- if (phy_cfg->page2_size == 0)
- goto do_toggle;
-
- /* Set page 2 */
- phy_data_page = phy_cfg->page2;
- rtk_phy_set_page(phy_reg, 2);
-
- for (i = 0; i < phy_cfg->page2_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = phy_data->addr;
- u8 data = phy_data->data;
-
- if (!addr)
- continue;
-
- if (rtk_phy_write(phy_reg, addr, data)) {
- dev_err(rtk_phy->dev,
- "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
- return -EINVAL;
- }
- }
-
-do_toggle:
- do_rtk_phy_toggle(rtk_phy, index, false);
-
- return 0;
-}
-
-static int rtk_phy_init(struct phy *phy)
-{
- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
- unsigned long phy_init_time = jiffies;
- int i, ret = 0;
-
- if (!rtk_phy)
- return -EINVAL;
-
- for (i = 0; i < rtk_phy->num_phy; i++)
- ret = do_rtk_phy_init(rtk_phy, i);
-
- dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
- jiffies_to_msecs(jiffies - phy_init_time));
- return ret;
-}
-
-static int rtk_phy_exit(struct phy *phy)
-{
- return 0;
-}
-
-static const struct phy_ops ops = {
- .init = rtk_phy_init,
- .exit = rtk_phy_exit,
- .owner = THIS_MODULE,
-};
-
-static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port)
-{
- int index = port;
- struct rtk_phy *rtk_phy = NULL;
-
- rtk_phy = dev_get_drvdata(usb2_phy->dev);
-
- if (index > rtk_phy->num_phy) {
- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
- __func__, index, rtk_phy->num_phy);
- return;
- }
-
- do_rtk_phy_toggle(rtk_phy, index, connect);
-}
-
-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
- u16 portstatus, u16 portchange)
-{
- bool connect = false;
-
- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
- __func__, port, (int)portstatus, (int)portchange);
- if (portstatus & USB_PORT_STAT_CONNECTION)
- connect = true;
-
- if (portchange & USB_PORT_STAT_C_CONNECTION)
- rtk_phy_toggle(x, connect, port);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *create_phy_debug_root(void)
-{
- struct dentry *phy_debug_root;
-
- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
- if (!phy_debug_root)
- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
-
- return phy_debug_root;
-}
-
-static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
-{
- struct rtk_phy *rtk_phy = s->private;
- struct phy_cfg *phy_cfg;
- int i, index;
-
- phy_cfg = rtk_phy->phy_cfg;
-
- seq_puts(s, "Property:\n");
- seq_printf(s, " check_efuse: %s\n",
- phy_cfg->check_efuse ? "Enable" : "Disable");
- seq_printf(s, " check_efuse_version: %d\n",
- phy_cfg->check_efuse_version);
- seq_printf(s, " efuse_dc_driving_rate: %d\n",
- phy_cfg->efuse_dc_driving_rate);
- seq_printf(s, " dc_driving_mask: 0x%x\n",
- phy_cfg->dc_driving_mask);
- seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
- phy_cfg->efuse_dc_disconnect_rate);
- seq_printf(s, " dc_disconnect_mask: 0x%x\n",
- phy_cfg->dc_disconnect_mask);
- seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
- phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
- seq_printf(s, " do_toggle: %s\n",
- phy_cfg->do_toggle ? "Enable" : "Disable");
- seq_printf(s, " do_toggle_driving: %s\n",
- phy_cfg->do_toggle_driving ? "Enable" : "Disable");
- seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
- phy_cfg->driving_updated_for_dev_dis);
- seq_printf(s, " use_default_parameter: %s\n",
- phy_cfg->use_default_parameter ? "Enable" : "Disable");
- seq_printf(s, " is_double_sensitivity_mode: %s\n",
- phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- struct phy_parameter *phy_parameter;
- struct phy_reg *phy_reg;
- struct phy_data *phy_data_page;
-
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- seq_printf(s, "PHY %d:\n", index);
-
- seq_puts(s, "Page 0:\n");
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- for (i = 0; i < phy_cfg->page0_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = array_index_to_page_addr(i);
- u8 data = phy_data->data;
- u8 value = rtk_phy_read(phy_reg, addr);
-
- if (phy_data->addr)
- seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
- addr, data, value);
- else
- seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
- addr, value);
- }
-
- seq_puts(s, "Page 1:\n");
- /* Set page 1 */
- phy_data_page = phy_cfg->page1;
- rtk_phy_set_page(phy_reg, 1);
-
- for (i = 0; i < phy_cfg->page1_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = array_index_to_page_addr(i);
- u8 data = phy_data->data;
- u8 value = rtk_phy_read(phy_reg, addr);
-
- if (phy_data->addr)
- seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
- addr, data, value);
- else
- seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
- addr, value);
- }
-
- if (phy_cfg->page2_size == 0)
- goto out;
-
- seq_puts(s, "Page 2:\n");
- /* Set page 2 */
- phy_data_page = phy_cfg->page2;
- rtk_phy_set_page(phy_reg, 2);
-
- for (i = 0; i < phy_cfg->page2_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = array_index_to_page_addr(i);
- u8 data = phy_data->data;
- u8 value = rtk_phy_read(phy_reg, addr);
-
- if (phy_data->addr)
- seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
- addr, data, value);
- else
- seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
- addr, value);
- }
-
-out:
- seq_puts(s, "PHY Property:\n");
- seq_printf(s, " efuse_usb_dc_cal: %d\n",
- (int)phy_parameter->efuse_usb_dc_cal);
- seq_printf(s, " efuse_usb_dc_dis: %d\n",
- (int)phy_parameter->efuse_usb_dc_dis);
- seq_printf(s, " inverse_hstx_sync_clock: %s\n",
- phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
- seq_printf(s, " driving_level: %d\n",
- phy_parameter->driving_level);
- seq_printf(s, " driving_level_compensate: %d\n",
- phy_parameter->driving_level_compensate);
- seq_printf(s, " disconnection_compensate: %d\n",
- phy_parameter->disconnection_compensate);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
-
-static inline void create_debug_files(struct rtk_phy *rtk_phy)
-{
- struct dentry *phy_debug_root = NULL;
-
- phy_debug_root = create_phy_debug_root();
- if (!phy_debug_root)
- return;
-
- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
- phy_debug_root);
-
- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb2_parameter_fops);
-
- return;
-}
-
-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
-{
- debugfs_remove_recursive(rtk_phy->debug_dir);
-}
-#else
-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
-#endif /* CONFIG_DEBUG_FS */
-
-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, int index)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
- u8 value = 0;
- struct nvmem_cell *cell;
- struct soc_device_attribute rtk_soc_groot[] = {
- { .family = "Realtek Groot",},
- { /* empty */ } };
-
- if (!phy_cfg->check_efuse)
- goto out;
-
- /* Read efuse for usb dc cal */
- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
- if (IS_ERR(cell)) {
- dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
- __func__, PTR_ERR(cell));
- } else {
- unsigned char *buf;
- size_t buf_size;
-
- buf = nvmem_cell_read(cell, &buf_size);
- if (!IS_ERR(buf)) {
- value = buf[0] & phy_cfg->dc_driving_mask;
- kfree(buf);
- }
- nvmem_cell_put(cell);
- }
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- int rate = phy_cfg->efuse_dc_driving_rate;
-
- if (value <= EFUS_USB_DC_CAL_MAX)
- phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
- else
- phy_parameter->efuse_usb_dc_cal = -(int8_t)
- ((EFUS_USB_DC_CAL_MAX & value) * rate);
-
- if (soc_device_match(rtk_soc_groot)) {
- dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
-
- /* We don't multiply by dc_cal_rate=2 for positive dc cal compensation */
- if (value <= EFUS_USB_DC_CAL_MAX)
- phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
-
- /* Set the max dc cal compensation to 0x8 if the OTP value is 0x7 */
- if (value == 0x7)
- phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
- }
- } else { /* for CHECK_EFUSE_V2 */
- phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
- }
-
- /* Read efuse for usb dc disconnect level */
- value = 0;
- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
- if (IS_ERR(cell)) {
- dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
- __func__, PTR_ERR(cell));
- } else {
- unsigned char *buf;
- size_t buf_size;
-
- buf = nvmem_cell_read(cell, &buf_size);
- if (!IS_ERR(buf)) {
- value = buf[0] & phy_cfg->dc_disconnect_mask;
- kfree(buf);
- }
- nvmem_cell_put(cell);
- }
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- int rate = phy_cfg->efuse_dc_disconnect_rate;
-
- if (value <= EFUS_USB_DC_DIS_MAX)
- phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
- else
- phy_parameter->efuse_usb_dc_dis = -(int8_t)
- ((EFUS_USB_DC_DIS_MAX & value) * rate);
- } else { /* for CHECK_EFUSE_V2 */
- phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
- }
-
-out:
- return 0;
-}
-
-static int parse_phy_data(struct rtk_phy *rtk_phy)
-{
- struct device *dev = rtk_phy->dev;
- struct device_node *np = dev->of_node;
- struct phy_parameter *phy_parameter;
- int ret = 0;
- int index;
-
- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
- rtk_phy->num_phy, GFP_KERNEL);
- if (!rtk_phy->phy_parameter)
- return -ENOMEM;
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-
- phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
- phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
- phy_parameter->phy_reg.vstatus_index = index;
-
- if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
- phy_parameter->inverse_hstx_sync_clock = true;
- else
- phy_parameter->inverse_hstx_sync_clock = false;
-
- if (of_property_read_u32_index(np, "realtek,driving-level",
- index, &phy_parameter->driving_level))
- phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
-
- if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
- index, &phy_parameter->driving_level_compensate))
- phy_parameter->driving_level_compensate = 0;
-
- if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
- index, &phy_parameter->disconnection_compensate))
- phy_parameter->disconnection_compensate = 0;
-
- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
-
- update_dc_driving_level(rtk_phy, phy_parameter);
-
- update_hs_clk_select(rtk_phy, phy_parameter);
- }
-
- return ret;
-}
-
-static int rtk_usb2phy_probe(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct phy_provider *phy_provider;
- const struct phy_cfg *phy_cfg;
- int ret = 0;
-
- phy_cfg = of_device_get_match_data(dev);
- if (!phy_cfg) {
- dev_err(dev, "phy config are not assigned!\n");
- return -EINVAL;
- }
-
- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
- if (!rtk_phy)
- return -ENOMEM;
-
- rtk_phy->dev = &pdev->dev;
- rtk_phy->phy.dev = rtk_phy->dev;
- rtk_phy->phy.label = "rtk-usb2phy";
- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
-
- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
-
- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
-
- rtk_phy->num_phy = phy_cfg->num_phy;
-
- ret = parse_phy_data(rtk_phy);
- if (ret)
- goto err;
-
- platform_set_drvdata(pdev, rtk_phy);
-
- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
- if (IS_ERR(generic_phy))
- return PTR_ERR(generic_phy);
-
- phy_set_drvdata(generic_phy, rtk_phy);
-
- phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- ret = usb_add_phy_dev(&rtk_phy->phy);
- if (ret)
- goto err;
-
- create_debug_files(rtk_phy);
-
-err:
- return ret;
-}
-
-static void rtk_usb2phy_remove(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
-
- remove_debug_files(rtk_phy);
-
- usb_remove_phy(&rtk_phy->phy);
-}
-
-static const struct phy_cfg rtd1295_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0x90},
- [3] = {0xe3, 0x3a},
- [4] = {0xe4, 0x68},
- [6] = {0xe6, 0x91},
- [13] = {0xf5, 0x81},
- [15] = {0xf7, 0x02}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 1,
- .check_efuse = false,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1395_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [4] = {0xe4, 0xac},
- [13] = {0xf5, 0x00},
- [15] = {0xf7, 0x02}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 1,
- .check_efuse = false,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1395_phy_cfg_2port = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [4] = {0xe4, 0xac},
- [13] = {0xf5, 0x00},
- [15] = {0xf7, 0x02}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 2,
- .check_efuse = false,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1619_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [4] = {0xe4, 0x68}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1319_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0x18},
- [4] = {0xe4, 0x6a},
- [7] = {0xe7, 0x71},
- [13] = {0xf5, 0x15},
- [15] = {0xf7, 0x32}, },
- .page1_size = 8,
- .page1 = { [3] = {0xe3, 0x44}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [0] = {0xe0, 0x01}, },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = true,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1312c_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0x14},
- [4] = {0xe4, 0x67},
- [5] = {0xe5, 0x55}, },
- .page1_size = 8,
- .page1 = { [3] = {0xe3, 0x23},
- [6] = {0xe6, 0x58}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { /* default parameter */ },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = true,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1619b_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0xa3},
- [4] = {0xe4, 0x88},
- [5] = {0xe5, 0x4f},
- [6] = {0xe6, 0x02}, },
- .page1_size = 8,
- .page1 = { [3] = {0xe3, 0x64}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [7] = {0xe7, 0x45}, },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
- .dc_driving_mask = 0x1f,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = false,
- .do_toggle = true,
- .do_toggle_driving = true,
- .driving_updated_for_dev_dis = 0x8,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1319d_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0xa3},
- [4] = {0xe4, 0x8e},
- [5] = {0xe5, 0x4f},
- [6] = {0xe6, 0x02}, },
- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
- .page1 = { [14] = {0xf5, 0x1}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [7] = {0xe7, 0x44}, },
- .check_efuse = true,
- .num_phy = 1,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
- .dc_driving_mask = 0x1f,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = false,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0x8,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1315e_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0xa3},
- [4] = {0xe4, 0x8c},
- [5] = {0xe5, 0x4f},
- [6] = {0xe6, 0x02}, },
- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
- .page1 = { [3] = {0xe3, 0x7f},
- [14] = {0xf5, 0x01}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [7] = {0xe7, 0x44}, },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V2,
- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
- .dc_driving_mask = 0x1f,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = false,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0x8,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct of_device_id usbphy_rtk_dt_match[] = {
- { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
- { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
- { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
- { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
- { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
- { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
- { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
- { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
- { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
- {},
-};
-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
-
-static struct platform_driver rtk_usb2phy_driver = {
- .probe = rtk_usb2phy_probe,
- .remove_new = rtk_usb2phy_remove,
- .driver = {
- .name = "rtk-usb2phy",
- .of_match_table = usbphy_rtk_dt_match,
- },
-};
-
-module_platform_driver(rtk_usb2phy_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform: rtk-usb2phy");
-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
-MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
deleted file mode 100644
index 67446a85e968..000000000000
--- a/drivers/phy/realtek/phy-rtk-usb3.c
+++ /dev/null
@@ -1,761 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * phy-rtk-usb3.c RTK usb3.0 phy driver
- *
- * Copyright (C) 2023 Realtek Semiconductor Corporation
- *
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/regmap.h>
-#include <linux/sys_soc.h>
-#include <linux/mfd/syscon.h>
-#include <linux/phy/phy.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/phy.h>
-
-#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
-#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
-#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
-#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
-
-#define MAX_USB_PHY_DATA_SIZE 0x30
-#define PHY_ADDR_0X09 0x09
-#define PHY_ADDR_0X0B 0x0b
-#define PHY_ADDR_0X0D 0x0d
-#define PHY_ADDR_0X10 0x10
-#define PHY_ADDR_0X1F 0x1f
-#define PHY_ADDR_0X20 0x20
-#define PHY_ADDR_0X21 0x21
-#define PHY_ADDR_0X30 0x30
-
-#define REG_0X09_FORCE_CALIBRATION BIT(9)
-#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
-#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
-#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
-#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
-#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
-
-#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
-#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
-#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
-#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
-#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
-#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
-
-#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
-#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
-
-struct phy_reg {
- void __iomem *reg_mdio_ctl;
-};
-
-struct phy_data {
- u8 addr;
- u16 data;
-};
-
-struct phy_cfg {
- int param_size;
- struct phy_data param[MAX_USB_PHY_DATA_SIZE];
-
- bool check_efuse;
- bool do_toggle;
- bool do_toggle_once;
- bool use_default_parameter;
- bool check_rx_front_end_offset;
-};
-
-struct phy_parameter {
- struct phy_reg phy_reg;
-
- /* Get from efuse */
- u8 efuse_usb_u3_tx_lfps_swing_trim;
-
- /* Get from dts */
- u32 amplitude_control_coarse;
- u32 amplitude_control_fine;
-};
-
-struct rtk_phy {
- struct usb_phy phy;
- struct device *dev;
-
- struct phy_cfg *phy_cfg;
- int num_phy;
- struct phy_parameter *phy_parameter;
-
- struct dentry *debug_dir;
-};
-
-#define PHY_IO_TIMEOUT_USEC (50000)
-#define PHY_IO_DELAY_US (100)
-
-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
-{
- int ret;
- unsigned int val;
-
- ret = read_poll_timeout(readl, val, ((val & mask) == result),
- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
- if (ret) {
- pr_err("%s can't program USB phy\n", __func__);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
-{
- return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
-}
-
-static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
-{
- unsigned int tmp;
- u32 value;
-
- tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
-
- writel(tmp, phy_reg->reg_mdio_ctl);
-
- rtk_phy3_wait_vbusy(phy_reg);
-
- value = readl(phy_reg->reg_mdio_ctl);
- value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
-
- return (u16)value;
-}
-
-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
-{
- unsigned int val;
-
- val = USB_MDIO_CTRL_PHY_WRITE |
- (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
- (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
-
- writel(val, phy_reg->reg_mdio_ctl);
-
- rtk_phy3_wait_vbusy(phy_reg);
-
- return 0;
-}
-
-static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_parameter *phy_parameter;
- struct phy_data *phy_data;
- u8 addr;
- u16 data;
- int i;
-
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (!phy_cfg->do_toggle)
- return;
-
- i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
- phy_data = phy_cfg->param + i;
- addr = phy_data->addr;
- data = phy_data->data;
-
- if (!addr && !data) {
- addr = PHY_ADDR_0X09;
- data = rtk_phy_read(phy_reg, addr);
- phy_data->addr = addr;
- phy_data->data = data;
- }
-
- rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
- mdelay(1);
- rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
-}
-
-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_parameter *phy_parameter;
- int i = 0;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (phy_cfg->use_default_parameter)
- goto do_toggle;
-
- for (i = 0; i < phy_cfg->param_size; i++) {
- struct phy_data *phy_data = phy_cfg->param + i;
- u8 addr = phy_data->addr;
- u16 data = phy_data->data;
-
- if (!addr && !data)
- continue;
-
- rtk_phy_write(phy_reg, addr, data);
- }
-
-do_toggle:
- if (phy_cfg->do_toggle_once)
- phy_cfg->do_toggle = true;
-
- do_rtk_usb3_phy_toggle(rtk_phy, index, false);
-
- if (phy_cfg->do_toggle_once) {
- u16 check_value = 0;
- int count = 10;
- u16 value_0x0d, value_0x10;
-
- /* Enable debug mode by setting 0x0D and 0x10 */
- value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
- value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
-
- rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
- value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
- rtk_phy_write(phy_reg, PHY_ADDR_0X10,
- (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
- REG_0X10_DEBUG_MODE_SETTING);
-
- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
-
- while (!(check_value & BIT(15))) {
- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
- mdelay(1);
- if (count-- < 0)
- break;
- }
-
- if (!(check_value & BIT(15)))
- dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
- PHY_ADDR_0X30, check_value);
-
- /* Disable debug mode by setting 0x0D and 0x10 back to default */
- rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
- rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
-
- phy_cfg->do_toggle = false;
- }
-
- if (phy_cfg->check_rx_front_end_offset) {
- u16 rx_offset_code, rx_offset_range;
- u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
- u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
- bool do_update = false;
-
- rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
- if (((rx_offset_code & code_mask) == 0x0) ||
- ((rx_offset_code & code_mask) == code_mask))
- do_update = true;
-
- rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
- if (((rx_offset_range & range_mask) == range_mask) && do_update) {
- dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
- rx_offset_code, rx_offset_range);
- do_update = false;
- }
-
- if (do_update) {
- u16 tmp1, tmp2;
-
- tmp1 = rx_offset_range & (~range_mask);
- tmp2 = rx_offset_range & range_mask;
- tmp2 += (1 << 2);
- rx_offset_range = tmp1 | (tmp2 & range_mask);
- rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
- goto do_toggle;
- }
- }
-
- return 0;
-}
-
-static int rtk_phy_init(struct phy *phy)
-{
- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
- int ret = 0;
- int i;
- unsigned long phy_init_time = jiffies;
-
- for (i = 0; i < rtk_phy->num_phy; i++)
- ret = do_rtk_phy_init(rtk_phy, i);
-
- dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
- jiffies_to_msecs(jiffies - phy_init_time));
-
- return ret;
-}
-
-static int rtk_phy_exit(struct phy *phy)
-{
- return 0;
-}
-
-static const struct phy_ops ops = {
- .init = rtk_phy_init,
- .exit = rtk_phy_exit,
- .owner = THIS_MODULE,
-};
-
-static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
-{
- int index = port;
- struct rtk_phy *rtk_phy = NULL;
-
- rtk_phy = dev_get_drvdata(usb3_phy->dev);
-
- if (index > rtk_phy->num_phy) {
- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
- __func__, index, rtk_phy->num_phy);
- return;
- }
-
- do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
-}
-
-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
- u16 portstatus, u16 portchange)
-{
- bool connect = false;
-
- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
- __func__, port, (int)portstatus, (int)portchange);
- if (portstatus & USB_PORT_STAT_CONNECTION)
- connect = true;
-
- if (portchange & USB_PORT_STAT_C_CONNECTION)
- rtk_phy_toggle(x, connect, port);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *create_phy_debug_root(void)
-{
- struct dentry *phy_debug_root;
-
- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
- if (!phy_debug_root)
- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
-
- return phy_debug_root;
-}
-
-static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
-{
- struct rtk_phy *rtk_phy = s->private;
- struct phy_cfg *phy_cfg;
- int i, index;
-
- phy_cfg = rtk_phy->phy_cfg;
-
- seq_puts(s, "Property:\n");
- seq_printf(s, " check_efuse: %s\n",
- phy_cfg->check_efuse ? "Enable" : "Disable");
- seq_printf(s, " do_toggle: %s\n",
- phy_cfg->do_toggle ? "Enable" : "Disable");
- seq_printf(s, " do_toggle_once: %s\n",
- phy_cfg->do_toggle_once ? "Enable" : "Disable");
- seq_printf(s, " use_default_parameter: %s\n",
- phy_cfg->use_default_parameter ? "Enable" : "Disable");
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- struct phy_reg *phy_reg;
- struct phy_parameter *phy_parameter;
-
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- seq_printf(s, "PHY %d:\n", index);
-
- for (i = 0; i < phy_cfg->param_size; i++) {
- struct phy_data *phy_data = phy_cfg->param + i;
- u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
- u16 data = phy_data->data;
-
- if (!phy_data->addr && !data)
- seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
- addr, rtk_phy_read(phy_reg, addr));
- else
- seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
- addr, data, rtk_phy_read(phy_reg, addr));
- }
-
- seq_puts(s, "PHY Property:\n");
- seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
- (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
- seq_printf(s, " amplitude_control_coarse: 0x%x\n",
- (int)phy_parameter->amplitude_control_coarse);
- seq_printf(s, " amplitude_control_fine: 0x%x\n",
- (int)phy_parameter->amplitude_control_fine);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
-
-static inline void create_debug_files(struct rtk_phy *rtk_phy)
-{
- struct dentry *phy_debug_root = NULL;
-
- phy_debug_root = create_phy_debug_root();
-
- if (!phy_debug_root)
- return;
-
- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
-
- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb3_parameter_fops);
-
- return;
-}
-
-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
-{
- debugfs_remove_recursive(rtk_phy->debug_dir);
-}
-#else
-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
-#endif /* CONFIG_DEBUG_FS */
-
-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, int index)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
- u8 value = 0;
- struct nvmem_cell *cell;
-
- if (!phy_cfg->check_efuse)
- goto out;
-
- cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
- if (IS_ERR(cell)) {
- dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
- __func__, PTR_ERR(cell));
- } else {
- unsigned char *buf;
- size_t buf_size;
-
- buf = nvmem_cell_read(cell, &buf_size);
- if (!IS_ERR(buf)) {
- value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
- kfree(buf);
- }
- nvmem_cell_put(cell);
- }
-
- if (value > 0 && value < 0x8)
- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
- else
- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
-
-out:
- return 0;
-}
-
-static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
-
- phy_reg = &phy_parameter->phy_reg;
- phy_cfg = rtk_phy->phy_cfg;
-
- if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
- u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
- u16 data;
-
- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
- } else {
- data = phy_cfg->param[PHY_ADDR_0X20].data;
- }
-
- data &= (~val_mask);
- data |= (phy_parameter->amplitude_control_coarse & val_mask);
-
- phy_cfg->param[PHY_ADDR_0X20].data = data;
- }
-
- if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
- u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
- u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
- int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
- u16 data;
-
- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
- } else {
- data = phy_cfg->param[PHY_ADDR_0X20].data;
- }
-
- data &= ~(val_mask << val_shift);
- data |= ((efuse_val & val_mask) << val_shift);
-
- phy_cfg->param[PHY_ADDR_0X20].data = data;
- }
-
- if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
- u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
-
- if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
- phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
-
- phy_cfg->param[PHY_ADDR_0X21].data =
- phy_parameter->amplitude_control_fine & val_mask;
- }
-}
-
-static int parse_phy_data(struct rtk_phy *rtk_phy)
-{
- struct device *dev = rtk_phy->dev;
- struct phy_parameter *phy_parameter;
- int ret = 0;
- int index;
-
- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
- rtk_phy->num_phy, GFP_KERNEL);
- if (!rtk_phy->phy_parameter)
- return -ENOMEM;
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-
- phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
-
- /* Amplitude control address 0x20 bit 0 to bit 7 */
- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
- &phy_parameter->amplitude_control_coarse))
- phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
-
- /* Amplitude control address 0x21 bit 0 to bit 15 */
- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
- &phy_parameter->amplitude_control_fine))
- phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
-
- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
-
- update_amplitude_control_value(rtk_phy, phy_parameter);
- }
-
- return ret;
-}
-
-static int rtk_usb3phy_probe(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct phy_provider *phy_provider;
- const struct phy_cfg *phy_cfg;
- int ret;
-
- phy_cfg = of_device_get_match_data(dev);
- if (!phy_cfg) {
- dev_err(dev, "phy config are not assigned!\n");
- return -EINVAL;
- }
-
- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
- if (!rtk_phy)
- return -ENOMEM;
-
- rtk_phy->dev = &pdev->dev;
- rtk_phy->phy.dev = rtk_phy->dev;
- rtk_phy->phy.label = "rtk-usb3phy";
- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
-
- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
-
- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
-
- rtk_phy->num_phy = 1;
-
- ret = parse_phy_data(rtk_phy);
- if (ret)
- goto err;
-
- platform_set_drvdata(pdev, rtk_phy);
-
- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
- if (IS_ERR(generic_phy))
- return PTR_ERR(generic_phy);
-
- phy_set_drvdata(generic_phy, rtk_phy);
-
- phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- ret = usb_add_phy_dev(&rtk_phy->phy);
- if (ret)
- goto err;
-
- create_debug_files(rtk_phy);
-
-err:
- return ret;
-}
-
-static void rtk_usb3phy_remove(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
-
- remove_debug_files(rtk_phy);
-
- usb_remove_phy(&rtk_phy->phy);
-}
-
-static const struct phy_cfg rtd1295_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
- [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
- [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
- [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
- [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
- [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
- [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
- [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
- [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
- [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
- [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
- [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
- [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
- [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
- [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
- [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
- [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
- [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
- [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
- [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
- [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
- [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
- [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
- [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
- .check_efuse = false,
- .do_toggle = true,
- .do_toggle_once = false,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1619_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [8] = {0x08, 0x3591},
- [38] = {0x26, 0x840b},
- [40] = {0x28, 0xf842}, },
- .check_efuse = false,
- .do_toggle = true,
- .do_toggle_once = false,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1319_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [1] = {0x01, 0xac86},
- [6] = {0x06, 0x0003},
- [9] = {0x09, 0x924c},
- [10] = {0x0a, 0xa608},
- [11] = {0x0b, 0xb905},
- [14] = {0x0e, 0x2010},
- [32] = {0x20, 0x705a},
- [33] = {0x21, 0xf645},
- [34] = {0x22, 0x0013},
- [35] = {0x23, 0xcb66},
- [41] = {0x29, 0xff00}, },
- .check_efuse = true,
- .do_toggle = true,
- .do_toggle_once = false,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1619b_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [1] = {0x01, 0xac8c},
- [6] = {0x06, 0x0017},
- [9] = {0x09, 0x724c},
- [10] = {0x0a, 0xb610},
- [11] = {0x0b, 0xb90d},
- [13] = {0x0d, 0xef2a},
- [15] = {0x0f, 0x9050},
- [16] = {0x10, 0x000c},
- [32] = {0x20, 0x70ff},
- [34] = {0x22, 0x0013},
- [35] = {0x23, 0xdb66},
- [38] = {0x26, 0x8609},
- [41] = {0x29, 0xff13},
- [42] = {0x2a, 0x3070}, },
- .check_efuse = true,
- .do_toggle = false,
- .do_toggle_once = true,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1319d_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [1] = {0x01, 0xac89},
- [4] = {0x04, 0xf2f5},
- [6] = {0x06, 0x0017},
- [9] = {0x09, 0x424c},
- [10] = {0x0a, 0x9610},
- [11] = {0x0b, 0x9901},
- [12] = {0x0c, 0xf000},
- [13] = {0x0d, 0xef2a},
- [14] = {0x0e, 0x1000},
- [15] = {0x0f, 0x9050},
- [32] = {0x20, 0x7077},
- [35] = {0x23, 0x0b62},
- [37] = {0x25, 0x10ec},
- [42] = {0x2a, 0x3070}, },
- .check_efuse = true,
- .do_toggle = false,
- .do_toggle_once = true,
- .use_default_parameter = false,
- .check_rx_front_end_offset = true,
-};
-
-static const struct of_device_id usbphy_rtk_dt_match[] = {
- { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
- { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
- { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
- { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
- { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
- {},
-};
-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
-
-static struct platform_driver rtk_usb3phy_driver = {
- .probe = rtk_usb3phy_probe,
- .remove_new = rtk_usb3phy_remove,
- .driver = {
- .name = "rtk-usb3phy",
- .of_match_table = usbphy_rtk_dt_match,
- },
-};
-
-module_platform_driver(rtk_usb3phy_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform: rtk-usb3phy");
-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
-MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index cd6ac04c1468..c3104714b480 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -964,33 +964,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ }
};
-static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
-{
- int ret;
-
- switch (dev->cpu_id) {
- case AMD_CPU_ID_YC:
- if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
- ret = -EINVAL;
- goto err_dram_size;
- }
- break;
- default:
- ret = -EINVAL;
- goto err_dram_size;
- }
-
- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
- if (ret || !dev->dram_size)
- goto err_dram_size;
-
- return 0;
-
-err_dram_size:
- dev_err(dev->dev, "DRAM size command not supported for this platform\n");
- return ret;
-}
-
static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
{
u32 phys_addr_low, phys_addr_hi;
@@ -1009,8 +982,8 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return -EIO;
/* Get DRAM size */
- ret = amd_pmc_get_dram_size(dev);
- if (ret)
+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
+ if (ret || !dev->dram_size)
dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
/* Get STB DRAM address */
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 5798b49ddaba..8c9f4f3227fc 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -588,17 +588,14 @@ static void release_attributes_data(void)
static int hp_add_other_attributes(int attr_type)
{
struct kobject *attr_name_kobj;
- union acpi_object *obj = NULL;
int ret;
char *attr_name;
- mutex_lock(&bioscfg_drv.mutex);
-
attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
- if (!attr_name_kobj) {
- ret = -ENOMEM;
- goto err_other_attr_init;
- }
+ if (!attr_name_kobj)
+ return -ENOMEM;
+
+ mutex_lock(&bioscfg_drv.mutex);
/* Check if attribute type is supported */
switch (attr_type) {
@@ -615,14 +612,14 @@ static int hp_add_other_attributes(int attr_type)
default:
pr_err("Error: Unknown attr_type: %d\n", attr_type);
ret = -EINVAL;
- goto err_other_attr_init;
+ kfree(attr_name_kobj);
+ goto unlock_drv_mutex;
}
ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
NULL, "%s", attr_name);
if (ret) {
pr_err("Error encountered [%d]\n", ret);
- kobject_put(attr_name_kobj);
goto err_other_attr_init;
}
@@ -630,27 +627,26 @@ static int hp_add_other_attributes(int attr_type)
switch (attr_type) {
case HPWMI_SECURE_PLATFORM_TYPE:
ret = hp_populate_secure_platform_data(attr_name_kobj);
- if (ret)
- goto err_other_attr_init;
break;
case HPWMI_SURE_START_TYPE:
ret = hp_populate_sure_start_data(attr_name_kobj);
- if (ret)
- goto err_other_attr_init;
break;
default:
ret = -EINVAL;
- goto err_other_attr_init;
}
+ if (ret)
+ goto err_other_attr_init;
+
mutex_unlock(&bioscfg_drv.mutex);
return 0;
err_other_attr_init:
+ kobject_put(attr_name_kobj);
+unlock_drv_mutex:
mutex_unlock(&bioscfg_drv.mutex);
- kfree(obj);
return ret;
}
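
For context on the reordering above: the kobject is now allocated before the driver mutex is taken, and every post-lock failure funnels through labels that drop the object and then the mutex, in reverse acquisition order. A rough standalone sketch of that unwind shape (hypothetical names; a pthread mutex stands in for bioscfg_drv.mutex and free() for kobject_put()):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t drv_mutex = PTHREAD_MUTEX_INITIALIZER;

struct attr_obj {
	int registered;
};

/* Stand-in for kobject_init_and_add() / hp_populate_*_data(). */
static int register_obj(struct attr_obj *obj)
{
	obj->registered = 1;
	return 0;
}

static int add_attribute(void)
{
	struct attr_obj *obj;
	int ret;

	obj = calloc(1, sizeof(*obj));		/* allocate before locking */
	if (!obj)
		return -1;

	pthread_mutex_lock(&drv_mutex);

	ret = register_obj(obj);
	if (ret)
		goto err_put;

	pthread_mutex_unlock(&drv_mutex);
	return 0;

err_put:
	free(obj);				/* kobject_put() in the real code */
	pthread_mutex_unlock(&drv_mutex);
	return ret;
}

int main(void)
{
	return add_attribute();
}
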
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac037540acfc..88eefccb6ed2 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1425,18 +1425,17 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
if (WARN_ON(priv->kbd_bl.initialized))
return -EEXIST;
- brightness = ideapad_kbd_bl_brightness_get(priv);
- if (brightness < 0)
- return brightness;
-
- priv->kbd_bl.last_brightness = brightness;
-
if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
priv->kbd_bl.led.max_brightness = 2;
} else {
priv->kbd_bl.led.max_brightness = 1;
}
+ brightness = ideapad_kbd_bl_brightness_get(priv);
+ if (brightness < 0)
+ return brightness;
+
+ priv->kbd_bl.last_brightness = brightness;
priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c
index fdf55b5d6948..e4be40f73eeb 100644
--- a/drivers/platform/x86/intel/telemetry/core.c
+++ b/drivers/platform/x86/intel/telemetry/core.c
@@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = {
/**
* telemetry_update_events() - Update telemetry Configuration
* @pss_evtconfig: PSS related config. No change if num_evts = 0.
- * @pss_evtconfig: IOSS related config. No change if num_evts = 0.
+ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
*
* This API updates the IOSS & PSS Telemetry configuration. Old config
* is overwritten. Call telemetry_reset_events when logging is over
@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events);
/**
* telemetry_get_eventconfig() - Returns the pss and ioss events enabled
* @pss_evtconfig: Pointer to PSS related configuration.
- * @pss_evtconfig: Pointer to IOSS related configuration.
+ * @ioss_evtconfig: Pointer to IOSS related configuration.
* @pss_len: Number of u32 elements allocated for pss_evtconfig array
* @ioss_len: Number of u32 elements allocated for ioss_evtconfig array
*
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 3f7a74788802..7513018c9f9a 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -572,7 +572,8 @@ ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
for (i = 0; i < cnt; i++) {
event[i] = queue->buf[queue->head];
- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+ /* Paired with READ_ONCE() in queue_cnt() */
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
}
spin_unlock_irqrestore(&queue->lock, flags);
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 3134568af622..15b804ba4868 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -57,10 +57,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
dst->t.sec = seconds;
dst->t.nsec = remainder;
+ /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
- queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
+ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
spin_unlock_irqrestore(&queue->lock, flags);
}
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 35fde0a05746..45f9002a5dca 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -85,9 +85,13 @@ struct ptp_vclock {
* that a writer might concurrently increment the tail does not
* matter, since the queue remains nonempty nonetheless.
*/
-static inline int queue_cnt(struct timestamp_event_queue *q)
+static inline int queue_cnt(const struct timestamp_event_queue *q)
{
- int cnt = q->tail - q->head;
+ /*
+ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
+ * ptp_read(), extts_fifo_show().
+ */
+ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
}
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 7d023d9d0acb..f7a499a1bd39 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -94,7 +94,8 @@ static ssize_t extts_fifo_show(struct device *dev,
qcnt = queue_cnt(queue);
if (qcnt) {
event = queue->buf[queue->head];
- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+ /* Paired with READ_ONCE() in queue_cnt() */
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
}
spin_unlock_irqrestore(&queue->lock, flags);
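
The four PTP hunks above all serve one pattern: writers advance head/tail under the queue spinlock with WRITE_ONCE(), so queue_cnt() can read both indices locklessly with READ_ONCE() without seeing torn or re-read values. A minimal userspace sketch of that pattern (hypothetical names; C11 relaxed atomics stand in for READ_ONCE()/WRITE_ONCE()):

/*
 * Producers advance the indices under a lock; a lockless reader derives
 * the element count from one snapshot of each index.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE	128	/* plays the role of PTP_MAX_TIMESTAMPS */

struct ring {
	atomic_int head;	/* consumer index, advanced under the lock */
	atomic_int tail;	/* producer index, advanced under the lock */
};

/* Safe to call without the lock: each index is read exactly once. */
static int ring_cnt(const struct ring *r)
{
	int cnt = atomic_load_explicit(&r->tail, memory_order_relaxed) -
		  atomic_load_explicit(&r->head, memory_order_relaxed);

	return cnt < 0 ? RING_SIZE + cnt : cnt;
}

/* Called with the queue lock held in the real driver. */
static void ring_push(struct ring *r)
{
	int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	atomic_store_explicit(&r->tail, (tail + 1) % RING_SIZE,
			      memory_order_relaxed);
}

int main(void)
{
	struct ring r;

	atomic_init(&r.head, 0);
	atomic_init(&r.tail, 0);
	ring_push(&r);
	ring_push(&r);
	printf("count = %d\n", ring_cnt(&r));	/* prints: count = 2 */
	return 0;
}
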
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d440319a7945..833cfab7d877 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -676,18 +676,20 @@ static void dasd_profile_start(struct dasd_block *block,
* we count each request only once.
*/
device = cqr->startdev;
- if (device->profile.data) {
- counter = 1; /* request is not yet queued on the start device */
- list_for_each(l, &device->ccw_queue)
- if (++counter >= 31)
- break;
- }
+ if (!device->profile.data)
+ return;
+
+ spin_lock(get_ccwdev_lock(device->cdev));
+ counter = 1; /* request is not yet queued on the start device */
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ spin_unlock(get_ccwdev_lock(device->cdev));
+
spin_lock(&device->profile.lock);
- if (device->profile.data) {
- device->profile.data->dasd_io_nr_req[counter]++;
- if (rq_data_dir(req) == READ)
- device->profile.data->dasd_read_nr_req[counter]++;
- }
+ device->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ device->profile.data->dasd_read_nr_req[counter]++;
spin_unlock(&device->profile.lock);
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 2e663131adaf..1b1b8a41c4d4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -283,7 +283,7 @@ struct dasd_pprc_dev_info {
__u8 secondary; /* 7 Secondary device address */
__u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */
__u8 reserved2[12]; /* 10-21 reserved */
- __u16 prim_cu_ssid; /* 22-23 Pimary Control Unit SSID */
+ __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */
__u8 reserved3[12]; /* 24-35 reserved */
__u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */
__u8 reserved4[90]; /* 38-127 reserved */
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 4902d45e929c..c61e6427384c 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -103,10 +103,11 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
depends on PCI
+ imply SMC
default n
help
Select this option if you want to use the Internal Shared Memory
- vPCI Adapter.
+ vPCI Adapter. The adapter can be used with the SMC network protocol.
To compile as a module choose M. The module name is ism.
If unsure, choose N.
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 6df7f377d2f9..81aabbfbbe2c 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = {
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
-static const struct smcd_ops ism_ops;
#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
@@ -289,22 +288,6 @@ out:
return ret;
}
-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
- u32 vid)
-{
- union ism_query_rgid cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_QUERY_RGID;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.vlan_valid = vid_valid;
- cmd.request.vlan_id = vid;
-
- return ism_cmd(ism, &cmd);
-}
-
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
@@ -429,23 +412,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
-{
- union ism_sig_ieq cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.trigger_irq = trigger_irq;
- cmd.request.event_code = event_code;
- cmd.request.info = info;
-
- return ism_cmd(ism, &cmd);
-}
-
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
@@ -503,14 +469,6 @@ u8 *ism_get_seid(void)
}
EXPORT_SYMBOL_GPL(ism_get_seid);
-static u16 ism_get_chid(struct ism_dev *ism)
-{
- if (!ism || !ism->pdev)
- return 0;
-
- return to_zpci(ism->pdev)->pchid;
-}
-
static void ism_handle_event(struct ism_dev *ism)
{
struct ism_event *entry;
@@ -569,11 +527,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static u64 ism_get_local_gid(struct ism_dev *ism)
-{
- return ism->local_gid;
-}
-
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
@@ -774,6 +727,22 @@ module_exit(ism_exit);
/*************************** SMC-D Implementation *****************************/
#if IS_ENABLED(CONFIG_SMC)
+static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
+ u32 vid)
+{
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
u32 vid)
{
@@ -811,6 +780,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd)
return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
+static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+{
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
@@ -830,11 +816,24 @@ static int smcd_supports_v2(void)
SYSTEM_EID.type[0] != '0';
}
+static u64 ism_get_local_gid(struct ism_dev *ism)
+{
+ return ism->local_gid;
+}
+
static u64 smcd_get_local_gid(struct smcd_dev *smcd)
{
return ism_get_local_gid(smcd->priv);
}
+static u16 ism_get_chid(struct ism_dev *ism)
+{
+ if (!ism || !ism->pdev)
+ return 0;
+
+ return to_zpci(ism->pdev)->pchid;
+}
+
static u16 smcd_get_chid(struct smcd_dev *smcd)
{
return ism_get_chid(smcd->priv);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 32d1e73e46ee..03348f605c2e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1837,8 +1837,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
- sp->done(sp, res);
+ switch (sp->type) {
+ case SRB_SCSI_CMD:
+ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+ sp->done(sp, res);
+ break;
+ default:
+ if (ret_cmd)
+ sp->done(sp, res);
+ break;
+ }
} else {
sp->done(sp, res);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 67922e2c4c19..6d8218a44122 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1019,7 +1019,7 @@ static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
struct sdebug_err_inject *inject;
struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
- buf = kmalloc(count, GFP_KERNEL);
+ buf = kzalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1132,7 +1132,6 @@ static const struct file_operations sdebug_target_reset_fail_fops = {
static int sdebug_target_alloc(struct scsi_target *starget)
{
struct sdebug_target_info *targetip;
- struct dentry *dentry;
targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
if (!targetip)
@@ -1140,15 +1139,9 @@ static int sdebug_target_alloc(struct scsi_target *starget)
targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
sdebug_debugfs_root);
- if (IS_ERR_OR_NULL(targetip->debugfs_entry))
- pr_info("%s: failed to create debugfs directory for target %s\n",
- __func__, dev_name(&starget->dev));
debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
&sdebug_target_reset_fail_fops);
- if (IS_ERR_OR_NULL(dentry))
- pr_info("%s: failed to create fail_reset file for target %s\n",
- __func__, dev_name(&starget->dev));
starget->hostdata = targetip;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 530918cbfce2..fa00dd503cbf 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1643,24 +1643,21 @@ out:
return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
-static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
+static int sd_sync_cache(struct scsi_disk *sdkp)
{
int retries, res;
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout
* SD_FLUSH_TIMEOUT_MULTIPLIER;
- struct scsi_sense_hdr my_sshdr;
+ struct scsi_sense_hdr sshdr;
const struct scsi_exec_args exec_args = {
.req_flags = BLK_MQ_REQ_PM,
- /* caller might not be interested in sense, but we need it */
- .sshdr = sshdr ? : &my_sshdr,
+ .sshdr = &sshdr,
};
if (!scsi_device_online(sdp))
return -ENODEV;
- sshdr = exec_args.sshdr;
-
for (retries = 3; retries > 0; --retries) {
unsigned char cmd[16] = { 0 };
@@ -1685,15 +1682,23 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
return res;
if (scsi_status_is_check_condition(res) &&
- scsi_sense_valid(sshdr)) {
- sd_print_sense_hdr(sdkp, sshdr);
+ scsi_sense_valid(&sshdr)) {
+ sd_print_sense_hdr(sdkp, &sshdr);
/* we need to evaluate the error return */
- if (sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20 || /* invalid command */
- (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
+ if (sshdr.asc == 0x3a || /* medium not present */
+ sshdr.asc == 0x20 || /* invalid command */
+ (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */
/* this is no error here */
return 0;
+ /*
+	 * This drive doesn't support sync, and there's not much
+	 * we can do because this is called during shutdown
+	 * or suspend, so just return success so those operations
+	 * can proceed.
+ */
+ if (sshdr.sense_key == ILLEGAL_REQUEST)
+ return 0;
}
switch (host_byte(res)) {
@@ -3853,7 +3858,7 @@ static void sd_shutdown(struct device *dev)
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
- sd_sync_cache(sdkp, NULL);
+ sd_sync_cache(sdkp);
}
if ((system_state != SYSTEM_RESTART &&
@@ -3874,7 +3879,6 @@ static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
static int sd_suspend_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- struct scsi_sense_hdr sshdr;
int ret = 0;
if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
@@ -3883,24 +3887,13 @@ static int sd_suspend_common(struct device *dev, bool runtime)
if (sdkp->WCE && sdkp->media_present) {
if (!sdkp->device->silence_suspend)
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
- ret = sd_sync_cache(sdkp, &sshdr);
-
- if (ret) {
- /* ignore OFFLINE device */
- if (ret == -ENODEV)
- return 0;
-
- if (!scsi_sense_valid(&sshdr) ||
- sshdr.sense_key != ILLEGAL_REQUEST)
- return ret;
+ ret = sd_sync_cache(sdkp);
+ /* ignore OFFLINE device */
+ if (ret == -ENODEV)
+ return 0;
- /*
- * sshdr.sense_key == ILLEGAL_REQUEST means this drive
- * doesn't support sync. There's not much to do and
- * suspend shouldn't fail.
- */
- ret = 0;
- }
+ if (ret)
+ return ret;
}
if (sd_do_start_stop(sdkp->device, runtime)) {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b3634e10f6f5..c954001ae79e 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -86,6 +86,7 @@ config QCOM_PMIC_GLINK
depends on OF
select AUXILIARY_BUS
select QCOM_PDR_HELPERS
+ select DRM_AUX_HPD_BRIDGE
help
The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
USB and battery firmware running on one of the coprocessors in
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index b78279e2f54c..053b7393e26a 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct drm_bridge bridge;
+ struct device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,10 +230,10 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- if (alt_port->hpd_state)
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
- else
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
+ drm_aux_hpd_bridge_notify(alt_port->bridge,
+ alt_port->hpd_state ?
+ connector_status_connected :
+ connector_status_disconnected);
pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
};
@@ -365,16 +365,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv
}
}
-static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
- .attach = pmic_glink_altmode_attach,
-};
-
static void pmic_glink_altmode_put_retimer(void *data)
{
typec_retimer_put(data);
@@ -464,15 +454,10 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
- alt_port->bridge.of_node = to_of_node(fwnode);
- alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
- alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
- if (ret) {
+ alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+ if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
- return ret;
+ return PTR_ERR(alt_port->bridge);
}
alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 79bcd5bd4938..04c1b32a22c5 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -663,17 +663,54 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
return ret;
}
-static struct fb_ops lynxfb_ops = {
+static const struct fb_ops lynxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = lynxfb_ops_check_var,
.fb_set_par = lynxfb_ops_set_par,
.fb_setcolreg = lynxfb_ops_setcolreg,
.fb_blank = lynxfb_ops_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
- /* cursor */
+ .fb_pan_display = lynxfb_ops_pan_display,
+};
+
+static const struct fb_ops lynxfb_ops_with_cursor = {
+ .owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_cursor = lynxfb_ops_cursor,
+};
+
+static const struct fb_ops lynxfb_ops_accel = {
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_fillrect = lynxfb_ops_fillrect,
+ .fb_copyarea = lynxfb_ops_copyarea,
+ .fb_imageblit = lynxfb_ops_imageblit,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
+};
+
+static const struct fb_ops lynxfb_ops_accel_with_cursor = {
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_fillrect = lynxfb_ops_fillrect,
+ .fb_copyarea = lynxfb_ops_copyarea,
+ .fb_imageblit = lynxfb_ops_imageblit,
.fb_cursor = lynxfb_ops_cursor,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
static int lynxfb_set_fbinfo(struct fb_info *info, int index)
@@ -714,7 +751,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
par->index = index;
output->channel = &crtc->channel;
sm750fb_set_drv(par);
- lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
/*
* set current cursor variable and proc pointer,
@@ -731,19 +767,22 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset;
memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
- if (!g_hwcursor) {
- lynxfb_ops.fb_cursor = NULL;
+ if (!g_hwcursor)
sm750_hw_cursor_disable(&crtc->cursor);
- }
/* set info->fbops, must be set before fb_find_mode */
if (!sm750_dev->accel_off) {
/* use 2d acceleration */
- lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
- lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
- lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
+ if (!g_hwcursor)
+ info->fbops = &lynxfb_ops_accel;
+ else
+ info->fbops = &lynxfb_ops_accel_with_cursor;
+ } else {
+ if (!g_hwcursor)
+ info->fbops = &lynxfb_ops;
+ else
+ info->fbops = &lynxfb_ops_with_cursor;
}
- info->fbops = &lynxfb_ops;
if (!g_fbmode[index]) {
g_fbmode[index] = g_def_fbmode;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1e15ffa79295..44e9b09de47a 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1143,7 +1143,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
* Only set bonding if the link was not already bonded. This
* avoids the lane adapter to re-enter bonding state.
*/
- if (width == TB_LINK_WIDTH_SINGLE) {
+ if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
ret = tb_port_set_lane_bonding(port, true);
if (ret)
goto err_lane1;
@@ -2880,6 +2880,7 @@ static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
}
+/* Note: updating sw->link_width is done in tb_switch_update_link_attributes() */
static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
{
struct tb_port *up, *down, *port;
@@ -2919,10 +2920,10 @@ static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
return ret;
}
- sw->link_width = width;
return 0;
}
+/* Note: updating sw->link_width is done in tb_switch_update_link_attributes() */
static int tb_switch_asym_disable(struct tb_switch *sw)
{
struct tb_port *up, *down;
@@ -2957,7 +2958,6 @@ static int tb_switch_asym_disable(struct tb_switch *sw)
return ret;
}
- sw->link_width = TB_LINK_WIDTH_DUAL;
return 0;
}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 5acdeb766860..fd49f86e0353 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -213,7 +213,17 @@ static void tb_add_dp_resources(struct tb_switch *sw)
if (!tb_switch_query_dp_resource(sw, port))
continue;
- list_add(&port->list, &tcm->dp_resources);
+ /*
+	 * If a DP IN on the device router exists, position it at the
+ * beginning of the DP resources list, so that it is used
+ * before DP IN of the host router. This way external GPU(s)
+ * will be prioritized when pairing DP IN to a DP OUT.
+ */
+ if (tb_route(sw))
+ list_add(&port->list, &tcm->dp_resources);
+ else
+ list_add_tail(&port->list, &tcm->dp_resources);
+
tb_port_dbg(port, "DP IN resource available\n");
}
}
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 2ba8ec254dce..0787456c2b89 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -436,7 +436,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
- hwq->max_entries = hba->nutrs;
+ hwq->max_entries = hba->nutrs + 1;
spin_lock_init(&hwq->sq_lock);
spin_lock_init(&hwq->cq_lock);
mutex_init(&hwq->sq_mutex);
@@ -630,6 +630,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
+ unsigned long flags;
int err = FAILED;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
@@ -670,8 +671,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
err = SUCCESS;
+ spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
out:
return err;
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index af981778382d..02f297f5637d 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1529,6 +1529,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
unsigned long flags;
int counter = 0;
+ local_bh_disable();
spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
@@ -1541,6 +1542,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_died(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
+ local_bh_enable();
return IRQ_HANDLED;
}
@@ -1557,6 +1559,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
spin_unlock_irqrestore(&pdev->lock, flags);
+ local_bh_enable();
return IRQ_HANDLED;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b19e38d5fd10..7f8d33f92ddb 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1047,7 +1047,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_notice(ddev, "descriptor type invalid, skip\n");
- continue;
+ goto skip_to_next_descriptor;
}
switch (cap_type) {
@@ -1078,6 +1078,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
break;
}
+skip_to_next_descriptor:
total_len -= length;
buffer += length;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b4584a0cd484..87480a6e6d93 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -622,29 +622,6 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
-
- /*
- * There is no need to lock status_mutex here, because status_mutex
- * protects hub->status, and the phy driver only checks the port
- * status without changing the status.
- */
- if (!ret) {
- struct usb_device *hdev = hub->hdev;
-
- /*
- * Only roothub will be notified of port state changes,
- * since the USB PHY only cares about changes at the next
- * level.
- */
- if (is_root_hub(hdev)) {
- struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
-
- if (hcd->usb_phy)
- usb_phy_notify_port_status(hcd->usb_phy,
- port1 - 1, *status, *change);
- }
- }
-
return ret;
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 0144ca8350c3..5c7538d498dd 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -2015,15 +2015,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
struct dwc2_qtd *qtd;
struct dwc2_host_chan *chan;
- u32 hcint, hcintmsk;
+ u32 hcint, hcintraw, hcintmsk;
chan = hsotg->hc_ptr_array[chnum];
- hcint = dwc2_readl(hsotg, HCINT(chnum));
+ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+ hcint = hcintraw & hcintmsk;
+ dwc2_writel(hsotg, hcint, HCINT(chnum));
+
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
- dwc2_writel(hsotg, hcint, HCINT(chnum));
return;
}
@@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
chnum);
dev_vdbg(hsotg->dev,
" hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
- hcint, hcintmsk, hcint & hcintmsk);
+ hcintraw, hcintmsk, hcint);
}
- dwc2_writel(hsotg, hcint, HCINT(chnum));
-
/*
* If we got an interrupt after someone called
* dwc2_hcd_endpoint_disable() we don't want to crash below
@@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
return;
}
- chan->hcint = hcint;
- hcint &= hcintmsk;
+ chan->hcint = hcintraw;
/*
* If the channel was halted due to a dequeue, the qtd list might
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 0328c86ef806..b101dbf8c5dc 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -2034,6 +2034,8 @@ static int dwc3_probe(struct platform_device *pdev)
pm_runtime_put(dev);
+ dma_set_max_seg_size(dev, UINT_MAX);
+
return 0;
err_exit_debugfs:
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 039bf241769a..57ddd2e43022 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
mode = DWC3_GCTL_PRTCAP_DEVICE;
}
+ dwc3_set_mode(dwc, mode);
dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
dwc3_role_switch.set = dwc3_usb_role_switch_set;
@@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
}
}
- dwc3_set_mode(dwc, mode);
return 0;
}
#else
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 3de43df6bbe8..fdf6d5d3c2ad 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -546,10 +546,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
pdata ? pdata->hs_phy_irq_index : -1);
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 HS", qcom);
if (ret) {
dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
@@ -561,10 +560,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
pdata ? pdata->dp_hs_phy_irq_index : -1);
if (irq > 0) {
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 DP_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
@@ -576,10 +574,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
pdata ? pdata->dm_hs_phy_irq_index : -1);
if (irq > 0) {
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 DM_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
@@ -591,10 +588,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
pdata ? pdata->ss_phy_irq_index : -1);
if (irq > 0) {
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 SS", qcom);
if (ret) {
dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
@@ -758,6 +754,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
if (!qcom->dwc3) {
ret = -ENODEV;
dev_err(dev, "failed to get dwc3 platform device\n");
+ of_platform_depopulate(dev);
}
node_put:
@@ -766,9 +763,9 @@ node_put:
return ret;
}
-static struct platform_device *
-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
{
+ struct platform_device *urs_usb = NULL;
struct fwnode_handle *fwh;
struct acpi_device *adev;
char name[8];
@@ -788,9 +785,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
adev = to_acpi_device_node(fwh);
if (!adev)
- return NULL;
+ goto err_put_handle;
+
+ urs_usb = acpi_create_platform_device(adev, NULL);
+ if (IS_ERR_OR_NULL(urs_usb))
+ goto err_put_handle;
+
+ return urs_usb;
+
+err_put_handle:
+ fwnode_handle_put(fwh);
+
+ return urs_usb;
+}
- return acpi_create_platform_device(adev, NULL);
+static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
+{
+ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
+
+ platform_device_unregister(urs_usb);
+ fwnode_handle_put(fwh);
}
static int dwc3_qcom_probe(struct platform_device *pdev)
@@ -874,13 +888,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
- goto clk_disable;
+ goto free_urs;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
- goto clk_disable;
+ goto free_urs;
}
/*
@@ -899,7 +913,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
- goto depopulate;
+ goto free_urs;
}
ret = dwc3_qcom_interconnect_init(qcom);
@@ -931,10 +945,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
depopulate:
- if (np)
+ if (np) {
of_platform_depopulate(&pdev->dev);
- else
- platform_device_put(pdev);
+ } else {
+ device_remove_software_node(&qcom->dwc3->dev);
+ platform_device_del(qcom->dwc3);
+ }
+ platform_device_put(qcom->dwc3);
+free_urs:
+ if (qcom->urs_usb)
+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -953,11 +973,16 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
- device_remove_software_node(&qcom->dwc3->dev);
- if (np)
+ if (np) {
of_platform_depopulate(&pdev->dev);
- else
- platform_device_put(pdev);
+ } else {
+ device_remove_software_node(&qcom->dwc3->dev);
+ platform_device_del(qcom->dwc3);
+ }
+ platform_device_put(qcom->dwc3);
+
+ if (qcom->urs_usb)
+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
diff --git a/drivers/usb/dwc3/dwc3-rtk.c b/drivers/usb/dwc3/dwc3-rtk.c
index 590028e8fdcb..3cd6b184551c 100644
--- a/drivers/usb/dwc3/dwc3-rtk.c
+++ b/drivers/usb/dwc3/dwc3-rtk.c
@@ -183,10 +183,13 @@ static enum usb_device_speed __get_dwc3_maximum_speed(struct device_node *np)
ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed);
if (ret < 0)
- return USB_SPEED_UNKNOWN;
+ goto out;
ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
+out:
+ of_node_put(dwc3_np);
+
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
@@ -339,6 +342,9 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
switch_usb2_role(rtk, rtk->cur_role);
+ platform_device_put(dwc3_pdev);
+ of_node_put(dwc3_node);
+
return 0;
err_pdev_put:
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 5b3cd455adec..61f3f8bbdcea 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -650,9 +650,8 @@ static int check_isoc_ss_overlap(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (sch_ep->ep_type == ISOC_OUT_EP) {
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j + CS_OFFSET);
- /* use cs to indicate existence of in-ss @(base+j) */
- if (tt->fs_bus_bw_in[k])
+ k = XHCI_MTK_BW_INDEX(base + j);
+ if (tt->in_ss_cnt[k])
return -ESCH_SS_OVERLAP;
}
} else if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
@@ -769,6 +768,14 @@ static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
tt->fs_frame_bw[f] -= (u16)sch_ep->bw_budget_table[j];
}
}
+
+ if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
+ k = XHCI_MTK_BW_INDEX(base);
+ if (used)
+ tt->in_ss_cnt[k]++;
+ else
+ tt->in_ss_cnt[k]--;
+ }
}
if (used)
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 865b55e23b15..39f7ae7d3087 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -38,6 +38,7 @@
* @fs_bus_bw_in: save bandwidth used by FS/LS IN eps in each uframes
* @ls_bus_bw: save bandwidth used by LS eps in each uframes
* @fs_frame_bw: save bandwidth used by FS/LS eps in each FS frames
+ * @in_ss_cnt: the count of Start-Split for IN eps
* @ep_list: Endpoints using this TT
*/
struct mu3h_sch_tt {
@@ -45,6 +46,7 @@ struct mu3h_sch_tt {
u16 fs_bus_bw_in[XHCI_MTK_MAX_ESIT];
u8 ls_bus_bw[XHCI_MTK_MAX_ESIT];
u16 fs_frame_bw[XHCI_MTK_FRAMES_CNT];
+ u8 in_ss_cnt[XHCI_MTK_MAX_ESIT];
struct list_head ep_list;
};
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index b93161374293..732cdeb73920 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
@@ -148,7 +149,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
-
+ bool of_match;
if (usb_disabled())
return -ENODEV;
@@ -253,16 +254,23 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
&xhci->imod_interval);
}
- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
- if (IS_ERR(hcd->usb_phy)) {
- ret = PTR_ERR(hcd->usb_phy);
- if (ret == -EPROBE_DEFER)
- goto disable_clk;
- hcd->usb_phy = NULL;
- } else {
- ret = usb_phy_init(hcd->usb_phy);
- if (ret)
- goto disable_clk;
+ /*
+	 * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
+ * matching for the xhci platform device).
+ */
+ of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
+ if (of_match) {
+ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+ if (IS_ERR(hcd->usb_phy)) {
+ ret = PTR_ERR(hcd->usb_phy);
+ if (ret == -EPROBE_DEFER)
+ goto disable_clk;
+ hcd->usb_phy = NULL;
+ } else {
+ ret = usb_phy_init(hcd->usb_phy);
+ if (ret)
+ goto disable_clk;
+ }
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
@@ -285,15 +293,17 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
goto dealloc_usb2_hcd;
}
- xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
- "usb-phy", 1);
- if (IS_ERR(xhci->shared_hcd->usb_phy)) {
- xhci->shared_hcd->usb_phy = NULL;
- } else {
- ret = usb_phy_init(xhci->shared_hcd->usb_phy);
- if (ret)
- dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
- __func__, ret);
+ if (of_match) {
+ xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
+ "usb-phy", 1);
+ if (IS_ERR(xhci->shared_hcd->usb_phy)) {
+ xhci->shared_hcd->usb_phy = NULL;
+ } else {
+ ret = usb_phy_init(xhci->shared_hcd->usb_phy);
+ if (ret)
+ dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
+ __func__, ret);
+ }
}
xhci->shared_hcd->tpl_support = hcd->tpl_support;
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index a341b2fbb7b4..2b45404e9732 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -432,6 +432,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index c4e24a7b9290..292110e64a1d 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -16,6 +16,11 @@ static const struct onboard_hub_pdata microchip_usb424_data = {
.num_supplies = 1,
};
+static const struct onboard_hub_pdata microchip_usb5744_data = {
+ .reset_us = 0,
+ .num_supplies = 2,
+};
+
static const struct onboard_hub_pdata realtek_rts5411_data = {
.reset_us = 0,
.num_supplies = 1,
@@ -50,6 +55,8 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2412", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index c9decd0396d4..35770e608c64 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -457,8 +457,8 @@ static void ljca_auxdev_acpi_bind(struct ljca_adapter *adap,
u64 adr, u8 id)
{
struct ljca_match_ids_walk_data wd = { 0 };
- struct acpi_device *parent, *adev;
struct device *dev = adap->dev;
+ struct acpi_device *parent;
char uid[4];
parent = ACPI_COMPANION(dev);
@@ -466,17 +466,7 @@ static void ljca_auxdev_acpi_bind(struct ljca_adapter *adap,
return;
/*
- * get auxdev ACPI handle from the ACPI device directly
- * under the parent that matches _ADR.
- */
- adev = acpi_find_child_device(parent, adr, false);
- if (adev) {
- ACPI_COMPANION_SET(&auxdev->dev, adev);
- return;
- }
-
- /*
- * _ADR is a grey area in the ACPI specification, some
+	 * Currently LJCA hw doesn't use _ADR; instead, the shipped
* platforms use _HID to distinguish children devices.
*/
switch (adr) {
@@ -656,10 +646,11 @@ static int ljca_enumerate_spi(struct ljca_adapter *adap)
unsigned int i;
int ret;
+	/* Not all LJCA chips implement SPI; a timeout reading the descriptors is normal */
ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_SPI, NULL, 0, buf,
sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS);
if (ret < 0)
- return ret;
+ return (ret == -ETIMEDOUT) ? 0 : ret;
/* check firmware response */
desc = (struct ljca_spi_descriptor *)buf;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 45dcfaadaf98..4dffcfefd62d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
-#define DELL_PRODUCT_FM101R 0x8213
-#define DELL_PRODUCT_FM101R_ESIM 0x8215
+#define DELL_PRODUCT_FM101R_ESIM 0x8213
+#define DELL_PRODUCT_FM101R 0x8215
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
+/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+#define LUAT_PRODUCT_AIR720U 0x4e00
/* Device flags */
@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 816b9bd08355..38416fb0cc3c 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -40,7 +40,7 @@ config TYPEC_MUX_NB7VPQ904M
tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
depends on I2C
depends on DRM || DRM=n
- select DRM_PANEL_BRIDGE if DRM
+ select DRM_AUX_BRIDGE if DRM_BRIDGE && OF
select REGMAP_I2C
help
Say Y or M if your system has a On Semiconductor NB7VPQ904M Type-C
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
index cda206cf0c38..b17826713753 100644
--- a/drivers/usb/typec/mux/nb7vpq904m.c
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -11,7 +11,7 @@
#include <linux/regmap.h>
#include <linux/bitfield.h>
#include <linux/of_graph.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
@@ -70,8 +70,6 @@ struct nb7vpq904m {
bool swap_data_lanes;
struct typec_switch *typec_switch;
- struct drm_bridge bridge;
-
struct mutex lock; /* protect non-concurrent retimer & switch */
enum typec_orientation orientation;
@@ -297,44 +295,6 @@ static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_re
return ret;
}
-#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
-static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge);
- struct drm_bridge *next_bridge;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
- return -EINVAL;
-
- next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0);
- if (IS_ERR(next_bridge)) {
- dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
- return PTR_ERR(next_bridge);
- }
-
- return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = {
- .attach = nb7vpq904m_bridge_attach,
-};
-
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
- nb7->bridge.funcs = &nb7vpq904m_bridge_funcs;
- nb7->bridge.of_node = nb7->client->dev.of_node;
-
- return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge);
-}
-#else
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
- return 0;
-}
-#endif
-
static const struct regmap_config nb7_regmap = {
.max_register = 0x1f,
.reg_bits = 8,
@@ -461,7 +421,7 @@ static int nb7vpq904m_probe(struct i2c_client *client)
gpiod_set_value(nb7->enable_gpio, 1);
- ret = nb7vpq904m_register_bridge(nb7);
+ ret = drm_aux_bridge_register(dev);
if (ret)
goto err_disable_gpio;
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 0b2993fef564..8cdd84ca5d6f 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -80,6 +80,7 @@ config TYPEC_QCOM_PMIC
tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
depends on ARCH_QCOM || COMPILE_TEST
depends on DRM || DRM=n
+ select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE && OF
help
A Type-C port and Power Delivery driver which aggregates two
discrete pieces of silicon in the PM8150b PMIC block: the
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 581199d37b49..1a2b4bddaa97 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -18,7 +18,7 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
@@ -36,7 +36,6 @@ struct pmic_typec {
struct pmic_typec_port *pmic_typec_port;
bool vbus_enabled;
struct mutex lock; /* VBUS state serialization */
- struct drm_bridge bridge;
};
#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
@@ -150,35 +149,6 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM)
-static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
- .attach = qcom_pmic_typec_attach,
-};
-
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
- tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
-#ifdef CONFIG_OF
- tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
-#endif
- tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
- tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
- return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
-}
-#else
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
- return 0;
-}
-#endif
-
static int qcom_pmic_typec_probe(struct platform_device *pdev)
{
struct pmic_typec *tcpm;
@@ -186,6 +156,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct pmic_typec_resources *res;
struct regmap *regmap;
+ struct device *bridge_dev;
u32 base[2];
int ret;
@@ -241,14 +212,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
- ret = qcom_pmic_typec_init_drm(tcpm);
- if (ret)
- return ret;
-
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
if (!tcpm->tcpc.fwnode)
return -EINVAL;
+ bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
if (IS_ERR(tcpm->tcpm_port)) {
ret = PTR_ERR(tcpm->tcpm_port);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 058d5b853b57..bfb6f9481e87 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4273,7 +4273,8 @@ static void run_state_machine(struct tcpm_port *port)
current_lim = PD_P_SNK_STDBY_MW / 5;
tcpm_set_current_limit(port, current_lim, 5000);
/* Not sink vbus if operational current is 0mA */
- tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
+ tcpm_set_charge(port, !port->pd_supported ||
+ pdo_max_current(port->snk_pdo[0]));
if (!port->pd_supported)
tcpm_set_state(port, SNK_READY, 0);
@@ -5391,6 +5392,15 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
port->tcpc->set_bist_data(port->tcpc, false);
+ switch (port->state) {
+ case ERROR_RECOVERY:
+ case PORT_RESET:
+ case PORT_RESET_WAIT_OFF:
+ return;
+ default:
+ break;
+ }
+
if (port->ams != NONE_AMS)
port->ams = NONE_AMS;
if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 0e867f531d34..196535ad996d 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -968,16 +968,17 @@ static int tps25750_start_patch_burst_mode(struct tps6598x *tps)
ret = of_property_match_string(np, "reg-names", "patch-address");
if (ret < 0) {
dev_err(tps->dev, "failed to get patch-address %d\n", ret);
- return ret;
+ goto release_fw;
}
ret = of_property_read_u32_index(np, "reg", ret, &addr);
if (ret)
- return ret;
+ goto release_fw;
if (addr == 0 || (addr >= 0x20 && addr <= 0x23)) {
dev_err(tps->dev, "wrong patch address %u\n", addr);
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_fw;
}
bpms_data.addr = (u8)addr;
@@ -1226,7 +1227,10 @@ static int tps6598x_probe(struct i2c_client *client)
TPS_REG_INT_PLUG_EVENT;
}
- tps->data = device_get_match_data(tps->dev);
+ if (dev_fwnode(tps->dev))
+ tps->data = device_get_match_data(tps->dev);
+ else
+ tps->data = i2c_get_match_data(client);
if (!tps->data)
return -EINVAL;
@@ -1425,7 +1429,7 @@ static const struct of_device_id tps6598x_of_match[] = {
MODULE_DEVICE_TABLE(of, tps6598x_of_match);
static const struct i2c_device_id tps6598x_id[] = {
- { "tps6598x" },
+ { "tps6598x", (kernel_ulong_t)&tps6598x_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, tps6598x_id);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index b3a3cb165795..b137f3679343 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
if (blk->shared_backend) {
blk->buffer = shared_buffer;
} else {
- blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
GFP_KERNEL);
if (!blk->buffer) {
ret = -ENOMEM;
@@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void)
goto parent_err;
if (shared_backend) {
- shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
GFP_KERNEL);
if (!shared_buffer) {
ret = -ENOMEM;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 30df5c58db73..da7ec77cdaff 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -1582,7 +1582,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
err:
put_device(&v->dev);
- ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 53693c826ebd..d5909a9206ff 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -146,10 +146,8 @@ config FB_ACORN
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select LCD_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
@@ -180,10 +178,7 @@ config FB_IMX
config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
depends on FB && PCI && (BROKEN || !SPARC64)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_IOMEM_FOPS
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This enables support for the Integraphics CyberPro 20x0 and 5000
@@ -272,10 +267,7 @@ config FB_FM2
config FB_ARC
tristate "Arc Monochrome LCD board support"
depends on FB && (X86 || COMPILE_TEST)
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This enables support for the Arc Monochrome LCD board. The board
is based on the KS-108 lcd controller and is typically a matrix
@@ -1460,10 +1452,7 @@ config FB_AU1100
config FB_AU1200
bool "Au1200/Au1300 LCD Driver"
depends on (FB = y) && MIPS_ALCHEMY
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_DMAMEM_HELPERS
help
This is the framebuffer driver for the Au1200/Au1300 SOCs.
It can drive various panels and CRTs by passing in kernel cmd line
@@ -1475,6 +1464,7 @@ config FB_VT8500
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1487,6 +1477,7 @@ config FB_WM8505
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1637,12 +1628,9 @@ config FB_SH_MOBILE_LCDC
depends on FB && HAVE_CLK && HAS_IOMEM
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on FB_DEVICE
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
select FB_BACKLIGHT
+ select FB_DEFERRED_IO
+ select FB_DMAMEM_HELPERS
help
Frame buffer driver for the on-chip SH-Mobile LCD controller.
@@ -1689,11 +1677,7 @@ config FB_SMSCUFX
tristate "SMSC UFX6000/7000 USB Framebuffer support"
depends on FB && USB
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This is a kernel framebuffer driver for SMSC UFX USB devices.
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1706,11 +1690,7 @@ config FB_UDL
depends on FB && USB
depends on FB_DEVICE
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This is a kernel framebuffer driver for DisplayLink USB devices.
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1732,10 +1712,7 @@ config FB_IBM_GXT4500
config FB_PS3
tristate "PS3 GPU framebuffer driver"
depends on FB && PS3_PS3AV
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
help
Include support for the virtual frame buffer in the PS3 platform.
@@ -1800,10 +1777,7 @@ config FB_DA8XX
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
help
This is a `virtual' frame buffer device. It operates on a chunk of
unswappable kernel memory instead of on the memory of a graphics
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 163d2c9f951c..f0600f6ca254 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
static const struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
- FB_IOMEM_DEFAULT_OPS,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = acornfb_check_var,
.fb_set_par = acornfb_set_par,
.fb_setcolreg = acornfb_setcolreg,
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 0399db369e70..47d373f04f3f 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -829,6 +829,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index cff11cb04a55..b2408543277c 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -363,39 +363,6 @@ static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx,
}
}
-static void arcfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct arcfb_par *par = info->par;
-
- sys_fillrect(info, rect);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void arcfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct arcfb_par *par = info->par;
-
- sys_copyarea(info, area);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, area->dx, area->dy, area->width, area->height);
-}
-
-static void arcfb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- struct arcfb_par *par = info->par;
-
- sys_imageblit(info, image);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, image->dx, image->dy, image->width,
- image->height);
-}
-
static int arcfb_ioctl(struct fb_info *info,
unsigned int cmd, unsigned long arg)
{
@@ -436,76 +403,48 @@ static int arcfb_ioctl(struct fb_info *info,
}
}
-/*
- * this is the access path from userspace. they can seek and write to
- * the fb. it's inefficient for them to do anything less than 64*8
- * writes since we update the lcd in each write() anyway.
- */
-static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
+static void arcfb_damage_range(struct fb_info *info, off_t off, size_t len)
{
- /* modded from epson 1355 */
-
- unsigned long p;
- int err;
- unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount;
- struct arcfb_par *par;
- unsigned int xres;
-
- if (!info->screen_buffer)
- return -ENODEV;
-
- p = *ppos;
- par = info->par;
- xres = info->var.xres;
- fbmemlength = (xres * info->var.yres)/8;
-
- if (p > fbmemlength)
- return -ENOSPC;
-
- err = 0;
- if ((count + p) > fbmemlength) {
- count = fbmemlength - p;
- err = -ENOSPC;
- }
-
- if (count) {
- char *base_addr;
-
- base_addr = info->screen_buffer;
- count -= copy_from_user(base_addr + p, buf, count);
- *ppos += count;
- err = -EFAULT;
- }
-
+ struct arcfb_par *par = info->par;
+ unsigned int xres = info->var.xres;
+ unsigned int bitppos, startpos, endpos, bitcount;
+ unsigned int x, y, width, height;
- bitppos = p*8;
+ bitppos = off * 8;
startpos = floorXres(bitppos, xres);
- endpos = ceilXres((bitppos + (count*8)), xres);
+ endpos = ceilXres((bitppos + (len * 8)), xres);
bitcount = endpos - startpos;
x = startpos % xres;
y = startpos / xres;
- w = xres;
- h = bitcount / xres;
- arcfb_lcd_update(par, x, y, w, h);
+ width = xres;
+ height = bitcount / xres;
+
+ arcfb_lcd_update(par, x, y, width, height);
+}
- if (count)
- return count;
- return err;
+static void arcfb_damage_area(struct fb_info *info, u32 x, u32 y,
+ u32 width, u32 height)
+{
+ struct arcfb_par *par = info->par;
+
+ /* update the physical lcd */
+ arcfb_lcd_update(par, x, y, width, height);
}
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(arcfb,
+ arcfb_damage_range,
+ arcfb_damage_area)
+
static const struct fb_ops arcfb_ops = {
.owner = THIS_MODULE,
.fb_open = arcfb_open,
- .fb_read = fb_sys_read,
- .fb_write = arcfb_write,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(arcfb),
.fb_release = arcfb_release,
.fb_pan_display = arcfb_pan_display,
- .fb_fillrect = arcfb_fillrect,
- .fb_copyarea = arcfb_copyarea,
- .fb_imageblit = arcfb_imageblit,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(arcfb),
.fb_ioctl = arcfb_ioctl,
+	// .fb_mmap requires deferred I/O
};
static int arcfb_probe(struct platform_device *dev)
@@ -529,6 +468,7 @@ static int arcfb_probe(struct platform_device *dev)
if (!info)
goto err_fb_alloc;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = videomemory;
info->fbops = &arcfb_ops;
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index a9c8d33a6ef7..08109ce535cd 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -342,6 +342,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 98afd385c49c..6f20efc663d7 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1236,6 +1236,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(fbdev->dev, vma,
fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len);
}
@@ -1488,15 +1490,12 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
static const struct fb_ops au1200fb_fb_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_check_var = au1200fb_fb_check_var,
.fb_set_par = au1200fb_fb_set_par,
.fb_setcolreg = au1200fb_fb_setcolreg,
.fb_blank = au1200fb_fb_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_sync = NULL,
.fb_ioctl = au1200fb_ioctl,
.fb_mmap = au1200fb_fb_mmap,
@@ -1568,6 +1567,8 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
fbi->fix.mmio_len = 0;
fbi->fix.accel = FB_ACCEL_NONE;
+ fbi->flags |= FBINFO_VIRTFB;
+
fbi->screen_buffer = fbdev->fb_mem;
au1200fb_update_fbinfo(fbi);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index e956c90efcdc..dcfd1fbbc7e1 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -155,13 +155,11 @@ static int clps711x_fb_blank(int blank, struct fb_info *info)
static const struct fb_ops clps711x_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = clps711x_fb_setcolreg,
.fb_check_var = clps711x_fb_check_var,
.fb_set_par = clps711x_fb_set_par,
.fb_blank = clps711x_fb_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
};
static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 7a3ed13bed70..21053bf00dc5 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -4,7 +4,6 @@
#
config FB_CORE
- select FB_IOMEM_FOPS
select VIDEO_CMDLINE
tristate
@@ -129,7 +128,7 @@ config FB_LITTLE_ENDIAN
endchoice
-config FB_SYS_FOPS
+config FB_SYSMEM_FOPS
tristate
depends on FB_CORE
@@ -142,8 +141,8 @@ config FB_DMAMEM_HELPERS
depends on FB_CORE
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
- select FB_SYS_FOPS
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
config FB_IOMEM_FOPS
tristate
@@ -168,8 +167,8 @@ config FB_SYSMEM_HELPERS
depends on FB_CORE
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
- select FB_SYS_FOPS
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
config FB_SYSMEM_HELPERS_DEFERRED
bool
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index c1d657601b2b..d15974759086 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -32,6 +32,6 @@ obj-$(CONFIG_FB_IOMEM_FOPS) += fb_io_fops.o
obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
-obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+obj-$(CONFIG_FB_SYSMEM_FOPS) += fb_sys_fops.o
obj-$(CONFIG_FB_SVGALIB) += svgalib.o
obj-$(CONFIG_FB_DDC) += fb_ddc.o
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 5b80bf3dae50..a271f57d9c6c 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -391,6 +391,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
/* if the beginning of the target area might overlap with the end of
	   the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index ba9f58b2a5e8..cbaa4c9e2355 100644
--- a/drivers/video/fbdev/core/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
@@ -287,6 +287,9 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index 9ebda4e0dc7a..7d1d2f1a627d 100644
--- a/drivers/video/fbdev/core/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
@@ -326,6 +326,9 @@ void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c
index 32a7315b4b6d..4ebd16b7e3b8 100644
--- a/drivers/video/fbdev/core/fb_chrdev.c
+++ b/drivers/video/fbdev/core/fb_chrdev.c
@@ -34,13 +34,13 @@ static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t
if (!info)
return -ENODEV;
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_read))
+ return -EINVAL;
+
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
- if (info->fbops->fb_read)
- return info->fbops->fb_read(info, buf, count, ppos);
-
- return fb_io_read(info, buf, count, ppos);
+ return info->fbops->fb_read(info, buf, count, ppos);
}
static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
@@ -50,13 +50,13 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count,
if (!info)
return -ENODEV;
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_write))
+ return -EINVAL;
+
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
- if (info->fbops->fb_write)
- return info->fbops->fb_write(info, buf, count, ppos);
-
- return fb_io_write(info, buf, count, ppos);
+ return info->fbops->fb_write(info, buf, count, ppos);
}
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
@@ -314,61 +314,19 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int fb_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fb_info *info = file_fb_info(file);
- unsigned long mmio_pgoff;
- unsigned long start;
- u32 len;
+ int res;
if (!info)
return -ENODEV;
- mutex_lock(&info->mm_lock);
- if (info->fbops->fb_mmap) {
- int res;
-
- /*
- * The framebuffer needs to be accessed decrypted, be sure
- * SME protection is removed ahead of the call
- */
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
- res = info->fbops->fb_mmap(info, vma);
- mutex_unlock(&info->mm_lock);
- return res;
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
- } else if (info->fbdefio) {
- /*
- * FB deferred I/O wants you to handle mmap in your drivers. At a
- * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
- */
- dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
- mutex_unlock(&info->mm_lock);
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_mmap))
return -ENODEV;
-#endif
- }
-
- /*
- * Ugh. This can be either the frame buffer mapping, or
- * if pgoff points past it, the mmio mapping.
- */
- start = info->fix.smem_start;
- len = info->fix.smem_len;
- mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
- if (vma->vm_pgoff >= mmio_pgoff) {
- if (info->var.accel_flags) {
- mutex_unlock(&info->mm_lock);
- return -EINVAL;
- }
- vma->vm_pgoff -= mmio_pgoff;
- start = info->fix.mmio_start;
- len = info->fix.mmio_len;
- }
+ mutex_lock(&info->mm_lock);
+ res = info->fbops->fb_mmap(info, vma);
mutex_unlock(&info->mm_lock);
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
- vma->vm_end, start);
-
- return vm_iomap_memory(vma, start, len);
+ return res;
}
static int fb_open(struct inode *inode, struct file *file)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 274f5d0fa247..1b0b85e59e5e 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -227,6 +227,8 @@ static const struct address_space_operations fb_deferred_io_aops = {
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
vma->vm_ops = &fb_deferred_io_vm_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c
index 871b829521af..3408ff1b2b7a 100644
--- a/drivers/video/fbdev/core/fb_io_fops.c
+++ b/drivers/video/fbdev/core/fb_io_fops.c
@@ -12,6 +12,9 @@ ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t
int c, cnt = 0, err = 0;
unsigned long total_size, trailing;
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
if (!info->screen_base)
return -ENODEV;
@@ -73,6 +76,9 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
int c, cnt = 0, err = 0;
unsigned long total_size, trailing;
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
if (!info->screen_base)
return -ENODEV;
@@ -132,5 +138,35 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
}
EXPORT_SYMBOL(fb_io_write);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ unsigned long start = info->fix.smem_start;
+ u32 len = info->fix.smem_len;
+ unsigned long mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
+ /*
+ * This can be either the framebuffer mapping, or if pgoff points
+ * past it, the mmio mapping.
+ */
+ if (vma->vm_pgoff >= mmio_pgoff) {
+ if (info->var.accel_flags)
+ return -EINVAL;
+
+ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+ len = info->fix.mmio_len;
+ }
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
+ vma->vm_end, start);
+
+ return vm_iomap_memory(vma, start, len);
+}
+EXPORT_SYMBOL(fb_io_mmap);
+
MODULE_DESCRIPTION("Fbdev helpers for framebuffers in I/O memory");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/core/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c
index 0cb0989abda6..a9aa6519a5b3 100644
--- a/drivers/video/fbdev/core/fb_sys_fops.c
+++ b/drivers/video/fbdev/core/fb_sys_fops.c
@@ -22,6 +22,9 @@ ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count,
unsigned long total_size, c;
ssize_t ret;
+ if (!(info->flags & FBINFO_VIRTFB))
+ fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
if (!info->screen_buffer)
return -ENODEV;
@@ -64,6 +67,9 @@ ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
unsigned long total_size, c;
size_t ret;
+ if (!(info->flags & FBINFO_VIRTFB))
+ fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
if (!info->screen_buffer)
return -ENODEV;
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 7b8bd3a2bedc..75e7001e8450 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -324,6 +324,9 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
/* if the beginning of the target area might overlap with the end of
	   the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index bcdcaeae6538..e49221a88ccc 100644
--- a/drivers/video/fbdev/core/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
@@ -242,6 +242,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index 665ef7a0a249..6949bbd51d92 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -296,6 +296,9 @@ void sys_imageblit(struct fb_info *p, const struct fb_image *image)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 52105dc1a72f..abb87d3576db 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -227,13 +227,6 @@ cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
CO_REG_CMD_H, cfb);
}
-static void
-cyber2000fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- cfb_imageblit(info, image);
- return;
-}
-
static int cyber2000fb_sync(struct fb_info *info)
{
struct cfb_info *cfb = container_of(info, struct cfb_info, fb);
@@ -1069,7 +1062,7 @@ static const struct fb_ops cyber2000fb_ops = {
.fb_pan_display = cyber2000fb_pan_display,
.fb_fillrect = cyber2000fb_fillrect,
.fb_copyarea = cyber2000fb_copyarea,
- .fb_imageblit = cyber2000fb_imageblit,
+ .fb_imageblit = cfb_imageblit,
.fb_sync = cyber2000fb_sync,
__FB_DEFAULT_IOMEM_OPS_MMAP,
};
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index cae00deee001..3e378874ccc7 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -311,6 +311,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (offset < info->fix.smem_len) {
return dma_mmap_wc(info->device, vma, info->screen_base,
info->fix.smem_start, info->fix.smem_len);
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index e89e5579258e..8463de833d1e 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1000,6 +1000,8 @@ static int gbefb_mmap(struct fb_info *info,
unsigned long phys_addr, phys_size;
u16 *tile;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
/* check range */
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 694cf6318782..aa31c0d26e92 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1203,6 +1203,8 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct omapfb_device *fbdev = plane->fbdev;
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
omapfb_rqueue_lock(fbdev);
r = fbdev->ctrl->mmap(info, vma);
omapfb_rqueue_unlock(fbdev);
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index c9fd0ad352d7..0db9c55fce5a 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1095,6 +1095,8 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
u32 len;
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
rg = omapfb_get_mem_region(ofbi->region);
start = omapfb_get_region_paddr(ofbi);
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 64d291d6b153..dbcda307f6a6 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -708,6 +708,8 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
@@ -939,15 +941,12 @@ static const struct fb_ops ps3fb_ops = {
.owner = THIS_MODULE,
.fb_open = ps3fb_open,
.fb_release = ps3fb_release,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_check_var = ps3fb_check_var,
.fb_set_par = ps3fb_set_par,
.fb_setcolreg = ps3fb_setcolreg,
.fb_pan_display = ps3fb_pan_display,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ps3fb_mmap,
.fb_blank = ps3fb_blank,
.fb_ioctl = ps3fb_ioctl,
@@ -1145,7 +1144,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
info->pseudo_palette = par->pseudo_palette;
- info->flags = FBINFO_READS_FAST |
+ info->flags = FBINFO_VIRTFB | FBINFO_READS_FAST |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index befd3fe2f659..0d362d2bf0e3 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -562,6 +562,8 @@ static int sa1100fb_mmap(struct fb_info *info,
container_of(info, struct sa1100fb_info, fb);
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (off < info->fix.smem_len) {
vma->vm_pgoff += 1; /* skip over the palette */
return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 21e9fd8e69e2..634e3d159452 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -48,7 +48,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
unsigned long map_offset = 0;
unsigned long off;
int i;
-
+
if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
return -EINVAL;
@@ -60,6 +60,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Each page, see which map applies */
@@ -72,7 +73,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
#define POFF_MASK (PAGE_MASK|0x1UL)
#else
#define POFF_MASK (PAGE_MASK)
-#endif
+#endif
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 1364dafaadb1..eb2297b37504 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1482,19 +1482,18 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
ovl->dma_handle, ovl->fb_size);
}
static const struct fb_ops sh_mobile_lcdc_overlay_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_blank = sh_mobile_lcdc_overlay_blank,
.fb_pan_display = sh_mobile_lcdc_overlay_pan,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_ioctl = sh_mobile_lcdc_overlay_ioctl,
.fb_check_var = sh_mobile_lcdc_overlay_check_var,
.fb_set_par = sh_mobile_lcdc_overlay_set_par,
@@ -1567,6 +1566,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
info->fbops = &sh_mobile_lcdc_overlay_ops;
info->device = priv->dev;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = ovl->fb_mem;
info->par = ovl;
@@ -1958,6 +1958,8 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
ch->dma_handle, ch->fb_size);
}
@@ -1965,8 +1967,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
static const struct fb_ops sh_mobile_lcdc_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = sh_mobile_lcdc_setcolreg,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_fillrect = sh_mobile_lcdc_fillrect,
.fb_copyarea = sh_mobile_lcdc_copyarea,
.fb_imageblit = sh_mobile_lcdc_imageblit,
@@ -2053,6 +2054,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->fbops = &sh_mobile_lcdc_ops;
info->device = priv->dev;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = ch->fb_mem;
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 62f99f6fccd3..6f58ee276ad1 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -21,9 +21,11 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/parser.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
static const struct fb_fix_screeninfo simplefb_fix = {
@@ -77,6 +79,11 @@ struct simplefb_par {
unsigned int clk_count;
struct clk **clks;
#endif
+#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS
+ unsigned int num_genpds;
+ struct device **genpds;
+ struct device_link **genpd_links;
+#endif
#if defined CONFIG_OF && defined CONFIG_REGULATOR
bool regulators_enabled;
u32 regulator_count;
@@ -121,12 +128,13 @@ struct simplefb_params {
u32 height;
u32 stride;
struct simplefb_format *format;
+ struct resource memory;
};
static int simplefb_parse_dt(struct platform_device *pdev,
struct simplefb_params *params)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *mem;
int ret;
const char *format;
int i;
@@ -166,6 +174,23 @@ static int simplefb_parse_dt(struct platform_device *pdev,
return -EINVAL;
}
+ mem = of_parse_phandle(np, "memory-region", 0);
+ if (mem) {
+ ret = of_address_to_resource(mem, 0, &params->memory);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse memory-region\n");
+ of_node_put(mem);
+ return ret;
+ }
+
+ if (of_property_present(np, "reg"))
+ dev_warn(&pdev->dev, "preferring \"memory-region\" over \"reg\" property\n");
+
+ of_node_put(mem);
+ } else {
+ memset(&params->memory, 0, sizeof(params->memory));
+ }
+
return 0;
}
@@ -193,6 +218,8 @@ static int simplefb_parse_pd(struct platform_device *pdev,
return -EINVAL;
}
+ memset(&params->memory, 0, sizeof(params->memory));
+
return 0;
}
@@ -411,6 +438,93 @@ static void simplefb_regulators_enable(struct simplefb_par *par,
static void simplefb_regulators_destroy(struct simplefb_par *par) { }
#endif
+#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS
+static void simplefb_detach_genpds(void *res)
+{
+ struct simplefb_par *par = res;
+ unsigned int i = par->num_genpds;
+
+ if (par->num_genpds <= 1)
+ return;
+
+ while (i--) {
+ if (par->genpd_links[i])
+ device_link_del(par->genpd_links[i]);
+
+ if (!IS_ERR_OR_NULL(par->genpds[i]))
+ dev_pm_domain_detach(par->genpds[i], true);
+ }
+}
+
+static int simplefb_attach_genpds(struct simplefb_par *par,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int i;
+ int err;
+
+ err = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (err < 0) {
+ /* Nothing wrong if optional PDs are missing */
+ if (err == -ENOENT)
+ return 0;
+
+ dev_info(dev, "failed to parse power-domains: %d\n", err);
+ return err;
+ }
+
+ par->num_genpds = err;
+
+ /*
+ * Single power-domain devices are handled by the driver core, so
+ * nothing to do here.
+ */
+ if (par->num_genpds <= 1)
+ return 0;
+
+ par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds),
+ GFP_KERNEL);
+ if (!par->genpds)
+ return -ENOMEM;
+
+ par->genpd_links = devm_kcalloc(dev, par->num_genpds,
+ sizeof(*par->genpd_links),
+ GFP_KERNEL);
+ if (!par->genpd_links)
+ return -ENOMEM;
+
+ for (i = 0; i < par->num_genpds; i++) {
+ par->genpds[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(par->genpds[i])) {
+ err = PTR_ERR(par->genpds[i]);
+ if (err == -EPROBE_DEFER) {
+ simplefb_detach_genpds(par);
+ return err;
+ }
+
+ dev_warn(dev, "failed to attach domain %u: %d\n", i, err);
+ continue;
+ }
+
+ par->genpd_links[i] = device_link_add(dev, par->genpds[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!par->genpd_links[i])
+ dev_warn(dev, "failed to link power-domain %u\n", i);
+ }
+
+ return devm_add_action_or_reset(dev, simplefb_detach_genpds, par);
+}
+#else
+static int simplefb_attach_genpds(struct simplefb_par *par,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
static int simplefb_probe(struct platform_device *pdev)
{
int ret;
@@ -431,10 +545,14 @@ static int simplefb_probe(struct platform_device *pdev)
if (ret)
return ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "No memory resource\n");
- return -EINVAL;
+ if (params.memory.start == 0 && params.memory.end == 0) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ return -EINVAL;
+ }
+ } else {
+ res = &params.memory;
}
mem = request_mem_region(res->start, resource_size(res), "simplefb");
@@ -493,6 +611,10 @@ static int simplefb_probe(struct platform_device *pdev)
if (ret < 0)
goto error_clocks;
+ ret = simplefb_attach_genpds(par, pdev);
+ if (ret < 0)
+ goto error_regulators;
+
simplefb_clocks_enable(par, pdev);
simplefb_regulators_enable(par, pdev);
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 3f8ef50e3209..104f122e0f27 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1347,16 +1347,14 @@ static int smtc_set_par(struct fb_info *info)
static const struct fb_ops smtcfb_ops = {
.owner = THIS_MODULE,
- FB_DEFAULT_IOMEM_OPS,
.fb_check_var = smtc_check_var,
.fb_set_par = smtc_set_par,
.fb_setcolreg = smtc_setcolreg,
.fb_blank = smtc_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_read = smtcfb_read,
.fb_write = smtcfb_write,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
/*
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 90a77d19b236..35d682b110c4 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -783,6 +783,8 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 2460ff4ac86b..1514ddac4caf 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -331,6 +331,8 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 840ead69654b..a087b42ca652 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -998,6 +998,8 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
int ret;
unsigned long prot;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
ret = vmlfb_vram_offset(vinfo, offset);
if (ret)
return -EINVAL;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 1b7c338f9956..f86149ba3835 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -80,15 +80,12 @@ static int vfb_mmap(struct fb_info *info,
static const struct fb_ops vfb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_check_var = vfb_check_var,
.fb_set_par = vfb_set_par,
.fb_setcolreg = vfb_setcolreg,
.fb_pan_display = vfb_pan_display,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = vfb_mmap,
};
@@ -385,6 +382,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
}
@@ -440,6 +439,7 @@ static int vfb_probe(struct platform_device *dev)
if (!info)
goto err;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = videomemory;
info->fbops = &vfb_ops;
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index 42d39a9d5130..42c25dc85197 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -241,6 +241,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
static const struct fb_ops vt8500lcd_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_set_par = vt8500lcd_set_par,
.fb_setcolreg = vt8500lcd_setcolreg,
.fb_fillrect = wmt_ge_fillrect,
@@ -250,6 +251,7 @@ static const struct fb_ops vt8500lcd_ops = {
.fb_ioctl = vt8500lcd_ioctl,
.fb_pan_display = vt8500lcd_pan_display,
.fb_blank = vt8500lcd_blank,
+ // .fb_mmap needs DMA mmap
};
static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
@@ -357,7 +359,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
- fbi->fb.screen_base = fb_mem_virt;
+ fbi->fb.screen_buffer = fb_mem_virt;
fbi->palette_size = PAGE_ALIGN(512);
fbi->palette_cpu = dma_alloc_coherent(&pdev->dev,
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 5833147aa43d..00952e9c8802 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -248,6 +248,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info)
static const struct fb_ops wm8505fb_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_set_par = wm8505fb_set_par,
.fb_setcolreg = wm8505fb_setcolreg,
.fb_fillrect = wmt_ge_fillrect,
@@ -256,6 +257,7 @@ static const struct fb_ops wm8505fb_ops = {
.fb_sync = wmt_ge_sync,
.fb_pan_display = wm8505fb_pan_display,
.fb_blank = wm8505fb_blank,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
static int wm8505fb_probe(struct platform_device *pdev)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index c2524a7207cf..7a5593997e0e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -242,7 +242,7 @@ void vp_del_vqs(struct virtio_device *vdev)
if (v != VIRTIO_MSI_NO_VECTOR) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
- irq_set_affinity_hint(irq, NULL);
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, vq);
}
}
@@ -443,10 +443,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
mask = vp_dev->msix_affinity_masks[info->msix_vector];
irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
if (!cpu_mask)
- irq_set_affinity_hint(irq, NULL);
+ irq_update_affinity_hint(irq, NULL);
else {
cpumask_copy(mask, cpu_mask);
- irq_set_affinity_hint(irq, mask);
+ irq_set_affinity_and_hint(irq, mask);
}
}
return 0;
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index e2a1fe7bb66c..7de8b1ebabac 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -294,9 +294,10 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
err = -EINVAL;
mdev->common = vp_modern_map_capability(mdev, common,
- sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_modern_common_cfg),
- &mdev->common_len, NULL);
+ sizeof(struct virtio_pci_common_cfg), 4, 0,
+ offsetofend(struct virtio_pci_modern_common_cfg,
+ queue_reset),
+ &mdev->common_len, NULL);
if (!mdev->common)
goto err_map_common;
mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index b8f2f971c2f0..e3585330cf98 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -171,11 +171,11 @@ static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
int i;
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+ evtchn_port_t evtchn;
/* Timer interrupt has highest priority. */
- irq = irq_from_virq(cpu, VIRQ_TIMER);
+ irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
if (irq != -1) {
- evtchn_port_t evtchn = evtchn_from_irq(irq);
word_idx = evtchn / BITS_PER_LONG;
bit_idx = evtchn % BITS_PER_LONG;
if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
@@ -328,9 +328,9 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
if (sync_test_bit(i, BM(sh->evtchn_pending))) {
int word_idx = i / BITS_PER_EVTCHN_WORD;
- printk(" %d: event %d -> irq %d%s%s%s\n",
+ printk(" %d: event %d -> irq %u%s%s%s\n",
cpu_from_evtchn(i), i,
- get_evtchn_to_irq(i),
+ irq_from_evtchn(i),
sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
? "" : " l2-clear",
!sync_test_bit(i, BM(sh->evtchn_mask))
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6de6b084ea60..f5edb9e27e3c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
+/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
+static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
/* Event channel distribution data */
static atomic_t channels_on_cpu[NR_CPUS];
@@ -172,7 +174,7 @@ static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
-static bool (*pirq_needs_eoi)(unsigned irq);
+static bool (*pirq_needs_eoi)(struct irq_info *info);
#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
@@ -188,7 +190,6 @@ static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
-static void disable_dynirq(struct irq_data *data);
static DEFINE_PER_CPU(unsigned int, irq_epoch);
@@ -246,15 +247,6 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
return 0;
}
-int get_evtchn_to_irq(evtchn_port_t evtchn)
-{
- if (evtchn >= xen_evtchn_max_channels())
- return -1;
- if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
- return -1;
- return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
-}
-
/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
@@ -272,6 +264,19 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
+static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
+{
+ int irq;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return NULL;
+ if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+ return NULL;
+ irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
+
+ return (irq < 0) ? NULL : info_for_irq(irq);
+}
+
/* Per CPU channel accounting */
static void channels_on_cpu_dec(struct irq_info *info)
{
@@ -298,6 +303,13 @@ static void channels_on_cpu_inc(struct irq_info *info)
info->is_accounted = 1;
}
+static void xen_irq_free_desc(unsigned int irq)
+{
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq >= nr_legacy_irqs())
+ irq_free_desc(irq);
+}
+
static void delayed_free_irq(struct work_struct *work)
{
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
@@ -309,14 +321,11 @@ static void delayed_free_irq(struct work_struct *work)
kfree(info);
- /* Legacy IRQ descriptors are managed by the arch. */
- if (irq >= nr_legacy_irqs())
- irq_free_desc(irq);
+ xen_irq_free_desc(irq);
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
- unsigned irq,
enum xen_irq_type type,
evtchn_port_t evtchn,
unsigned short cpu)
@@ -326,29 +335,27 @@ static int xen_irq_info_common_setup(struct irq_info *info,
BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
info->type = type;
- info->irq = irq;
info->evtchn = evtchn;
info->cpu = cpu;
info->mask_reason = EVT_MASK_REASON_EXPLICIT;
raw_spin_lock_init(&info->lock);
- ret = set_evtchn_to_irq(evtchn, irq);
+ ret = set_evtchn_to_irq(evtchn, info->irq);
if (ret < 0)
return ret;
- irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+ irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
return xen_evtchn_port_setup(evtchn);
}
-static int xen_irq_info_evtchn_setup(unsigned irq,
+static int xen_irq_info_evtchn_setup(struct irq_info *info,
evtchn_port_t evtchn,
struct xenbus_device *dev)
{
- struct irq_info *info = info_for_irq(irq);
int ret;
- ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
+ ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
info->u.interdomain = dev;
if (dev)
atomic_inc(&dev->event_channels);
@@ -356,49 +363,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
return ret;
}
-static int xen_irq_info_ipi_setup(unsigned cpu,
- unsigned irq,
- evtchn_port_t evtchn,
- enum ipi_vector ipi)
+static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
+ evtchn_port_t evtchn, enum ipi_vector ipi)
{
- struct irq_info *info = info_for_irq(irq);
-
info->u.ipi = ipi;
- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+ per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
- return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+ return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
}
-static int xen_irq_info_virq_setup(unsigned cpu,
- unsigned irq,
- evtchn_port_t evtchn,
- unsigned virq)
+static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
+ evtchn_port_t evtchn, unsigned int virq)
{
- struct irq_info *info = info_for_irq(irq);
-
info->u.virq = virq;
- per_cpu(virq_to_irq, cpu)[virq] = irq;
+ per_cpu(virq_to_irq, cpu)[virq] = info->irq;
- return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
+ return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
}
-static int xen_irq_info_pirq_setup(unsigned irq,
- evtchn_port_t evtchn,
- unsigned pirq,
- unsigned gsi,
- uint16_t domid,
- unsigned char flags)
+static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
+ unsigned int pirq, unsigned int gsi,
+ uint16_t domid, unsigned char flags)
{
- struct irq_info *info = info_for_irq(irq);
-
info->u.pirq.pirq = pirq;
info->u.pirq.gsi = gsi;
info->u.pirq.domid = domid;
info->u.pirq.flags = flags;
- return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
+ return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
}
static void xen_irq_info_cleanup(struct irq_info *info)
@@ -412,7 +407,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
/*
* Accessors for packed IRQ information.
*/
-evtchn_port_t evtchn_from_irq(unsigned irq)
+static evtchn_port_t evtchn_from_irq(unsigned int irq)
{
const struct irq_info *info = NULL;
@@ -426,64 +421,51 @@ evtchn_port_t evtchn_from_irq(unsigned irq)
unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
- return get_evtchn_to_irq(evtchn);
+ struct irq_info *info = evtchn_to_info(evtchn);
+
+ return info ? info->irq : -1;
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
-int irq_from_virq(unsigned int cpu, unsigned int virq)
+int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
+ evtchn_port_t *evtchn)
{
- return per_cpu(virq_to_irq, cpu)[virq];
+ int irq = per_cpu(virq_to_irq, cpu)[virq];
+
+ *evtchn = evtchn_from_irq(irq);
+
+ return irq;
}
-static enum ipi_vector ipi_from_irq(unsigned irq)
+static enum ipi_vector ipi_from_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_IPI);
return info->u.ipi;
}
-static unsigned virq_from_irq(unsigned irq)
+static unsigned int virq_from_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_VIRQ);
return info->u.virq;
}
-static unsigned pirq_from_irq(unsigned irq)
+static unsigned int pirq_from_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_PIRQ);
return info->u.pirq.pirq;
}
-static enum xen_irq_type type_from_irq(unsigned irq)
-{
- return info_for_irq(irq)->type;
-}
-
-static unsigned cpu_from_irq(unsigned irq)
-{
- return info_for_irq(irq)->cpu;
-}
-
unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
- int irq = get_evtchn_to_irq(evtchn);
- unsigned ret = 0;
-
- if (irq != -1)
- ret = cpu_from_irq(irq);
+ struct irq_info *info = evtchn_to_info(evtchn);
- return ret;
+ return info ? info->cpu : 0;
}
static void do_mask(struct irq_info *info, u8 reason)
@@ -515,36 +497,30 @@ static void do_unmask(struct irq_info *info, u8 reason)
}
#ifdef CONFIG_X86
-static bool pirq_check_eoi_map(unsigned irq)
+static bool pirq_check_eoi_map(struct irq_info *info)
{
- return test_bit(pirq_from_irq(irq), pirq_eoi_map);
+ return test_bit(pirq_from_irq(info), pirq_eoi_map);
}
#endif
-static bool pirq_needs_eoi_flag(unsigned irq)
+static bool pirq_needs_eoi_flag(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
BUG_ON(info->type != IRQT_PIRQ);
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
bool force_affinity)
{
- int irq = get_evtchn_to_irq(evtchn);
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(irq == -1);
-
if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
- struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_data *data = irq_get_irq_data(info->irq);
irq_data_update_affinity(data, cpumask_of(cpu));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
}
- xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
+ xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
channels_on_cpu_dec(info);
info->cpu = cpu;
@@ -601,7 +577,9 @@ static void lateeoi_list_add(struct irq_info *info)
spin_lock_irqsave(&eoi->eoi_list_lock, flags);
- if (list_empty(&eoi->eoi_list)) {
+ elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+ eoi_list);
+ if (!elem || info->eoi_time < elem->eoi_time) {
list_add(&info->eoi_list, &eoi->eoi_list);
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed, delay);
@@ -732,50 +710,49 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
-static void xen_irq_init(unsigned irq)
+static struct irq_info *xen_irq_init(unsigned int irq)
{
struct irq_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL)
- panic("Unable to allocate metadata for IRQ%d\n", irq);
+ if (info) {
+ info->irq = irq;
+ info->type = IRQT_UNBOUND;
+ info->refcnt = -1;
+ INIT_RCU_WORK(&info->rwork, delayed_free_irq);
- info->type = IRQT_UNBOUND;
- info->refcnt = -1;
- INIT_RCU_WORK(&info->rwork, delayed_free_irq);
+ set_info_for_irq(irq, info);
+ /*
+ * Interrupt affinity setting can be immediate. No point
+ * in delaying it until an interrupt is handled.
+ */
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- set_info_for_irq(irq, info);
- /*
- * Interrupt affinity setting can be immediate. No point
- * in delaying it until an interrupt is handled.
- */
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+ INIT_LIST_HEAD(&info->eoi_list);
+ list_add_tail(&info->list, &xen_irq_list_head);
+ }
- INIT_LIST_HEAD(&info->eoi_list);
- list_add_tail(&info->list, &xen_irq_list_head);
+ return info;
}
-static int __must_check xen_allocate_irqs_dynamic(int nvec)
+static struct irq_info *xen_allocate_irq_dynamic(void)
{
- int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
+ int irq = irq_alloc_desc_from(0, -1);
+ struct irq_info *info = NULL;
if (irq >= 0) {
- for (i = 0; i < nvec; i++)
- xen_irq_init(irq + i);
+ info = xen_irq_init(irq);
+ if (!info)
+ xen_irq_free_desc(irq);
}
- return irq;
-}
-
-static inline int __must_check xen_allocate_irq_dynamic(void)
-{
-
- return xen_allocate_irqs_dynamic(1);
+ return info;
}
-static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
{
int irq;
+ struct irq_info *info;
/*
* A PV guest has no concept of a GSI (since it has no ACPI
@@ -792,15 +769,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
else
irq = irq_alloc_desc_at(gsi, -1);
- xen_irq_init(irq);
+ info = xen_irq_init(irq);
+ if (!info)
+ xen_irq_free_desc(irq);
- return irq;
+ return info;
}
-static void xen_free_irq(unsigned irq)
+static void xen_free_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
if (WARN_ON(!info))
return;
@@ -821,14 +798,11 @@ static void event_handler_exit(struct irq_info *info)
clear_evtchn(info->evtchn);
}
-static void pirq_query_unmask(int irq)
+static void pirq_query_unmask(struct irq_info *info)
{
struct physdev_irq_status_query irq_status;
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info->type != IRQT_PIRQ);
- irq_status.irq = pirq_from_irq(irq);
+ irq_status.irq = pirq_from_irq(info);
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
irq_status.flags = 0;
@@ -837,61 +811,81 @@ static void pirq_query_unmask(int irq)
info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
-static void eoi_pirq(struct irq_data *data)
+static void do_eoi_pirq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(data->irq);
- evtchn_port_t evtchn = info ? info->evtchn : 0;
- struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ struct physdev_eoi eoi = { .irq = pirq_from_irq(info) };
int rc = 0;
- if (!VALID_EVTCHN(evtchn))
+ if (!VALID_EVTCHN(info->evtchn))
return;
event_handler_exit(info);
- if (pirq_needs_eoi(data->irq)) {
+ if (pirq_needs_eoi(info)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
WARN_ON(rc);
}
}
+static void eoi_pirq(struct irq_data *data)
+{
+ struct irq_info *info = info_for_irq(data->irq);
+
+ do_eoi_pirq(info);
+}
+
+static void do_disable_dynirq(struct irq_info *info)
+{
+ if (VALID_EVTCHN(info->evtchn))
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
+}
+
+static void disable_dynirq(struct irq_data *data)
+{
+ struct irq_info *info = info_for_irq(data->irq);
+
+ if (info)
+ do_disable_dynirq(info);
+}
+
static void mask_ack_pirq(struct irq_data *data)
{
- disable_dynirq(data);
- eoi_pirq(data);
+ struct irq_info *info = info_for_irq(data->irq);
+
+ if (info) {
+ do_disable_dynirq(info);
+ do_eoi_pirq(info);
+ }
}
-static unsigned int __startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(struct irq_info *info)
{
struct evtchn_bind_pirq bind_pirq;
- struct irq_info *info = info_for_irq(irq);
- evtchn_port_t evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = info->evtchn;
int rc;
- BUG_ON(info->type != IRQT_PIRQ);
-
if (VALID_EVTCHN(evtchn))
goto out;
- bind_pirq.pirq = pirq_from_irq(irq);
+ bind_pirq.pirq = pirq_from_irq(info);
/* NB. We are happy to share unless we are probing. */
bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
BIND_PIRQ__WILL_SHARE : 0;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
if (rc != 0) {
- pr_warn("Failed to obtain physical IRQ %d\n", irq);
+ pr_warn("Failed to obtain physical IRQ %d\n", info->irq);
return 0;
}
evtchn = bind_pirq.port;
- pirq_query_unmask(irq);
+ pirq_query_unmask(info);
- rc = set_evtchn_to_irq(evtchn, irq);
+ rc = set_evtchn_to_irq(evtchn, info->irq);
if (rc)
goto err;
info->evtchn = evtchn;
- bind_evtchn_to_cpu(evtchn, 0, false);
+ bind_evtchn_to_cpu(info, 0, false);
rc = xen_evtchn_port_setup(evtchn);
if (rc)
@@ -900,26 +894,28 @@ static unsigned int __startup_pirq(unsigned int irq)
out:
do_unmask(info, EVT_MASK_REASON_EXPLICIT);
- eoi_pirq(irq_get_irq_data(irq));
+ do_eoi_pirq(info);
return 0;
err:
- pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq,
+ rc);
xen_evtchn_close(evtchn);
return 0;
}
static unsigned int startup_pirq(struct irq_data *data)
{
- return __startup_pirq(data->irq);
+ struct irq_info *info = info_for_irq(data->irq);
+
+ return __startup_pirq(info);
}
static void shutdown_pirq(struct irq_data *data)
{
- unsigned int irq = data->irq;
- struct irq_info *info = info_for_irq(irq);
- evtchn_port_t evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info->evtchn;
BUG_ON(info->type != IRQT_PIRQ);
@@ -957,10 +953,14 @@ int xen_irq_from_gsi(unsigned gsi)
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
-static void __unbind_from_irq(unsigned int irq)
+static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
{
- evtchn_port_t evtchn = evtchn_from_irq(irq);
- struct irq_info *info = info_for_irq(irq);
+ evtchn_port_t evtchn;
+
+ if (!info) {
+ xen_irq_free_desc(irq);
+ return;
+ }
if (info->refcnt > 0) {
info->refcnt--;
@@ -968,19 +968,22 @@ static void __unbind_from_irq(unsigned int irq)
return;
}
+ evtchn = info->evtchn;
+
if (VALID_EVTCHN(evtchn)) {
- unsigned int cpu = cpu_from_irq(irq);
+ unsigned int cpu = info->cpu;
struct xenbus_device *dev;
if (!info->is_static)
xen_evtchn_close(evtchn);
- switch (type_from_irq(irq)) {
+ switch (info->type) {
case IRQT_VIRQ:
- per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+ per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
break;
case IRQT_IPI:
- per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
+ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
+ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
break;
case IRQT_EVTCHN:
dev = info->u.interdomain;
@@ -994,7 +997,7 @@ static void __unbind_from_irq(unsigned int irq)
xen_irq_info_cleanup(info);
}
- xen_free_irq(irq);
+ xen_free_irq(info);
}
/*
@@ -1010,24 +1013,24 @@ static void __unbind_from_irq(unsigned int irq)
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
unsigned pirq, int shareable, char *name)
{
- int irq;
+ struct irq_info *info;
struct physdev_irq irq_op;
int ret;
mutex_lock(&irq_mapping_update_lock);
- irq = xen_irq_from_gsi(gsi);
- if (irq != -1) {
+ ret = xen_irq_from_gsi(gsi);
+ if (ret != -1) {
pr_info("%s: returning irq %d for gsi %u\n",
- __func__, irq, gsi);
+ __func__, ret, gsi);
goto out;
}
- irq = xen_allocate_irq_gsi(gsi);
- if (irq < 0)
+ info = xen_allocate_irq_gsi(gsi);
+ if (!info)
goto out;
- irq_op.irq = irq;
+ irq_op.irq = info->irq;
irq_op.vector = 0;
/* Only the privileged domain can do this. For non-priv, the pcifront
@@ -1035,20 +1038,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
* this in the priv domain. */
if (xen_initial_domain() &&
HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
- xen_free_irq(irq);
- irq = -ENOSPC;
+ xen_free_irq(info);
+ ret = -ENOSPC;
goto out;
}
- ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
+ ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
- pirq_query_unmask(irq);
+ pirq_query_unmask(info);
/* We try to use the handler with the appropriate semantic for the
* type of interrupt: if the interrupt is an edge triggered
* interrupt we use handle_edge_irq.
@@ -1065,16 +1067,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
* is the right choice either way.
*/
if (shareable)
- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
handle_fasteoi_irq, name);
else
- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
handle_edge_irq, name);
+ ret = info->irq;
+
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
#ifdef CONFIG_PCI_MSI
@@ -1096,17 +1100,22 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, int nvec, const char *name, domid_t domid)
{
int i, irq, ret;
+ struct irq_info *info;
mutex_lock(&irq_mapping_update_lock);
- irq = xen_allocate_irqs_dynamic(nvec);
+ irq = irq_alloc_descs(-1, 0, nvec, -1);
if (irq < 0)
goto out;
for (i = 0; i < nvec; i++) {
+ info = xen_irq_init(irq + i);
+ if (!info)
+ goto error_irq;
+
irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
- ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
+ ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
i == 0 ? 0 : PIRQ_MSI_GROUP);
if (ret < 0)
goto error_irq;
@@ -1118,9 +1127,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
out:
mutex_unlock(&irq_mapping_update_lock);
return irq;
+
error_irq:
- while (nvec--)
- __unbind_from_irq(irq + nvec);
+ while (nvec--) {
+ info = info_for_irq(irq + nvec);
+ __unbind_from_irq(info, irq + nvec);
+ }
mutex_unlock(&irq_mapping_update_lock);
return ret;
}
@@ -1156,67 +1168,45 @@ int xen_destroy_irq(int irq)
}
}
- xen_free_irq(irq);
+ xen_free_irq(info);
out:
mutex_unlock(&irq_mapping_update_lock);
return rc;
}
-int xen_irq_from_pirq(unsigned pirq)
-{
- int irq;
-
- struct irq_info *info;
-
- mutex_lock(&irq_mapping_update_lock);
-
- list_for_each_entry(info, &xen_irq_list_head, list) {
- if (info->type != IRQT_PIRQ)
- continue;
- irq = info->irq;
- if (info->u.pirq.pirq == pirq)
- goto out;
- }
- irq = -1;
-out:
- mutex_unlock(&irq_mapping_update_lock);
-
- return irq;
-}
-
-
int xen_pirq_from_irq(unsigned irq)
{
- return pirq_from_irq(irq);
+ struct irq_info *info = info_for_irq(irq);
+
+ return pirq_from_irq(info);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
struct xenbus_device *dev)
{
- int irq;
- int ret;
+ int ret = -ENOMEM;
+ struct irq_info *info;
if (evtchn >= xen_evtchn_max_channels())
return -ENOMEM;
mutex_lock(&irq_mapping_update_lock);
- irq = get_evtchn_to_irq(evtchn);
+ info = evtchn_to_info(evtchn);
- if (irq == -1) {
- irq = xen_allocate_irq_dynamic();
- if (irq < 0)
+ if (!info) {
+ info = xen_allocate_irq_dynamic();
+ if (!info)
goto out;
- irq_set_chip_and_handler_name(irq, chip,
+ irq_set_chip_and_handler_name(info->irq, chip,
handle_edge_irq, "event");
- ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
+ ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
/*
@@ -1226,17 +1216,17 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
* affinity setting is not invoked on them so nothing would
* bind the channel.
*/
- bind_evtchn_to_cpu(evtchn, 0, false);
- } else {
- struct irq_info *info = info_for_irq(irq);
- if (!WARN_ON(!info || info->type != IRQT_EVTCHN))
- info->refcnt++;
+ bind_evtchn_to_cpu(info, 0, false);
+ } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
+ info->refcnt++;
}
+ ret = info->irq;
+
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
int bind_evtchn_to_irq(evtchn_port_t evtchn)
@@ -1255,18 +1245,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
evtchn_port_t evtchn;
- int ret, irq;
+ struct irq_info *info;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
- irq = per_cpu(ipi_to_irq, cpu)[ipi];
+ ret = per_cpu(ipi_to_irq, cpu)[ipi];
- if (irq == -1) {
- irq = xen_allocate_irq_dynamic();
- if (irq < 0)
+ if (ret == -1) {
+ info = xen_allocate_irq_dynamic();
+ if (!info)
goto out;
- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
handle_percpu_irq, "ipi");
bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -1275,25 +1266,25 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+ ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
/*
* Force the affinity mask to the target CPU so proc shows
* the correct target.
*/
- bind_evtchn_to_cpu(evtchn, cpu, true);
+ bind_evtchn_to_cpu(info, cpu, true);
+ ret = info->irq;
} else {
- struct irq_info *info = info_for_irq(irq);
+ info = info_for_irq(ret);
WARN_ON(info == NULL || info->type != IRQT_IPI);
}
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
@@ -1361,22 +1352,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
struct evtchn_bind_virq bind_virq;
evtchn_port_t evtchn = 0;
- int irq, ret;
+ struct irq_info *info;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
- irq = per_cpu(virq_to_irq, cpu)[virq];
+ ret = per_cpu(virq_to_irq, cpu)[virq];
- if (irq == -1) {
- irq = xen_allocate_irq_dynamic();
- if (irq < 0)
+ if (ret == -1) {
+ info = xen_allocate_irq_dynamic();
+ if (!info)
goto out;
if (percpu)
- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
else
- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
handle_edge_irq, "virq");
bind_virq.virq = virq;
@@ -1391,10 +1383,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
BUG_ON(ret < 0);
}
- ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+ ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
@@ -1402,22 +1393,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
* Force the affinity mask for percpu interrupts so proc
* shows the correct target.
*/
- bind_evtchn_to_cpu(evtchn, cpu, percpu);
+ bind_evtchn_to_cpu(info, cpu, percpu);
+ ret = info->irq;
} else {
- struct irq_info *info = info_for_irq(irq);
+ info = info_for_irq(ret);
WARN_ON(info == NULL || info->type != IRQT_VIRQ);
}
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
static void unbind_from_irq(unsigned int irq)
{
+ struct irq_info *info;
+
mutex_lock(&irq_mapping_update_lock);
- __unbind_from_irq(irq);
+ info = info_for_irq(irq);
+ __unbind_from_irq(info, irq);
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1568,13 +1563,7 @@ EXPORT_SYMBOL_GPL(xen_set_irq_priority);
int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
{
- int irq = get_evtchn_to_irq(evtchn);
- struct irq_info *info;
-
- if (irq == -1)
- return -ENOENT;
-
- info = info_for_irq(irq);
+ struct irq_info *info = evtchn_to_info(evtchn);
if (!info)
return -ENOENT;
@@ -1590,7 +1579,6 @@ EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
int evtchn_get(evtchn_port_t evtchn)
{
- int irq;
struct irq_info *info;
int err = -ENOENT;
@@ -1599,11 +1587,7 @@ int evtchn_get(evtchn_port_t evtchn)
mutex_lock(&irq_mapping_update_lock);
- irq = get_evtchn_to_irq(evtchn);
- if (irq == -1)
- goto done;
-
- info = info_for_irq(irq);
+ info = evtchn_to_info(evtchn);
if (!info)
goto done;
@@ -1623,16 +1607,17 @@ EXPORT_SYMBOL_GPL(evtchn_get);
void evtchn_put(evtchn_port_t evtchn)
{
- int irq = get_evtchn_to_irq(evtchn);
- if (WARN_ON(irq == -1))
+ struct irq_info *info = evtchn_to_info(evtchn);
+
+ if (WARN_ON(!info))
return;
- unbind_from_irq(irq);
+ unbind_from_irq(info->irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
- int irq;
+ evtchn_port_t evtchn;
#ifdef CONFIG_X86
if (unlikely(vector == XEN_NMI_VECTOR)) {
@@ -1643,9 +1628,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
return;
}
#endif
- irq = per_cpu(ipi_to_irq, cpu)[vector];
- BUG_ON(irq < 0);
- notify_remote_via_irq(irq);
+ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+ BUG_ON(evtchn == 0);
+ notify_remote_via_evtchn(evtchn);
}
struct evtchn_loop_ctrl {
@@ -1656,12 +1641,10 @@ struct evtchn_loop_ctrl {
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
- int irq;
- struct irq_info *info;
+ struct irq_info *info = evtchn_to_info(port);
struct xenbus_device *dev;
- irq = get_evtchn_to_irq(port);
- if (irq == -1)
+ if (!info)
return;
/*
@@ -1686,7 +1669,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
}
}
- info = info_for_irq(irq);
if (xchg_acquire(&info->is_active, 1))
return;
@@ -1700,7 +1682,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
info->eoi_time = get_jiffies_64() + event_eoi_delay;
}
- generic_handle_irq(irq);
+ generic_handle_irq(info->irq);
}
int xen_evtchn_do_upcall(void)
@@ -1758,16 +1740,17 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
- BUG_ON(get_evtchn_to_irq(evtchn) != -1);
+ BUG_ON(evtchn_to_info(evtchn));
/* Expect irq to have been bound before,
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
+ info->irq = irq;
+ (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
mutex_unlock(&irq_mapping_update_lock);
- bind_evtchn_to_cpu(evtchn, info->cpu, false);
+ bind_evtchn_to_cpu(info, info->cpu, false);
/* Unmask the event channel. */
enable_irq(irq);
@@ -1801,7 +1784,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
* it, but don't do the xenlinux-level rebind in that case.
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
- bind_evtchn_to_cpu(evtchn, tcpu, false);
+ bind_evtchn_to_cpu(info, tcpu, false);
do_unmask(info, EVT_MASK_REASON_TEMPORARY);
@@ -1858,28 +1841,30 @@ static void enable_dynirq(struct irq_data *data)
do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}
-static void disable_dynirq(struct irq_data *data)
+static void do_ack_dynirq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(data->irq);
- evtchn_port_t evtchn = info ? info->evtchn : 0;
+ evtchn_port_t evtchn = info->evtchn;
if (VALID_EVTCHN(evtchn))
- do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ event_handler_exit(info);
}
static void ack_dynirq(struct irq_data *data)
{
struct irq_info *info = info_for_irq(data->irq);
- evtchn_port_t evtchn = info ? info->evtchn : 0;
- if (VALID_EVTCHN(evtchn))
- event_handler_exit(info);
+ if (info)
+ do_ack_dynirq(info);
}
static void mask_ack_dynirq(struct irq_data *data)
{
- disable_dynirq(data);
- ack_dynirq(data);
+ struct irq_info *info = info_for_irq(data->irq);
+
+ if (info) {
+ do_disable_dynirq(info);
+ do_ack_dynirq(info);
+ }
}
static void lateeoi_ack_dynirq(struct irq_data *data)
@@ -1952,13 +1937,13 @@ static void restore_pirqs(void)
if (rc) {
pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
gsi, irq, pirq, rc);
- xen_free_irq(irq);
+ xen_free_irq(info);
continue;
}
printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
- __startup_pirq(irq);
+ __startup_pirq(info);
}
}
@@ -1966,13 +1951,15 @@ static void restore_cpu_virqs(unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
evtchn_port_t evtchn;
+ struct irq_info *info;
int virq, irq;
for (virq = 0; virq < NR_VIRQS; virq++) {
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
continue;
+ info = info_for_irq(irq);
- BUG_ON(virq_from_irq(irq) != virq);
+ BUG_ON(virq_from_irq(info) != virq);
/* Get a new binding from Xen. */
bind_virq.virq = virq;
@@ -1983,9 +1970,9 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+ xen_irq_info_virq_setup(info, cpu, evtchn, virq);
/* The affinity mask is still valid */
- bind_evtchn_to_cpu(evtchn, cpu, false);
+ bind_evtchn_to_cpu(info, cpu, false);
}
}
@@ -1993,13 +1980,15 @@ static void restore_cpu_ipis(unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
evtchn_port_t evtchn;
+ struct irq_info *info;
int ipi, irq;
for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
continue;
+ info = info_for_irq(irq);
- BUG_ON(ipi_from_irq(irq) != ipi);
+ BUG_ON(ipi_from_irq(info) != ipi);
/* Get a new binding from Xen. */
bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -2009,9 +1998,9 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+ xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
/* The affinity mask is still valid */
- bind_evtchn_to_cpu(evtchn, cpu, false);
+ bind_evtchn_to_cpu(info, cpu, false);
}
}
@@ -2025,13 +2014,6 @@ void xen_clear_irq_pending(int irq)
event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
-void xen_set_irq_pending(int irq)
-{
- evtchn_port_t evtchn = evtchn_from_irq(irq);
-
- if (VALID_EVTCHN(evtchn))
- set_evtchn(evtchn);
-}
bool xen_test_irq_pending(int irq)
{
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 4d3398eff9cd..19ae31695edc 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -33,7 +33,6 @@ struct evtchn_ops {
extern const struct evtchn_ops *evtchn_ops;
-int get_evtchn_to_irq(evtchn_port_t evtchn);
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index b3e3d1bb37f3..508655273145 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -47,6 +47,9 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
/*
* @cpu_id: Xen physical cpu logic number
@@ -400,4 +403,23 @@ bool __init xen_processor_present(uint32_t acpi_id)
return online;
}
+
+void xen_sanitize_proc_cap_bits(uint32_t *cap)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_set_processor_pminfo,
+ .u.set_pminfo.id = -1,
+ .u.set_pminfo.type = XEN_PM_PDC,
+ };
+ u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap };
+ int ret;
+
+ set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
+ ret = HYPERVISOR_platform_op(&op);
+ if (ret)
+ pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n",
+ ret);
+ else
+ *cap = buf[2];
+}
#endif
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 1ce7f3c7a950..0eb337a8ec0f 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1115,7 +1115,7 @@ struct privcmd_kernel_ioreq {
spinlock_t lock; /* Protects ioeventfds list */
struct list_head ioeventfds;
struct list_head list;
- struct ioreq_port ports[0];
+ struct ioreq_port ports[] __counted_by(vcpus);
};
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 946bd56f0ac5..0e6c6c25d154 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -405,4 +405,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
+ .max_mapping_size = swiotlb_max_mapping_size,
};
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index b52e0fa595a9..223870a0111b 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -21,7 +21,7 @@
#include <xen/xen-front-pgdir-shbuf.h>
-/**
+/*
* This structure represents the structure of a shared page
* that contains grant references to the pages of the shared
* buffer. This structure is common to many Xen para-virtualized
@@ -33,7 +33,7 @@ struct xen_page_directory {
grant_ref_t gref[]; /* Variable length */
};
-/**
+/*
* Shared buffer ops which are differently implemented
* depending on the allocation mode, e.g. if the buffer
* is allocated by the corresponding backend or frontend.
@@ -61,7 +61,7 @@ struct xen_front_pgdir_shbuf_ops {
int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};
-/**
+/*
* Get granted reference to the very first page of the
* page directory. Usually this is passed to the backend,
* so it can find/fill the grant references to the buffer's
@@ -81,7 +81,7 @@ xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
-/**
+/*
* Map granted references of the shared buffer.
*
* Depending on the shared buffer mode of allocation
@@ -102,7 +102,7 @@ int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
-/**
+/*
* Unmap granted references of the shared buffer.
*
* Depending on the shared buffer mode of allocation
@@ -123,7 +123,7 @@ int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
-/**
+/*
* Free all the resources of the shared buffer.
*
* \param buf shared buffer whose resources are to be freed.
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
offsetof(struct xen_page_directory, \
gref)) / sizeof(grant_ref_t))
-/**
+/*
* Get the number of pages the page directory consumes itself.
*
* \param buf shared buffer.
@@ -160,7 +160,7 @@ static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}
-/**
+/*
* Calculate the number of grant references needed to share the buffer
* and its pages when backend allocates the buffer.
*
@@ -172,7 +172,7 @@ static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
buf->num_grefs = get_num_pages_dir(buf);
}
-/**
+/*
* Calculate the number of grant references needed to share the buffer
* and its pages when frontend allocates the buffer.
*
@@ -190,7 +190,7 @@ static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
#define xen_page_to_vaddr(page) \
((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
-/**
+/*
* Unmap the buffer previously mapped with grant references
* provided by the backend.
*
@@ -238,7 +238,7 @@ static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
return ret;
}
-/**
+/*
* Map the buffer with grant references provided by the backend.
*
* \param buf shared buffer.
@@ -320,7 +320,7 @@ static int backend_map(struct xen_front_pgdir_shbuf *buf)
return ret;
}
-/**
+/*
* Fill page directory with grant references to the pages of the
* page directory itself.
*
@@ -350,7 +350,7 @@ static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}
-/**
+/*
* Fill page directory with grant references to the pages of the
* page directory and the buffer we share with the backend.
*
@@ -389,7 +389,7 @@ static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
}
}
-/**
+/*
* Grant references to the frontend's buffer pages.
*
* These will be shared with the backend, so it can
@@ -418,7 +418,7 @@ static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
return 0;
}
-/**
+/*
* Grant all the references needed to share the buffer.
*
* Grant references to the page directory pages and, if
@@ -466,7 +466,7 @@ static int grant_references(struct xen_front_pgdir_shbuf *buf)
return 0;
}
-/**
+/*
* Allocate all required structures to manage shared buffer.
*
* \param buf shared buffer.
@@ -506,7 +506,7 @@ static const struct xen_front_pgdir_shbuf_ops local_ops = {
.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
-/**
+/*
* Allocate a new instance of a shared buffer.
*
* \param cfg configuration to be used while allocating a new shared buffer.