Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/Kconfig18
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c38
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/armada/armada_510.c1
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c1
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c15
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h11
-rw-r--r--drivers/gpu/drm/ast/ast_main.c21
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c65
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig22
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h12
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c12
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c24
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c43
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c200
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c2
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c26
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c54
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c1
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c1
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c22
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig47
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c221
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h96
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c588
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c723
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c450
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c430
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c488
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c1
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c11
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c7
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c26
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c3
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c7
-rw-r--r--drivers/gpu/drm/bridge/panel.c34
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c6
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c113
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c1
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c4
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c180
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c25
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c418
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c71
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c27
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c2
-rw-r--r--drivers/gpu/drm/display/Kconfig2
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c211
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c34
-rw-r--r--drivers/gpu/drm/drm_aperture.c174
-rw-r--r--drivers/gpu/drm/drm_atomic.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c73
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c1
-rw-r--r--drivers/gpu/drm/drm_blend.c2
-rw-r--r--drivers/gpu/drm/drm_bridge.c24
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c8
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c29
-rw-r--r--drivers/gpu/drm/drm_connector.c119
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h5
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c1
-rw-r--r--drivers/gpu/drm/drm_debugfs.c42
-rw-r--r--drivers/gpu/drm/drm_displayid.c16
-rw-r--r--drivers/gpu/drm/drm_edid.c1924
-rw-r--r--drivers/gpu/drm/drm_encoder.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c28
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c7
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c39
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c104
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c54
-rw-r--r--drivers/gpu/drm/drm_ioctl.c2
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c9
-rw-r--r--drivers/gpu/drm/drm_mode_config.c1
-rw-r--r--drivers/gpu/drm/drm_modes.c6
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c1
-rw-r--r--drivers/gpu/drm/drm_of.c63
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c8
-rw-r--r--drivers/gpu/drm/drm_prime.c2
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c241
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c14
-rw-r--r--drivers/gpu/drm/drm_syncobj.c59
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/drm_writeback.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c42
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c47
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c98
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c80
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c1
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c36
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c51
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c36
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h24
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c86
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c2
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c1
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c74
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c7
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_proto.c23
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c1
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/TODO.txt2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c22
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c4
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c1
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio_regs.h160
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c657
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c314
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c54
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1846
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c147
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h59
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c411
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c734
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c246
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c91
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c80
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h67
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h4
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c1
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c33
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c50
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c250
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c215
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c627
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c132
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c268
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_gmch.c654
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_gmch.h46
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c497
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h76
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c177
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h45
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c450
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h92
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c30
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c186
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c77
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c147
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c97
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c28
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c106
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c115
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c88
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.h2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h94
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h39
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h7
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c132
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_query.c26
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h235
-rw-r--r--drivers/gpu/drm/i915/i915_request.c57
-rw-r--r--drivers/gpu/drm/i915/i915_request.h2
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c17
-rw-r--r--drivers/gpu/drm/i915/i915_tasklet.h43
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h40
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c87
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h45
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c16
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h8
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c93
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h20
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c182
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h14
-rw-r--r--drivers/gpu/drm/i915/intel_step.c70
-rw-r--r--drivers/gpu/drm/i915/intel_step.h4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c378
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c1
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c12
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.h3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c2
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig9
-rw-r--r--drivers/gpu/drm/logicvc/Makefile9
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_crtc.c280
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_crtc.h21
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c496
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.h67
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_interface.c214
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_interface.h28
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.c631
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.h64
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_mode.c80
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_mode.h15
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_of.c185
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_of.h46
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_regs.h80
-rw-r--r--drivers/gpu/drm/mcde/mcde_clk_div.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c1
-rw-r--r--drivers/gpu/drm/mediatek/Makefile4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c320
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c261
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c69
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c184
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c101
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c315
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.h20
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c1
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c27
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c2
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/mgag200/Makefile14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c387
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h148
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c201
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c50
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c51
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c46
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c50
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c60
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c130
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c50
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c116
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c454
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_pll.c12
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h2
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c30
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c106
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c65
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c23
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c43
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c169
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c514
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c55
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c11
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c31
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c21
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c31
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_clk_util.c120
-rw-r--r--drivers/gpu/drm/msm/dp/dp_clk_util.h38
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c55
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c122
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c11
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c53
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.c104
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c57
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c126
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h15
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c11
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c62
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c12
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c83
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h12
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c1
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c1
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c19
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c29
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h26
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c20
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c90
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h31
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c41
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c57
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig16
-rw-r--r--drivers/gpu/drm/mxsfb/Makefile2
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c340
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.h44
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c485
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_regs.h257
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c16
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/drf.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h66
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c231
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c2
-rw-r--r--drivers/gpu/drm/panel/Kconfig13
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c7
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c12
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c29
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c285
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c48
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c12
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c2
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c12
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c13
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c6
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c1
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c51
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c137
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c9
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h13
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h1
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c4
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c1
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1
-rw-r--r--drivers/gpu/drm/radeon/r100.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/rs600.c1
-rw-r--r--drivers/gpu/drm/radeon/rv770.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c15
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig4
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c10
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c1
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c6
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c1
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c1
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c2
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c2
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c1
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c1
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c1
-rw-r--r--drivers/gpu/drm/stm/ltdc.c294
-rw-r--r--drivers/gpu/drm/stm/ltdc.h8
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c242
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c3
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.h11
-rw-r--r--drivers/gpu/drm/tegra/falcon.c8
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/fb.c1
-rw-r--r--drivers/gpu/drm/tegra/gem.c11
-rw-r--r--drivers/gpu/drm/tegra/hub.c3
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c14
-rw-r--r--drivers/gpu/drm/tegra/plane.c2
-rw-r--r--drivers/gpu/drm/tegra/submit.c48
-rw-r--r--drivers/gpu/drm/tegra/uapi.c43
-rw-r--r--drivers/gpu/drm/tegra/vic.c92
-rw-r--r--drivers/gpu/drm/tests/.kunitconfig3
-rw-r--r--drivers/gpu/drm/tests/Makefile3
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c161
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c3
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c1
-rw-r--r--drivers/gpu/drm/tiny/Kconfig1
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c2
-rw-r--r--drivers/gpu/drm/tiny/bochs.c3
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c2
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c1
-rw-r--r--drivers/gpu/drm/tiny/repaper.c1
-rw-r--r--drivers/gpu/drm/tiny/st7586.c1
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c52
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c3
-rw-r--r--drivers/gpu/drm/v3d/Kconfig5
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c18
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c12
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c12
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c64
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c211
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c100
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c116
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h20
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c152
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c40
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c213
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h14
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h38
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c60
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c33
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c54
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c123
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c10
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h1
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h9
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_evtchnl.c43
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c1
741 files changed, 24340 insertions, 10883 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e88c497fa010..6c2256e8474b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -70,6 +70,22 @@ config DRM_DEBUG_SELFTEST
If in doubt, say "N".
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT=y
+ select DRM_KMS_HELPER
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -351,6 +367,8 @@ source "drivers/gpu/drm/etnaviv/Kconfig"
source "drivers/gpu/drm/hisilicon/Kconfig"
+source "drivers/gpu/drm/logicvc/Kconfig"
+
source "drivers/gpu/drm/mediatek/Kconfig"
source "drivers/gpu/drm/mxsfb/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 15fe3163f822..e7af358e6dda 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
+obj-$(CONFIG_DRM_KUNIT_TEST) += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
@@ -121,6 +122,7 @@ obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
obj-y += imx/
obj-$(CONFIG_DRM_INGENIC) += ingenic/
+obj-$(CONFIG_DRM_LOGICVC) += logicvc/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
obj-y += i2c/
@@ -129,7 +131,7 @@ obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
-obj-$(CONFIG_DRM_MXSFB) += mxsfb/
+obj-y += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 65552bb7d2f2..e83cb1c09610 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -184,7 +184,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
/* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
- * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+ * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9dc5f2a0cc07..870f352837fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -208,7 +208,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
/* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
- * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+ * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index dbe2904e015b..d788a00043a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -37,6 +37,7 @@
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 49e4092f447f..7a5e8a7b4a1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -50,6 +50,35 @@ to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}
+static inline struct drm_buddy_block *
+amdgpu_vram_mgr_first_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
+}
+
+static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 start, size;
+
+ block = amdgpu_vram_mgr_first_block(head);
+ if (!block)
+ return false;
+
+ while (head != block->link.next) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+
+ block = list_entry(block->link.next, struct drm_buddy_block, link);
+ if (start + size != amdgpu_vram_mgr_block_start(block))
+ return false;
+ }
+
+ return true;
+}
+
+
+
/**
* DOC: mem_info_vram_total
*
@@ -496,16 +525,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
list_splice_tail(trim_list, &vres->blocks);
}
- list_for_each_entry(block, &vres->blocks, link)
- vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+ vres->base.start = 0;
+ list_for_each_entry(block, &vres->blocks, link) {
+ unsigned long start;
- block = amdgpu_vram_mgr_first_block(&vres->blocks);
- if (!block) {
- r = -EINVAL;
- goto error_fini;
- }
+ start = amdgpu_vram_mgr_block_start(block) +
+ amdgpu_vram_mgr_block_size(block);
+ start >>= PAGE_SHIFT;
+
+ if (start > vres->base.num_pages)
+ start -= vres->base.num_pages;
+ else
+ start = 0;
+ vres->base.start = max(vres->base.start, start);
- vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+ }
if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 9a2db87186c7..4b267bf1c5db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -53,33 +53,6 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
return PAGE_SIZE << drm_buddy_block_order(block);
}
-static inline struct drm_buddy_block *
-amdgpu_vram_mgr_first_block(struct list_head *list)
-{
- return list_first_entry_or_null(list, struct drm_buddy_block, link);
-}
-
-static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
-{
- struct drm_buddy_block *block;
- u64 start, size;
-
- block = amdgpu_vram_mgr_first_block(head);
- if (!block)
- return false;
-
- while (head != block->link.next) {
- start = amdgpu_vram_mgr_block_start(block);
- size = amdgpu_vram_mgr_block_size(block);
-
- block = list_entry(block->link.next, struct drm_buddy_block, link);
- if (start + size != amdgpu_vram_mgr_block_start(block))
- return false;
- }
-
- return true;
-}
-
static inline struct amdgpu_vram_mgr_resource *
to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
{
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c03f300851fa..3e83fed540e8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -79,11 +79,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -6874,7 +6876,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
crtc_debugfs_init(crtc);
@@ -6975,7 +6977,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
#endif
};
@@ -7880,6 +7882,10 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
goto error_unpin;
}
+ r = drm_gem_plane_helper_prepare_fb(plane, new_state);
+ if (unlikely(r != 0))
+ goto error_unpin;
+
amdgpu_bo_unreserve(rbo);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -9412,9 +9418,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
- long r;
unsigned long flags;
- struct amdgpu_bo *abo;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool cursor_update = false;
@@ -9492,19 +9496,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
continue;
}
- abo = gem_to_amdgpu_bo(fb->obj[0]);
-
- /*
- * Wait for all fences on this FB. Do limited wait to avoid
- * deadlock during GPU reset when this fence will not signal
- * but we hold reservation lock for the BO.
- */
- r = dma_resv_wait_timeout(abo->tbo.base.resv,
- DMA_RESV_USAGE_WRITE, false,
- msecs_to_jiffies(5000));
- if (unlikely(r <= 0))
- DRM_ERROR("Waiting for fences timed out!");
-
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state,
afb->tiling_flags,
@@ -9872,9 +9863,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
int crtc_disable_count = 0;
bool mode_set_reset_required = false;
+ int r;
trace_amdgpu_dm_atomic_commit_tail_begin(state);
+ r = drm_atomic_helper_wait_for_fences(dev, state, false);
+ if (unlikely(r))
+ DRM_ERROR("Waiting for fences timed out!");
+
drm_atomic_helper_update_legacy_modeset_state(dev, state);
dm_state = dm_atomic_get_new_state(state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 49bdcbf0a592..a1f40d0cd41c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -871,28 +871,18 @@ static int psr_capability_show(struct seq_file *m, void *data)
}
/*
- * Returns the current and maximum output bpc for the connector.
- * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
+ * Returns the current bpc for the crtc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc
*/
-static int output_bpc_show(struct seq_file *m, void *data)
+static int amdgpu_current_bpc_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
- struct drm_crtc *crtc = NULL;
+ struct drm_crtc *crtc = m->private;
+ struct drm_device *dev = crtc->dev;
struct dm_crtc_state *dm_crtc_state = NULL;
int res = -ENODEV;
unsigned int bpc;
mutex_lock(&dev->mode_config.mutex);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- if (connector->state == NULL)
- goto unlock;
-
- crtc = connector->state->crtc;
- if (crtc == NULL)
- goto unlock;
-
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state == NULL)
goto unlock;
@@ -922,18 +912,15 @@ static int output_bpc_show(struct seq_file *m, void *data)
}
seq_printf(m, "Current: %u\n", bpc);
- seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
res = 0;
unlock:
- if (crtc)
- drm_modeset_unlock(&crtc->mutex);
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&crtc->mutex);
mutex_unlock(&dev->mode_config.mutex);
return res;
}
+DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
/*
* Example usage:
@@ -2545,7 +2532,6 @@ static int target_backlight_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
-DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
@@ -2792,7 +2778,6 @@ static const struct {
const struct file_operations *fops;
} connector_debugfs_entries[] = {
{"force_yuv420_output", &force_yuv420_output_fops},
- {"output_bpc", &output_bpc_fops},
{"trigger_hotplug", &trigger_hotplug_debugfs_fops},
{"internal_display", &internal_display_fops}
};
@@ -3176,9 +3161,10 @@ static int crc_win_update_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
crc_win_update_set, "%llu\n");
-
+#endif
void crtc_debugfs_init(struct drm_crtc *crtc)
{
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
if (!dir)
@@ -3194,9 +3180,11 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
-
-}
#endif
+ debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
+ crtc, &amdgpu_current_bpc_fops);
+}
+
/*
* Writes DTN log state to the user supplied buffer.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 3366cb644053..071200473c27 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -31,8 +31,6 @@
void connector_debugfs_init(struct amdgpu_dm_connector *connector);
void dtn_debugfs_init(struct amdgpu_device *adev);
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void crtc_debugfs_init(struct drm_crtc *crtc);
-#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index fdcaea22b456..d3bc9dc21771 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -34,6 +34,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_encoder.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d380b8bc6f39..dfc74aea2852 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -695,7 +695,7 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
void dp_hw_to_dpcd_lane_settings(
const struct link_training_settings *lt_settings,
const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane dpcd_lane_settings[])
{
uint8_t lane = 0;
@@ -725,7 +725,7 @@ void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane dpcd_lane_settings[])
{
uint32_t lane;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index b44c7b43f7db..6682d9e181c6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -148,12 +148,12 @@ bool dp_is_max_vs_reached(
void dp_hw_to_dpcd_lane_settings(
const struct link_training_settings *lt_settings,
const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+ union dpcd_training_lane dpcd_lane_settings[]);
void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+ union dpcd_training_lane dpcd_lane_settings[]);
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 6008450370e8..dccbd9f70723 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -781,7 +781,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
goto failed;
}
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 0370482dd52b..e8fe84f806d1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -856,7 +856,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
feature->feature_num < 64)
return -EINVAL;
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 00fa56c29b3e..daa1faccd3e7 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -5,6 +5,7 @@
*
*/
+#include <drm/drm_blend.h>
#include <drm/drm_print.h>
#include "d71_dev.h"
#include "malidp_io.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 456f3c435719..7889e380ab23 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 06c595378dda..4b7d94961527 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -4,6 +4,8 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+#include <linux/of.h>
+
#include <drm/drm_print.h>
#include "komeda_dev.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index e0b9f7063b20..dff22dec54b5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -6,6 +6,7 @@
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ce4b760a691b..ebccb74306a7 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -4,6 +4,7 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+#include <drm/drm_framebuffer.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 7adb065169e9..afc9cd856501 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index b5928b52e279..962730772b2f 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 204c869d9fe2..b66ca5b33a7f 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -9,8 +9,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 338cec4a3fff..8a9562642d16 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -11,9 +11,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 93d5c0a2d49a..93cd7e1a08ab 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <drm/drm_probe_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index 7dda5f2a0af4..c5bc53d7e0c4 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -5,6 +5,8 @@
#ifndef ARMADA_FB_H
#define ARMADA_FB_H
+#include <drm/drm_framebuffer.h>
+
struct armada_framebuffer {
struct drm_framebuffer fb;
uint8_t fmt;
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 827e62c1daba..f3788d7d82d6 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -9,6 +9,7 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
index 6759cb88415a..4f2187025a21 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -4,6 +4,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "aspeed_gfx.h"
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 4551bc8a3ecf..56483860306b 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -34,7 +34,7 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
* CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64
*/
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE4,
- (u8) ~ASTDP_EDID_READ_POINTER_MASK, (u8) i);
+ ASTDP_AND_CLEAR_MASK, (u8)i);
j = 0;
/*
@@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
}
if (bDPExecute)
- ast->tx_chip_type = AST_TX_ASTDP;
+ ast->tx_chip_types |= BIT(AST_TX_ASTDP);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
- } else
- ast->tx_chip_type = AST_TX_NONE;
+ }
}
@@ -275,8 +274,8 @@ void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mo
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0, (u8) ~ASTDP_CLEAR_MASK,
- ASTDP_MISC0_24bpp);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1, (u8) ~ASTDP_CLEAR_MASK, ASTDP_MISC1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2, (u8) ~ASTDP_CLEAR_MASK, ModeIdx);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0, ASTDP_AND_CLEAR_MASK,
+ ASTDP_MISC0_24bpp);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
}
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 204c926a18ea..4f75a9efb610 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev)
ast_init_dvo(dev);
break;
default:
- if (ast->tx_chip_type == AST_TX_SIL164)
+ if (ast->tx_chip_types & BIT(AST_TX_SIL164))
ast_init_dvo(dev);
else
ast_init_analog(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index afebe35f205e..2e44b971c3a6 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -73,6 +73,11 @@ enum ast_tx_chip {
AST_TX_ASTDP,
};
+#define AST_TX_NONE_BIT BIT(AST_TX_NONE)
+#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164)
+#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
+#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
+
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@@ -173,7 +178,7 @@ struct ast_private {
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
- union {
+ struct {
struct {
struct drm_encoder encoder;
struct ast_vga_connector vga_connector;
@@ -199,7 +204,7 @@ struct ast_private {
ast_use_defaults
} config_mode;
- enum ast_tx_chip tx_chip_type;
+ unsigned long tx_chip_types; /* bitfield of enum ast_tx_chip */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
};
@@ -428,7 +433,7 @@ int ast_mode_config_init(struct ast_private *ast);
*/
#define ASTDP_MISC0_24bpp BIT(5)
#define ASTDP_MISC1 0
-#define ASTDP_CLEAR_MASK GENMASK(7, 0)
+#define ASTDP_AND_CLEAR_MASK 0x00
/*
* ASTDP resolution table:
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index d770d5a23c1a..067453266897 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
/* Check 3rd Tx option (digital output afaik) */
- ast->tx_chip_type = AST_TX_NONE;
+ ast->tx_chip_types |= AST_TX_NONE_BIT;
/*
* VGACRA3 Enhanced Color Mode Register, check if DVO is already
@@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
if (!*need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
- ast->tx_chip_type = AST_TX_SIL164;
+ ast->tx_chip_types = AST_TX_SIL164_BIT;
}
if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) {
@@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg) {
case 0x04:
- ast->tx_chip_type = AST_TX_SIL164;
+ ast->tx_chip_types = AST_TX_SIL164_BIT;
break;
case 0x08:
ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
@@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
fallthrough;
case 0x0c:
- ast->tx_chip_type = AST_TX_DP501;
+ ast->tx_chip_types = AST_TX_DP501_BIT;
}
} else if (ast->chip == AST2600)
ast_dp_launch(&ast->base, 0);
/* Print stuff for diagnostic purposes */
- switch(ast->tx_chip_type) {
- case AST_TX_SIL164:
+ if (ast->tx_chip_types & AST_TX_NONE_BIT)
+ drm_info(dev, "Using analog VGA\n");
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT)
drm_info(dev, "Using Sil164 TMDS transmitter\n");
- break;
- case AST_TX_DP501:
+ if (ast->tx_chip_types & AST_TX_DP501_BIT)
drm_info(dev, "Using DP501 DisplayPort transmitter\n");
- break;
- default:
- drm_info(dev, "Analog VGA only\n");
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 323af2746aa9..214b10178454 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -36,6 +36,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -989,6 +990,9 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct ast_private *ast = to_ast_private(crtc->dev);
u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF;
+ struct ast_crtc_state *ast_state;
+ const struct drm_format_info *format;
+ struct ast_vbios_mode_info *vbios_mode_info;
/* TODO: Maybe control display signal generation with
* Sync Enable (bit CR17.7).
@@ -997,32 +1001,42 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0);
- if (ast->tx_chip_type == AST_TX_DP501)
+ if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 1);
- if (ast->tx_chip_type == AST_TX_ASTDP) {
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON);
ast_wait_for_vretrace(ast);
ast_dp_set_on_off(crtc->dev, 1);
}
+ ast_state = to_ast_crtc_state(crtc->state);
+ format = ast_state->format;
+
+ if (format) {
+ vbios_mode_info = &ast_state->vbios_mode_info;
+
+ ast_set_color_reg(ast, format);
+ ast_set_vbios_color_reg(ast, format, vbios_mode_info);
+ }
+
ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
ch = mode;
- if (ast->tx_chip_type == AST_TX_DP501)
+ if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 0);
- break;
- if (ast->tx_chip_type == AST_TX_ASTDP) {
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ast_dp_set_on_off(crtc->dev, 0);
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF);
}
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch);
+ break;
}
}
@@ -1094,15 +1108,19 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
- crtc);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
bool succ;
+ int ret;
+
+ ret = drm_atomic_helper_check_crtc_state(crtc_state, false);
+ if (ret)
+ return ret;
if (!crtc_state->enable)
- return 0; /* no mode checks if CRTC is being disabled */
+ goto out;
ast_state = to_ast_crtc_state(crtc_state);
@@ -1116,7 +1134,8 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
if (!succ)
return -EINVAL;
- return 0;
+out:
+ return drm_atomic_add_affected_planes(state, crtc);
}
static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -1155,7 +1174,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
ast_crtc_load_lut(ast, crtc);
//Set Aspeed Display-Port
- if (ast->tx_chip_type == AST_TX_ASTDP)
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
ast_dp_set_mode(crtc, vbios_mode_info);
mutex_unlock(&ast->ioregs_lock);
@@ -1739,22 +1758,26 @@ int ast_mode_config_init(struct ast_private *ast)
ast_crtc_init(dev);
- switch (ast->tx_chip_type) {
- case AST_TX_NONE:
+ if (ast->tx_chip_types & AST_TX_NONE_BIT) {
ret = ast_vga_output_init(ast);
- break;
- case AST_TX_SIL164:
+ if (ret)
+ return ret;
+ }
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
- break;
- case AST_TX_DP501:
+ if (ret)
+ return ret;
+ }
+ if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
- break;
- case AST_TX_ASTDP:
+ if (ret)
+ return ret;
+ }
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast);
- break;
+ if (ret)
+ return ret;
}
- if (ret)
- return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 0aa9cf0fb5c3..82fd3c8adee1 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev)
ast_init_3rdtx(dev);
} else {
- if (ast->tx_chip_type != AST_TX_NONE)
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}
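Taken together, the AST hunks above turn the single-valued tx_chip_type into the tx_chip_types bitfield, so a chip that exposes, say, analog VGA plus an ASTDP transmitter can record both; every check then becomes a mask test instead of an equality. A small sketch of the idiom with an illustrative helper (the enum mirrors the patch, the helper does not exist in the driver):

#include <linux/bits.h>
#include <linux/types.h>

enum ast_tx_chip { AST_TX_NONE, AST_TX_SIL164, AST_TX_DP501, AST_TX_ASTDP };

/* Illustrative: true if the given transmitter was detected. */
static bool ast_has_tx(unsigned long tx_chip_types, enum ast_tx_chip chip)
{
	return tx_chip_types & BIT(chip);
}

/* e.g. a board with VGA and ASTDP would end up with
 * tx_chip_types == BIT(AST_TX_NONE) | BIT(AST_TX_ASTDP). */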
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index cfe4fc69277e..58184cd6ab0b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/media-bus-format.h>
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 43bc709e3523..50fee6a93964 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -9,6 +9,7 @@
*/
#include <linux/media-bus-format.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index a077d93c78d7..2306ceb3e999 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -11,8 +11,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index da3441830d46..57946d80b02d 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -78,6 +78,7 @@ config DRM_DISPLAY_CONNECTOR
config DRM_FSL_LDB
tristate "Freescale i.MX8MP LDB bridge"
depends on OF
+ depends on ARCH_MXC || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
@@ -91,7 +92,10 @@ config DRM_ITE_IT6505
select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
+ select DRM_DP_HELPER
select EXTCON
+ select CRYPTO
+ select CRYPTO_HASH
help
ITE IT6505 DisplayPort bridge chip driver.
@@ -320,6 +324,22 @@ config DRM_TOSHIBA_TC358775
help
Toshiba TC358775 DSI/LVDS bridge chip driver.
+config DRM_TI_DLPC3433
+ tristate "TI DLPC3433 Display controller"
+ depends on DRM && DRM_PANEL
+ depends on OF
+ select DRM_MIPI_DSI
+ help
+ TI DLPC3433 is a MIPI DSI based display controller bridge
+ used in high-resolution DMD-based projectors.
+
+ It has a flexible configuration of MIPI DSI and DPI signal
+ input and produces a DMD output in RGB565, RGB666 or RGB888
+ formats.
+
+ It supports up to 720p resolution at 60 and 120 Hz refresh
+ rates.
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
@@ -365,6 +385,8 @@ source "drivers/gpu/drm/bridge/adv7511/Kconfig"
source "drivers/gpu/drm/bridge/cadence/Kconfig"
+source "drivers/gpu/drm/bridge/imx/Kconfig"
+
source "drivers/gpu/drm/bridge/synopsys/Kconfig"
endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index f6c0a95de549..1884803c6860 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_DLPC3433) += ti-dlpc3433.o
obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
@@ -35,4 +36,5 @@ obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
obj-y += analogix/
obj-y += cadence/
+obj-y += imx/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 9e3bb8a8ee40..a031a0cd1f18 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -226,18 +226,6 @@
#define ADV7511_REG_CEC_CLK_DIV 0x4e
#define ADV7511_REG_CEC_SOFT_RESET 0x50
-static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
- ADV7511_REG_CEC_RX1_FRAME_HDR,
- ADV7511_REG_CEC_RX2_FRAME_HDR,
- ADV7511_REG_CEC_RX3_FRAME_HDR,
-};
-
-static const u8 ADV7511_REG_CEC_RX_FRAME_LEN[] = {
- ADV7511_REG_CEC_RX1_FRAME_LEN,
- ADV7511_REG_CEC_RX2_FRAME_LEN,
- ADV7511_REG_CEC_RX3_FRAME_LEN,
-};
-
#define ADV7533_REG_CEC_OFFSET 0x70
enum adv7511_input_clock {
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 399f625a50c8..0b266f28f150 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -15,6 +15,18 @@
#include "adv7511.h"
+static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
+ ADV7511_REG_CEC_RX1_FRAME_HDR,
+ ADV7511_REG_CEC_RX2_FRAME_HDR,
+ ADV7511_REG_CEC_RX3_FRAME_HDR,
+};
+
+static const u8 ADV7511_REG_CEC_RX_FRAME_LEN[] = {
+ ADV7511_REG_CEC_RX1_FRAME_LEN,
+ ADV7511_REG_CEC_RX2_FRAME_LEN,
+ ADV7511_REG_CEC_RX3_FRAME_LEN,
+};
+
#define ADV7511_INT1_CEC_MASK \
(ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \
ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1 | \
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 5bb9300040dd..38bf28720f3a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1065,6 +1065,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
ADV7511_CEC_I2C_ADDR_DEFAULT);
if (IS_ERR(adv->i2c_cec))
return PTR_ERR(adv->i2c_cec);
+
+ regmap_write(adv->regmap, ADV7511_REG_CEC_I2C_ADDR,
+ adv->i2c_cec->addr << 1);
+
i2c_set_clientdata(adv->i2c_cec, adv);
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1271,9 +1275,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (ret)
goto err_i2c_unregister_packet;
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
- adv7511->i2c_cec->addr << 1);
-
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
if (i2c->irq) {
@@ -1392,10 +1393,21 @@ static struct i2c_driver adv7511_driver = {
static int __init adv7511_init(void)
{
- if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
- mipi_dsi_driver_register(&adv7533_dsi_driver);
+ int ret;
- return i2c_add_driver(&adv7511_driver);
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ ret = mipi_dsi_driver_register(&adv7533_dsi_driver);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_add_driver(&adv7511_driver);
+ if (ret) {
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
+ }
+
+ return ret;
}
module_init(adv7511_init);
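The rework above lets adv7511_init() undo the MIPI DSI driver registration when i2c_add_driver() fails, instead of returning with the DSI driver left registered. For symmetry, the module exit path tears the two down in reverse order; the sketch below is a hedged reconstruction of what that counterpart would look like, not a quote of this hunk:

static void __exit adv7511_exit(void)
{
	i2c_del_driver(&adv7511_driver);

	if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
		mipi_dsi_driver_unregister(&adv7533_dsi_driver);
}
module_exit(adv7511_exit);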
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index b97f6e8f0f6b..8aadcc0aa90b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -24,6 +24,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -1267,6 +1268,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
}
static
+struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
+ struct drm_atomic_state *state)
+{
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_old_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static
struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
struct drm_atomic_state *state)
{
@@ -1446,14 +1466,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
{
struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_crtc *crtc;
+ struct drm_crtc *old_crtc, *new_crtc;
+ struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
- crtc = analogix_dp_get_new_crtc(dp, old_state);
- if (!crtc)
+ new_crtc = analogix_dp_get_new_crtc(dp, old_state);
+ if (!new_crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
if (!new_crtc_state)
goto out;
@@ -1462,6 +1484,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
return;
out:
+ old_crtc = analogix_dp_get_old_crtc(dp, old_state);
+ if (old_crtc) {
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+ old_crtc);
+
+ /* When moving from PSR to fully disabled, exit PSR first. */
+ if (old_crtc_state && old_crtc_state->self_refresh_active) {
+ ret = analogix_dp_disable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to disable psr (%d)\n", ret);
+ }
+ }
+
analogix_dp_bridge_disable(bridge);
}
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 53a5da6c49dd..d1f1d525aeb6 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1443,23 +1443,24 @@ static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
-static void anx7625_hpd_polling(struct anx7625_data *ctx)
+static int _anx7625_hpd_polling(struct anx7625_data *ctx,
+ unsigned long wait_us)
{
int ret, val;
struct device *dev = &ctx->client->dev;
/* Interrupt mode, no need poll HPD status, just return */
if (ctx->pdata.intp_irq)
- return;
+ return 0;
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
ctx, val,
((val & HPD_STATUS) || (val < 0)),
- 5000,
- 5000 * 100);
+ wait_us / 100,
+ wait_us);
if (ret) {
DRM_DEV_ERROR(dev, "no hpd.\n");
- return;
+ return ret;
}
DRM_DEV_DEBUG_DRIVER(dev, "system status: 0x%x. HPD raise up.\n", val);
@@ -1472,6 +1473,23 @@ static void anx7625_hpd_polling(struct anx7625_data *ctx)
if (!ctx->pdata.panel_bridge && ctx->bridge_attached)
drm_helper_hpd_irq_event(ctx->bridge.dev);
+
+ return 0;
+}
+
+static int anx7625_wait_hpd_asserted(struct drm_dp_aux *aux,
+ unsigned long wait_us)
+{
+ struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ ret = _anx7625_hpd_polling(ctx, wait_us);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static void anx7625_remove_edid(struct anx7625_data *ctx)
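The polling helper above now takes its budget from the caller: readx_poll_timeout() keeps re-reading anx7625_read_hpd_status_p0() until HPD is reported or the read fails, sleeping wait_us/100 between attempts and giving up after wait_us microseconds, so the old hard-coded 5000/500000 pair becomes just one possible argument. A hedged, generic sketch of the same pattern (struct my_dev and some_read_status() are made up for illustration):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

struct my_dev { void __iomem *regs; };

static int some_read_status(struct my_dev *dev)
{
	return readl(dev->regs);	/* status register, bit 0 = ready */
}

static int wait_ready(struct my_dev *dev, unsigned long timeout_us)
{
	int val;

	/* Re-read until bit 0 is set, sleeping timeout_us/100 per try. */
	return readx_poll_timeout(some_read_status, dev, val,
				  val & BIT(0), timeout_us / 100, timeout_us);
}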
@@ -1623,29 +1641,30 @@ static int anx7625_parse_dt(struct device *dev,
anx7625_get_swing_setting(dev, pdata);
- pdata->is_dpi = 1; /* default dpi mode */
+ pdata->is_dpi = 0; /* default dsi mode */
pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
if (!pdata->mipi_host_node) {
DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
return -ENODEV;
}
- bus_type = V4L2_FWNODE_BUS_TYPE_PARALLEL;
+ bus_type = 0;
mipi_lanes = MAX_LANES_SUPPORT;
ep0 = of_graph_get_endpoint_by_regs(np, 0, 0);
if (ep0) {
if (of_property_read_u32(ep0, "bus-type", &bus_type))
bus_type = 0;
- mipi_lanes = of_property_count_u32_elems(ep0, "data-lanes");
+ mipi_lanes = drm_of_get_data_lanes_count(ep0, 1, MAX_LANES_SUPPORT);
+ of_node_put(ep0);
}
- if (bus_type == V4L2_FWNODE_BUS_TYPE_PARALLEL) /* bus type is Parallel(DSI) */
- pdata->is_dpi = 0;
+ if (bus_type == V4L2_FWNODE_BUS_TYPE_DPI) /* bus type is DPI */
+ pdata->is_dpi = 1;
- pdata->mipi_lanes = mipi_lanes;
- if (pdata->mipi_lanes > MAX_LANES_SUPPORT || pdata->mipi_lanes <= 0)
- pdata->mipi_lanes = MAX_LANES_SUPPORT;
+ pdata->mipi_lanes = MAX_LANES_SUPPORT;
+ if (mipi_lanes > 0)
+ pdata->mipi_lanes = mipi_lanes;
if (pdata->is_dpi)
DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DPI host node.\n");
@@ -1657,8 +1676,10 @@ static int anx7625_parse_dt(struct device *dev,
pdata->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
if (IS_ERR(pdata->panel_bridge)) {
- if (PTR_ERR(pdata->panel_bridge) == -ENODEV)
+ if (PTR_ERR(pdata->panel_bridge) == -ENODEV) {
+ pdata->panel_bridge = NULL;
return 0;
+ }
return PTR_ERR(pdata->panel_bridge);
}
@@ -1738,6 +1759,7 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
}
pm_runtime_get_sync(dev);
+ _anx7625_hpd_polling(ctx, 5000 * 100);
edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
pm_runtime_put_sync(dev);
@@ -2375,6 +2397,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
ctx->connector = connector;
pm_runtime_get_sync(dev);
+ _anx7625_hpd_polling(ctx, 5000 * 100);
anx7625_dp_start(ctx);
}
@@ -2433,82 +2456,44 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
struct i2c_client *client)
{
- int err = 0;
+ struct device *dev = &ctx->client->dev;
- ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter,
- TX_P0_ADDR >> 1);
+ ctx->i2c.tx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ TX_P0_ADDR >> 1);
if (IS_ERR(ctx->i2c.tx_p0_client))
return PTR_ERR(ctx->i2c.tx_p0_client);
- ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter,
- TX_P1_ADDR >> 1);
- if (IS_ERR(ctx->i2c.tx_p1_client)) {
- err = PTR_ERR(ctx->i2c.tx_p1_client);
- goto free_tx_p0;
- }
+ ctx->i2c.tx_p1_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ TX_P1_ADDR >> 1);
+ if (IS_ERR(ctx->i2c.tx_p1_client))
+ return PTR_ERR(ctx->i2c.tx_p1_client);
- ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter,
- TX_P2_ADDR >> 1);
- if (IS_ERR(ctx->i2c.tx_p2_client)) {
- err = PTR_ERR(ctx->i2c.tx_p2_client);
- goto free_tx_p1;
- }
+ ctx->i2c.tx_p2_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ TX_P2_ADDR >> 1);
+ if (IS_ERR(ctx->i2c.tx_p2_client))
+ return PTR_ERR(ctx->i2c.tx_p2_client);
- ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter,
- RX_P0_ADDR >> 1);
- if (IS_ERR(ctx->i2c.rx_p0_client)) {
- err = PTR_ERR(ctx->i2c.rx_p0_client);
- goto free_tx_p2;
- }
+ ctx->i2c.rx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ RX_P0_ADDR >> 1);
+ if (IS_ERR(ctx->i2c.rx_p0_client))
+ return PTR_ERR(ctx->i2c.rx_p0_client);
- ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter,
- RX_P1_ADDR >> 1);
- if (IS_ERR(ctx->i2c.rx_p1_client)) {
- err = PTR_ERR(ctx->i2c.rx_p1_client);
- goto free_rx_p0;
- }
+ ctx->i2c.rx_p1_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ RX_P1_ADDR >> 1);
+ if (IS_ERR(ctx->i2c.rx_p1_client))
+ return PTR_ERR(ctx->i2c.rx_p1_client);
- ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter,
- RX_P2_ADDR >> 1);
- if (IS_ERR(ctx->i2c.rx_p2_client)) {
- err = PTR_ERR(ctx->i2c.rx_p2_client);
- goto free_rx_p1;
- }
+ ctx->i2c.rx_p2_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ RX_P2_ADDR >> 1);
+ if (IS_ERR(ctx->i2c.rx_p2_client))
+ return PTR_ERR(ctx->i2c.rx_p2_client);
- ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter,
- TCPC_INTERFACE_ADDR >> 1);
- if (IS_ERR(ctx->i2c.tcpc_client)) {
- err = PTR_ERR(ctx->i2c.tcpc_client);
- goto free_rx_p2;
- }
+ ctx->i2c.tcpc_client = devm_i2c_new_dummy_device(dev, client->adapter,
+ TCPC_INTERFACE_ADDR >> 1);
+ if (IS_ERR(ctx->i2c.tcpc_client))
+ return PTR_ERR(ctx->i2c.tcpc_client);
return 0;
-
-free_rx_p2:
- i2c_unregister_device(ctx->i2c.rx_p2_client);
-free_rx_p1:
- i2c_unregister_device(ctx->i2c.rx_p1_client);
-free_rx_p0:
- i2c_unregister_device(ctx->i2c.rx_p0_client);
-free_tx_p2:
- i2c_unregister_device(ctx->i2c.tx_p2_client);
-free_tx_p1:
- i2c_unregister_device(ctx->i2c.tx_p1_client);
-free_tx_p0:
- i2c_unregister_device(ctx->i2c.tx_p0_client);
-
- return err;
-}
-
-static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
-{
- i2c_unregister_device(ctx->i2c.tx_p0_client);
- i2c_unregister_device(ctx->i2c.tx_p1_client);
- i2c_unregister_device(ctx->i2c.tx_p2_client);
- i2c_unregister_device(ctx->i2c.rx_p0_client);
- i2c_unregister_device(ctx->i2c.rx_p1_client);
- i2c_unregister_device(ctx->i2c.rx_p2_client);
- i2c_unregister_device(ctx->i2c.tcpc_client);
}
static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev)
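The hunk above converts every i2c_new_dummy_device() to devm_i2c_new_dummy_device(), tying each dummy client to the bridge's struct device so it is unregistered automatically; that is why the free_* error ladder and anx7625_unregister_i2c_dummy_clients() can go away. A hedged sketch of the pattern with an illustrative address constant and helper name:

#include <linux/err.h>
#include <linux/i2c.h>

#define EXAMPLE_ADDR 0x3d	/* hypothetical 7-bit secondary address */

/* Illustrative: the client is released when 'dev' is unbound, so the
 * caller simply returns on error with nothing to unwind. */
static int add_secondary_client(struct device *dev, struct i2c_client *main,
				struct i2c_client **out)
{
	*out = devm_i2c_new_dummy_device(dev, main->adapter, EXAMPLE_ADDR);

	return PTR_ERR_OR_ZERO(*out);
}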
@@ -2532,45 +2517,15 @@ static int __maybe_unused anx7625_runtime_pm_resume(struct device *dev)
mutex_lock(&ctx->lock);
anx7625_power_on_init(ctx);
- anx7625_hpd_polling(ctx);
mutex_unlock(&ctx->lock);
return 0;
}
-static int __maybe_unused anx7625_resume(struct device *dev)
-{
- struct anx7625_data *ctx = dev_get_drvdata(dev);
-
- if (!ctx->pdata.intp_irq)
- return 0;
-
- if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
- enable_irq(ctx->pdata.intp_irq);
- anx7625_runtime_pm_resume(dev);
- }
-
- return 0;
-}
-
-static int __maybe_unused anx7625_suspend(struct device *dev)
-{
- struct anx7625_data *ctx = dev_get_drvdata(dev);
-
- if (!ctx->pdata.intp_irq)
- return 0;
-
- if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
- anx7625_runtime_pm_suspend(dev);
- disable_irq(ctx->pdata.intp_irq);
- }
-
- return 0;
-}
-
static const struct dev_pm_ops anx7625_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(anx7625_suspend, anx7625_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(anx7625_runtime_pm_suspend,
anx7625_runtime_pm_resume, NULL)
};
@@ -2653,15 +2608,8 @@ static int anx7625_i2c_probe(struct i2c_client *client,
platform->aux.name = "anx7625-aux";
platform->aux.dev = dev;
platform->aux.transfer = anx7625_aux_transfer;
+ platform->aux.wait_hpd_asserted = anx7625_wait_hpd_asserted;
drm_dp_aux_init(&platform->aux);
- devm_of_dp_aux_populate_ep_devices(&platform->aux);
-
- ret = anx7625_parse_dt(dev, pdata);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
- goto free_wq;
- }
if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
ret = -ENOMEM;
@@ -2677,9 +2625,19 @@ static int anx7625_i2c_probe(struct i2c_client *client,
if (ret)
goto free_wq;
+ devm_of_dp_aux_populate_ep_devices(&platform->aux);
+
+ ret = anx7625_parse_dt(dev, pdata);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+ goto free_wq;
+ }
+
if (!platform->pdata.low_power_mode) {
anx7625_disable_pd_protocol(platform);
pm_runtime_get_sync(dev);
+ _anx7625_hpd_polling(platform, 5000 * 100);
}
/* Add work function */
@@ -2720,8 +2678,6 @@ unregister_bridge:
if (!platform->pdata.low_power_mode)
pm_runtime_put_sync_suspend(&client->dev);
- anx7625_unregister_i2c_dummy_clients(platform);
-
free_wq:
if (platform->workqueue)
destroy_workqueue(platform->workqueue);
@@ -2751,8 +2707,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (!platform->pdata.low_power_mode)
pm_runtime_put_sync_suspend(&client->dev);
- anx7625_unregister_i2c_dummy_clients(platform);
-
if (platform->pdata.audio_en)
anx7625_unregister_audio(platform);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 67f0f444b4e8..ab63e7b11944 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -43,6 +44,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 829e1a144656..20bece84ff8c 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -462,6 +462,7 @@ struct cdns_dsi {
struct reset_control *dsi_p_rst;
struct clk *dsi_sys_clk;
bool link_initialized;
+ bool phy_initialized;
struct phy *dphy;
};
@@ -711,11 +712,21 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
+static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+
+ pm_runtime_put(dsi->base.dev);
+}
+
static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
{
struct cdns_dsi_output *output = &dsi->output;
u32 status;
+ if (dsi->phy_initialized)
+ return;
/*
* Power all internal DPHY blocks down and maintain their reset line
* asserted before changing the DPHY config.
@@ -739,6 +750,7 @@ static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
DPHY_D_RSTB(output->dev->lanes) | DPHY_C_RSTB,
dsi->regs + MCTL_DPHY_CFG0);
+ dsi->phy_initialized = true;
}
static void cdns_dsi_init_link(struct cdns_dsi *dsi)
@@ -914,11 +926,25 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
+static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+
+ if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
+ return;
+
+ cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
+}
+
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
.disable = cdns_dsi_bridge_disable,
+ .pre_enable = cdns_dsi_bridge_pre_enable,
.enable = cdns_dsi_bridge_enable,
+ .post_disable = cdns_dsi_bridge_post_disable,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 47dea657a752..481c86b2406e 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -9,9 +9,12 @@
#include <drm/drm_print.h>
#include <drm/drm_mipi_dsi.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
@@ -26,6 +29,11 @@
#define PD_CTRL(n) (0x0a + ((n) & 0x3)) /* 0..3 */
#define RST_CTRL(n) (0x0e + ((n) & 0x1)) /* 0..1 */
#define SYS_CTRL(n) (0x10 + ((n) & 0x7)) /* 0..4 */
+#define SYS_CTRL_1_CLK_PHASE_MSK GENMASK(5, 4)
+#define CLK_PHASE_0 0
+#define CLK_PHASE_1_4 1
+#define CLK_PHASE_1_2 2
+#define CLK_PHASE_3_4 3
#define RGB_DRV(n) (0x18 + ((n) & 0x3)) /* 0..3 */
#define RGB_DLY(n) (0x1c + ((n) & 0x1)) /* 0..1 */
#define RGB_TEST_CTRL 0x1e
@@ -100,7 +108,7 @@
#define MIPI_PN_SWAP 0x87
#define MIPI_PN_SWAP_CLK BIT(4)
#define MIPI_PN_SWAP_D(n) BIT((n) & 0x3)
-#define MIPI_SOT_SYNC_BIT_(n) (0x88 + ((n) & 0x1)) /* 0..1 */
+#define MIPI_SOT_SYNC_BIT(n) (0x88 + ((n) & 0x1)) /* 0..1 */
#define MIPI_ULPS_CTRL 0x8a
#define MIPI_CLK_CHK_VAR 0x8e
#define MIPI_CLK_CHK_INI 0x8f
@@ -115,7 +123,7 @@
#define MIPI_T_CLK_SETTLE 0x9a
#define MIPI_TO_HS_RX_L 0x9e
#define MIPI_TO_HS_RX_H 0x9f
-#define MIPI_PHY_(n) (0xa0 + ((n) & 0x7)) /* 0..5 */
+#define MIPI_PHY(n) (0xa0 + ((n) & 0x7)) /* 0..5 */
#define MIPI_PD_RX 0xb0
#define MIPI_PD_TERM 0xb1
#define MIPI_PD_HSRX 0xb2
@@ -125,13 +133,11 @@
#define MIPI_FORCE_0 0xb6
#define MIPI_RST_CTRL 0xb7
#define MIPI_RST_NUM 0xb8
-#define MIPI_DBG_SET_(n) (0xc0 + ((n) & 0xf)) /* 0..9 */
+#define MIPI_DBG_SET(n) (0xc0 + ((n) & 0xf)) /* 0..9 */
#define MIPI_DBG_SEL 0xe0
#define MIPI_DBG_DATA 0xe1
#define MIPI_ATE_TEST_SEL 0xe2
-#define MIPI_ATE_STATUS_(n) (0xe3 + ((n) & 0x1)) /* 0..1 */
-#define MIPI_ATE_STATUS_1 0xe4
-#define ICN6211_MAX_REGISTER MIPI_ATE_STATUS(1)
+#define MIPI_ATE_STATUS(n) (0xe3 + ((n) & 0x1)) /* 0..1 */
struct chipone {
struct device *dev;
@@ -155,10 +161,10 @@ static const struct regmap_range chipone_dsi_readable_ranges[] = {
regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
- regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
+ regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY(5)),
regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
- regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
- regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
+ regmap_reg_range(MIPI_DBG_SET(0), MIPI_DBG_SET(9)),
+ regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS(1)),
};
static const struct regmap_access_table chipone_dsi_readable_table = {
@@ -172,10 +178,10 @@ static const struct regmap_range chipone_dsi_writeable_ranges[] = {
regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
- regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
+ regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY(5)),
regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
- regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
- regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
+ regmap_reg_range(MIPI_DBG_SET(0), MIPI_DBG_SET(9)),
+ regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS(1)),
};
static const struct regmap_access_table chipone_dsi_writeable_table = {
@@ -189,7 +195,7 @@ static const struct regmap_config chipone_regmap_config = {
.rd_table = &chipone_dsi_readable_table,
.wr_table = &chipone_dsi_writeable_table,
.cache_type = REGCACHE_RBTREE,
- .max_register = MIPI_ATE_STATUS_(1),
+ .max_register = MIPI_ATE_STATUS(1),
};
static int chipone_dsi_read(void *context,
@@ -336,7 +342,7 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
const struct drm_bridge_state *bridge_state;
u16 hfp, hbp, hsync;
u32 bus_flags;
- u8 pol, id[4];
+ u8 pol, sys_ctrl_1, id[4];
chipone_readb(icn, VENDOR_ID, id);
chipone_readb(icn, DEVICE_ID_H, id + 1);
@@ -414,7 +420,14 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
chipone_configure_pll(icn, mode);
chipone_writeb(icn, SYS_CTRL(0), 0x40);
- chipone_writeb(icn, SYS_CTRL(1), 0x88);
+ sys_ctrl_1 = 0x88;
+
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ sys_ctrl_1 |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK, CLK_PHASE_0);
+ else
+ sys_ctrl_1 |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK, CLK_PHASE_1_2);
+
+ chipone_writeb(icn, SYS_CTRL(1), sys_ctrl_1);
/* icn6211 specific sequence */
chipone_writeb(icn, MIPI_FORCE_0, 0x20);
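The SYS_CTRL(1) write above now builds the register value from a 0x88 base plus a clock-phase field chosen from the bus flags, using FIELD_PREP() against the SYS_CTRL_1_CLK_PHASE_MSK (GENMASK(5, 4)) definition added earlier in this patch. A hedged restatement as a standalone helper (the function name and bool parameter are illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SYS_CTRL_1_CLK_PHASE_MSK	GENMASK(5, 4)
#define CLK_PHASE_0			0
#define CLK_PHASE_1_2			2

static u8 icn6211_sys_ctrl_1(bool drive_posedge)
{
	u8 val = 0x88;	/* base value the driver always writes */

	val |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK,
			  drive_posedge ? CLK_PHASE_0 : CLK_PHASE_1_2);
	return val;
}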
@@ -486,21 +499,18 @@ static int chipone_dsi_attach(struct chipone *icn)
{
struct mipi_dsi_device *dsi = icn->dsi;
struct device *dev = icn->dev;
- struct device_node *endpoint;
int dsi_lanes, ret;
- endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
- dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
- of_node_put(endpoint);
+ dsi_lanes = drm_of_get_data_lanes_count_ep(dev->of_node, 0, 0, 1, 4);
/*
* If the 'data-lanes' property does not exist in DT or is invalid,
* default to previously hard-coded behavior, which was 4 data lanes.
*/
- if (dsi_lanes >= 1 && dsi_lanes <= 4)
- icn->dsi->lanes = dsi_lanes;
- else
+ if (dsi_lanes < 0)
icn->dsi->lanes = 4;
+ else
+ icn->dsi->lanes = dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index 486f405c2e16..ba060277c3fd 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -6,6 +6,7 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index e4d52a7e31b7..9a12449ad7b8 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -6,6 +6,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index b2675c769a55..f9e0f8d99268 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -74,22 +75,6 @@ static int fsl_ldb_attach(struct drm_bridge *bridge,
bridge, flags);
}
-static int fsl_ldb_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- /* Invert DE signal polarity. */
- bridge_state->input_bus_cfg.flags &= ~(DRM_BUS_FLAG_DE_LOW |
- DRM_BUS_FLAG_DE_HIGH);
- if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW)
- bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH;
- else if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_HIGH)
- bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_LOW;
-
- return 0;
-}
-
static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
@@ -153,7 +138,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
reg = LDB_CTRL_CH0_ENABLE;
if (fsl_ldb->lvds_dual_link)
- reg |= LDB_CTRL_CH1_ENABLE;
+ reg |= LDB_CTRL_CH1_ENABLE | LDB_CTRL_SPLIT_MODE;
if (lvds_format_24bpp) {
reg |= LDB_CTRL_CH0_DATA_WIDTH;
@@ -233,7 +218,7 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- if (mode->clock > (fsl_ldb->lvds_dual_link ? 80000 : 160000))
+ if (mode->clock > (fsl_ldb->lvds_dual_link ? 160000 : 80000))
return MODE_CLOCK_HIGH;
return MODE_OK;
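The corrected comparison above encodes the limit the right way round: with dual-link LVDS each channel carries every other pixel, so the roughly 80 MHz per-channel ceiling supports a mode clock of up to about 160 MHz, while single link stays capped at 80 MHz. A hedged restatement of just that check (the helper name is illustrative):

#include <linux/types.h>

/* Illustrative: true if 'clock_khz' fits the LDB link configuration. */
static bool ldb_clock_in_range(bool dual_link, int clock_khz)
{
	return clock_khz <= (dual_link ? 160000 : 80000);
}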
@@ -241,7 +226,6 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs funcs = {
.attach = fsl_ldb_attach,
- .atomic_check = fsl_ldb_atomic_check,
.atomic_enable = fsl_ldb_atomic_enable,
.atomic_disable = fsl_ldb_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
new file mode 100644
index 000000000000..608f47f41bcd
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -0,0 +1,47 @@
+if ARCH_MXC || COMPILE_TEST
+
+config DRM_IMX8QM_LDB
+ tristate "Freescale i.MX8QM LVDS display bridge"
+ depends on OF
+ depends on COMMON_CLK
+ select DRM_KMS_HELPER
+ help
+ Choose this to enable the internal LVDS Display Bridge (LDB) found in
+ the Freescale i.MX8qm processor. The official name of the LDB is pixel mapper.
+
+config DRM_IMX8QXP_LDB
+ tristate "Freescale i.MX8QXP LVDS display bridge"
+ depends on OF
+ depends on COMMON_CLK
+ select DRM_KMS_HELPER
+ help
+ Choose this to enable the internal LVDS Display Bridge (LDB) found in
+ the Freescale i.MX8qxp processor. The official name of the LDB is pixel mapper.
+
+config DRM_IMX8QXP_PIXEL_COMBINER
+ tristate "Freescale i.MX8QM/QXP pixel combiner"
+ depends on OF
+ depends on COMMON_CLK
+ select DRM_KMS_HELPER
+ help
+ Choose this to enable the pixel combiner found in
+ Freescale i.MX8qm/qxp processors.
+
+config DRM_IMX8QXP_PIXEL_LINK
+ tristate "Freescale i.MX8QM/QXP display pixel link"
+ depends on OF
+ depends on IMX_SCU
+ select DRM_KMS_HELPER
+ help
+ Choose this to enable the display pixel link found in
+ Freescale i.MX8qm/qxp processors.
+
+config DRM_IMX8QXP_PIXEL_LINK_TO_DPI
+ tristate "Freescale i.MX8QXP pixel link to display pixel interface"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Choose this to enable the pixel link to display pixel interface (PXL2DPI)
+ found in the Freescale i.MX8qxp processor.
+
+endif # ARCH_MXC || COMPILE_TEST
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
new file mode 100644
index 000000000000..aa90ec8d5433
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -0,0 +1,9 @@
+imx8qm-ldb-objs := imx-ldb-helper.o imx8qm-ldb-drv.o
+obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
+
+imx8qxp-ldb-objs := imx-ldb-helper.o imx8qxp-ldb-drv.o
+obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
+
+obj-$(CONFIG_DRM_IMX8QXP_PIXEL_COMBINER) += imx8qxp-pixel-combiner.o
+obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK) += imx8qxp-pixel-link.o
+obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI) += imx8qxp-pxl2dpi.o
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
new file mode 100644
index 000000000000..7338b84bc83d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ * Copyright 2019,2020,2022 NXP
+ */
+
+#include <linux/media-bus-format.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#include "imx-ldb-helper.h"
+
+bool ldb_channel_is_single_link(struct ldb_channel *ldb_ch)
+{
+ return ldb_ch->link_type == LDB_CH_SINGLE_LINK;
+}
+
+bool ldb_channel_is_split_link(struct ldb_channel *ldb_ch)
+{
+ return ldb_ch->link_type == LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS ||
+ ldb_ch->link_type == LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS;
+}
+
+int ldb_bridge_atomic_check_helper(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+
+ ldb_ch->in_bus_format = bridge_state->input_bus_cfg.format;
+ ldb_ch->out_bus_format = bridge_state->output_bus_cfg.format;
+
+ return 0;
+}
+
+void ldb_bridge_mode_set_helper(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+
+ if (is_split)
+ ldb->ldb_ctrl |= LDB_SPLIT_MODE_EN;
+
+ switch (ldb_ch->out_bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ if (ldb_ch->chno == 0 || is_split)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (ldb_ch->chno == 1 || is_split)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ if (ldb_ch->chno == 0 || is_split)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (ldb_ch->chno == 1 || is_split)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ }
+}
+
+void ldb_bridge_enable_helper(struct drm_bridge *bridge)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+
+ /*
+ * Platform specific bridge drivers should set ldb_ctrl properly
+ * for the enablement, so just write the ctrl_reg here.
+ */
+ regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
+}
+
+void ldb_bridge_disable_helper(struct drm_bridge *bridge)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+
+ if (ldb_ch->chno == 0 || is_split)
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ if (ldb_ch->chno == 1 || is_split)
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+
+ regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
+}
+
+int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ DRM_DEV_ERROR(ldb->dev,
+ "do not support creating a drm_connector\n");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_DEV_ERROR(ldb->dev, "missing encoder\n");
+ return -ENODEV;
+ }
+
+ return drm_bridge_attach(bridge->encoder,
+ ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+int ldb_init_helper(struct ldb *ldb)
+{
+ struct device *dev = ldb->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int ret;
+ u32 i;
+
+ ldb->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(ldb->regmap)) {
+ ret = PTR_ERR(ldb->regmap);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+
+ for_each_available_child_of_node(np, child) {
+ struct ldb_channel *ldb_ch;
+
+ ret = of_property_read_u32(child, "reg", &i);
+ if (ret || i > MAX_LDB_CHAN_NUM - 1) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev,
+ "invalid channel node address: %u\n", i);
+ of_node_put(child);
+ return ret;
+ }
+
+ ldb_ch = ldb->channel[i];
+ ldb_ch->ldb = ldb;
+ ldb_ch->chno = i;
+ ldb_ch->is_available = true;
+ ldb_ch->np = child;
+
+ ldb->available_ch_cnt++;
+ }
+
+ return 0;
+}
+
+int ldb_find_next_bridge_helper(struct ldb *ldb)
+{
+ struct device *dev = ldb->dev;
+ struct ldb_channel *ldb_ch;
+ int ret, i;
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ ldb_ch = ldb->channel[i];
+
+ if (!ldb_ch->is_available)
+ continue;
+
+ ldb_ch->next_bridge = devm_drm_of_get_bridge(dev, ldb_ch->np,
+ 1, 0);
+ if (IS_ERR(ldb_ch->next_bridge)) {
+ ret = PTR_ERR(ldb_ch->next_bridge);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "failed to get next bridge: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ldb_add_bridge_helper(struct ldb *ldb,
+ const struct drm_bridge_funcs *bridge_funcs)
+{
+ struct ldb_channel *ldb_ch;
+ int i;
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ ldb_ch = ldb->channel[i];
+
+ if (!ldb_ch->is_available)
+ continue;
+
+ ldb_ch->bridge.driver_private = ldb_ch;
+ ldb_ch->bridge.funcs = bridge_funcs;
+ ldb_ch->bridge.of_node = ldb_ch->np;
+
+ drm_bridge_add(&ldb_ch->bridge);
+ }
+}
+
+void ldb_remove_bridge_helper(struct ldb *ldb)
+{
+ struct ldb_channel *ldb_ch;
+ int i;
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ ldb_ch = ldb->channel[i];
+
+ if (!ldb_ch->is_available)
+ continue;
+
+ drm_bridge_remove(&ldb_ch->bridge);
+ }
+}
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
new file mode 100644
index 000000000000..a0a5cde27fbc
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Copyright 2019,2020,2022 NXP
+ */
+
+#ifndef __IMX_LDB_HELPER__
+#define __IMX_LDB_HELPER__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#define LDB_CH0_MODE_EN_TO_DI0 BIT(0)
+#define LDB_CH0_MODE_EN_TO_DI1 (3 << 0)
+#define LDB_CH0_MODE_EN_MASK (3 << 0)
+#define LDB_CH1_MODE_EN_TO_DI0 BIT(2)
+#define LDB_CH1_MODE_EN_TO_DI1 (3 << 2)
+#define LDB_CH1_MODE_EN_MASK (3 << 2)
+#define LDB_SPLIT_MODE_EN BIT(4)
+#define LDB_DATA_WIDTH_CH0_24 BIT(5)
+#define LDB_BIT_MAP_CH0_JEIDA BIT(6)
+#define LDB_DATA_WIDTH_CH1_24 BIT(7)
+#define LDB_BIT_MAP_CH1_JEIDA BIT(8)
+#define LDB_DI0_VS_POL_ACT_LOW BIT(9)
+#define LDB_DI1_VS_POL_ACT_LOW BIT(10)
+
+#define MAX_LDB_CHAN_NUM 2
+
+enum ldb_channel_link_type {
+ LDB_CH_SINGLE_LINK,
+ LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS,
+ LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS,
+};
+
+struct ldb;
+
+struct ldb_channel {
+ struct ldb *ldb;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct device_node *np;
+ u32 chno;
+ bool is_available;
+ u32 in_bus_format;
+ u32 out_bus_format;
+ enum ldb_channel_link_type link_type;
+};
+
+struct ldb {
+ struct regmap *regmap;
+ struct device *dev;
+ struct ldb_channel *channel[MAX_LDB_CHAN_NUM];
+ unsigned int ctrl_reg;
+ u32 ldb_ctrl;
+ unsigned int available_ch_cnt;
+};
+
+#define bridge_to_ldb_ch(b) container_of(b, struct ldb_channel, bridge)
+
+bool ldb_channel_is_single_link(struct ldb_channel *ldb_ch);
+bool ldb_channel_is_split_link(struct ldb_channel *ldb_ch);
+
+int ldb_bridge_atomic_check_helper(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+
+void ldb_bridge_mode_set_helper(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+
+void ldb_bridge_enable_helper(struct drm_bridge *bridge);
+
+void ldb_bridge_disable_helper(struct drm_bridge *bridge);
+
+int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags);
+
+int ldb_init_helper(struct ldb *ldb);
+
+int ldb_find_next_bridge_helper(struct ldb *ldb);
+
+void ldb_add_bridge_helper(struct ldb *ldb,
+ const struct drm_bridge_funcs *bridge_funcs);
+
+void ldb_remove_bridge_helper(struct ldb *ldb);
+
+#endif /* __IMX_LDB_HELPER__ */
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
new file mode 100644
index 000000000000..178af8d2d80b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/media-bus-format.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#include "imx-ldb-helper.h"
+
+#define LDB_CH0_10BIT_EN BIT(22)
+#define LDB_CH1_10BIT_EN BIT(23)
+#define LDB_CH0_DATA_WIDTH_24BIT BIT(24)
+#define LDB_CH1_DATA_WIDTH_24BIT BIT(26)
+#define LDB_CH0_DATA_WIDTH_30BIT (2 << 24)
+#define LDB_CH1_DATA_WIDTH_30BIT (2 << 26)
+
+#define SS_CTRL 0x20
+#define CH_HSYNC_M(id) BIT(0 + ((id) * 2))
+#define CH_VSYNC_M(id) BIT(1 + ((id) * 2))
+#define CH_PHSYNC(id) BIT(0 + ((id) * 2))
+#define CH_PVSYNC(id) BIT(1 + ((id) * 2))
+
+#define DRIVER_NAME "imx8qm-ldb"
+
+struct imx8qm_ldb_channel {
+ struct ldb_channel base;
+ struct phy *phy;
+};
+
+struct imx8qm_ldb {
+ struct ldb base;
+ struct device *dev;
+ struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct clk *clk_pixel;
+ struct clk *clk_bypass;
+ int active_chno;
+};
+
+static inline struct imx8qm_ldb_channel *
+base_to_imx8qm_ldb_channel(struct ldb_channel *base)
+{
+ return container_of(base, struct imx8qm_ldb_channel, base);
+}
+
+static inline struct imx8qm_ldb *base_to_imx8qm_ldb(struct ldb *base)
+{
+ return container_of(base, struct imx8qm_ldb, base);
+}
+
+static void imx8qm_ldb_set_phy_cfg(struct imx8qm_ldb *imx8qm_ldb,
+ unsigned long di_clk,
+ bool is_split, bool is_slave,
+ struct phy_configure_opts_lvds *phy_cfg)
+{
+ phy_cfg->bits_per_lane_and_dclk_cycle = 7;
+ phy_cfg->lanes = 4;
+ phy_cfg->differential_clk_rate = is_split ? di_clk / 2 : di_clk;
+ phy_cfg->is_slave = is_slave;
+}
+
+static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qm_ldb_channel *imx8qm_ldb_ch =
+ base_to_imx8qm_ldb_channel(ldb_ch);
+ struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
+ struct drm_display_mode *adj = &crtc_state->adjusted_mode;
+ unsigned long di_clk = adj->clock * 1000;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ union phy_configure_opts opts = { };
+ struct phy_configure_opts_lvds *phy_cfg = &opts.lvds;
+ int ret;
+
+ ret = ldb_bridge_atomic_check_helper(bridge, bridge_state,
+ crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, false, phy_cfg);
+ ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
+ if (ret < 0) {
+ DRM_DEV_DEBUG_DRIVER(imx8qm_ldb->dev,
+ "failed to validate PHY: %d\n", ret);
+ return ret;
+ }
+
+ if (is_split) {
+ imx8qm_ldb_ch =
+ &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
+ phy_cfg);
+ ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
+ if (ret < 0) {
+ DRM_DEV_DEBUG_DRIVER(imx8qm_ldb->dev,
+ "failed to validate slave PHY: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
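+/*
+ * Take a runtime PM reference, initialize and configure the PHY(s), set
+ * the pixel and bypass clocks to the DI clock rate, and program the data
+ * width, VSYNC polarity and SS_CTRL sync polarities for the active
+ * channel(s).
+ */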
+static void
+imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qm_ldb_channel *imx8qm_ldb_ch =
+ base_to_imx8qm_ldb_channel(ldb_ch);
+ struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
+ struct device *dev = imx8qm_ldb->dev;
+ unsigned long di_clk = adjusted_mode->clock * 1000;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ union phy_configure_opts opts = { };
+ struct phy_configure_opts_lvds *phy_cfg = &opts.lvds;
+ u32 chno = ldb_ch->chno;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to get runtime PM sync: %d\n", ret);
+
+ ret = phy_init(imx8qm_ldb_ch->phy);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to initialize PHY: %d\n", ret);
+
+ clk_set_rate(imx8qm_ldb->clk_bypass, di_clk);
+ clk_set_rate(imx8qm_ldb->clk_pixel, di_clk);
+
+ imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, false, phy_cfg);
+ ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to configure PHY: %d\n", ret);
+
+ if (is_split) {
+ imx8qm_ldb_ch =
+ &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
+ phy_cfg);
+ ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to configure slave PHY: %d\n",
+ ret);
+ }
+
+ /* input VSYNC signal from pixel link is active low */
+ if (ldb_ch->chno == 0 || is_split)
+ ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
+ if (ldb_ch->chno == 1 || is_split)
+ ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
+
+ switch (ldb_ch->out_bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ if (ldb_ch->chno == 0 || is_split)
+ ldb->ldb_ctrl |= LDB_CH0_DATA_WIDTH_24BIT;
+ if (ldb_ch->chno == 1 || is_split)
+ ldb->ldb_ctrl |= LDB_CH1_DATA_WIDTH_24BIT;
+ break;
+ }
+
+ ldb_bridge_mode_set_helper(bridge, mode, adjusted_mode);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL, CH_VSYNC_M(chno), 0);
+ else if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL,
+ CH_VSYNC_M(chno), CH_PVSYNC(chno));
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL, CH_HSYNC_M(chno), 0);
+ else if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL,
+ CH_HSYNC_M(chno), CH_PHSYNC(chno));
+}
+
+static void
+imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qm_ldb_channel *imx8qm_ldb_ch =
+ base_to_imx8qm_ldb_channel(ldb_ch);
+ struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
+ struct device *dev = imx8qm_ldb->dev;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ int ret;
+
+ clk_prepare_enable(imx8qm_ldb->clk_pixel);
+ clk_prepare_enable(imx8qm_ldb->clk_bypass);
+
+ /* both DI0 and DI1 connect to the pixel link, so it is fine to use DI0 only */
+ if (ldb_ch->chno == 0 || is_split) {
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI0;
+ }
+ if (ldb_ch->chno == 1 || is_split) {
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+ ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI0;
+ }
+
+ if (is_split) {
+ ret = phy_power_on(imx8qm_ldb->channel[0].phy);
+ if (ret)
+ DRM_DEV_ERROR(dev,
+ "failed to power on channel0 PHY: %d\n",
+ ret);
+
+ ret = phy_power_on(imx8qm_ldb->channel[1].phy);
+ if (ret)
+ DRM_DEV_ERROR(dev,
+ "failed to power on channel1 PHY: %d\n",
+ ret);
+ } else {
+ ret = phy_power_on(imx8qm_ldb_ch->phy);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret);
+ }
+
+ ldb_bridge_enable_helper(bridge);
+}
+
+static void
+imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qm_ldb_channel *imx8qm_ldb_ch =
+ base_to_imx8qm_ldb_channel(ldb_ch);
+ struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
+ struct device *dev = imx8qm_ldb->dev;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ int ret;
+
+ ldb_bridge_disable_helper(bridge);
+
+ if (is_split) {
+ ret = phy_power_off(imx8qm_ldb->channel[0].phy);
+ if (ret)
+ DRM_DEV_ERROR(dev,
+ "failed to power off channel0 PHY: %d\n",
+ ret);
+ ret = phy_power_off(imx8qm_ldb->channel[1].phy);
+ if (ret)
+ DRM_DEV_ERROR(dev,
+ "failed to power off channel1 PHY: %d\n",
+ ret);
+ } else {
+ ret = phy_power_off(imx8qm_ldb_ch->phy);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to power off PHY: %d\n", ret);
+ }
+
+ clk_disable_unprepare(imx8qm_ldb->clk_bypass);
+ clk_disable_unprepare(imx8qm_ldb->clk_pixel);
+
+ ret = pm_runtime_put(dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to put runtime PM: %d\n", ret);
+}
+
+static const u32 imx8qm_ldb_bus_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ MEDIA_BUS_FMT_FIXED,
+};
+
+static bool imx8qm_ldb_bus_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8qm_ldb_bus_output_fmts); i++) {
+ if (imx8qm_ldb_bus_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
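+/*
+ * The input side uses the 36-bit CPADLO bus formats (component padding in
+ * the low bits), matching the pixel combiner output formats added in this
+ * series; pick the one that corresponds to the requested LVDS output
+ * format.
+ */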
+static u32 *
+imx8qm_ldb_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct drm_display_info *di;
+ const struct drm_format_info *finfo;
+ u32 *input_fmts;
+
+ if (!imx8qm_ldb_bus_output_fmt_supported(output_fmt))
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ case MEDIA_BUS_FMT_FIXED:
+ di = &conn_state->connector->display_info;
+
+ /*
+ * Look at the first bus format to determine input format.
+ * Default to MEDIA_BUS_FMT_RGB888_1X36_CPADLO if there is no match.
+ */
+ if (di->num_bus_formats) {
+ finfo = drm_format_info(di->bus_formats[0]);
+
+ input_fmts[0] = finfo->depth == 18 ?
+ MEDIA_BUS_FMT_RGB666_1X36_CPADLO :
+ MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
+ } else {
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
+ }
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X36_CPADLO;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
+ break;
+ default:
+ kfree(input_fmts);
+ input_fmts = NULL;
+ break;
+ }
+
+ return input_fmts;
+}
+
+static u32 *
+imx8qm_ldb_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ *num_output_fmts = ARRAY_SIZE(imx8qm_ldb_bus_output_fmts);
+ return kmemdup(imx8qm_ldb_bus_output_fmts,
+ sizeof(imx8qm_ldb_bus_output_fmts), GFP_KERNEL);
+}
+
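+/*
+ * A single LVDS channel handles pixel clocks up to 150MHz; dual-link
+ * (split) mode doubles that to 300MHz.
+ */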
+static enum drm_mode_status
+imx8qm_ldb_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ bool is_single = ldb_channel_is_single_link(ldb_ch);
+
+ if (mode->clock > 300000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock > 150000 && is_single)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_bridge_funcs imx8qm_ldb_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .mode_valid = imx8qm_ldb_bridge_mode_valid,
+ .attach = ldb_bridge_attach_helper,
+ .atomic_check = imx8qm_ldb_bridge_atomic_check,
+ .mode_set = imx8qm_ldb_bridge_mode_set,
+ .atomic_enable = imx8qm_ldb_bridge_atomic_enable,
+ .atomic_disable = imx8qm_ldb_bridge_atomic_disable,
+ .atomic_get_input_bus_fmts =
+ imx8qm_ldb_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts =
+ imx8qm_ldb_bridge_atomic_get_output_bus_fmts,
+};
+
+static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
+{
+ struct imx8qm_ldb_channel *imx8qm_ldb_ch;
+ struct ldb_channel *ldb_ch;
+ struct device *dev = imx8qm_ldb->dev;
+ int i, ret;
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ ldb_ch = &imx8qm_ldb_ch->base;
+
+ if (!ldb_ch->is_available)
+ continue;
+
+ imx8qm_ldb_ch->phy = devm_of_phy_get(dev, ldb_ch->np,
+ "lvds_phy");
+ if (IS_ERR(imx8qm_ldb_ch->phy)) {
+ ret = PTR_ERR(imx8qm_ldb_ch->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "failed to get channel%d PHY: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int imx8qm_ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx8qm_ldb *imx8qm_ldb;
+ struct imx8qm_ldb_channel *imx8qm_ldb_ch;
+ struct ldb *ldb;
+ struct ldb_channel *ldb_ch;
+ struct device_node *port1, *port2;
+ int pixel_order;
+ int ret, i;
+
+ imx8qm_ldb = devm_kzalloc(dev, sizeof(*imx8qm_ldb), GFP_KERNEL);
+ if (!imx8qm_ldb)
+ return -ENOMEM;
+
+ imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
+ if (IS_ERR(imx8qm_ldb->clk_pixel)) {
+ ret = PTR_ERR(imx8qm_ldb->clk_pixel);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "failed to get pixel clock: %d\n", ret);
+ return ret;
+ }
+
+ imx8qm_ldb->clk_bypass = devm_clk_get(dev, "bypass");
+ if (IS_ERR(imx8qm_ldb->clk_bypass)) {
+ ret = PTR_ERR(imx8qm_ldb->clk_bypass);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "failed to get bypass clock: %d\n", ret);
+ return ret;
+ }
+
+ imx8qm_ldb->dev = dev;
+
+ ldb = &imx8qm_ldb->base;
+ ldb->dev = dev;
+ ldb->ctrl_reg = 0xe0;
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
+ ldb->channel[i] = &imx8qm_ldb->channel[i].base;
+
+ ret = ldb_init_helper(ldb);
+ if (ret)
+ return ret;
+
+ if (ldb->available_ch_cnt == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, "no available channel\n");
+ return 0;
+ }
+
+ if (ldb->available_ch_cnt == 2) {
+ port1 = of_graph_get_port_by_id(ldb->channel[0]->np, 1);
+ port2 = of_graph_get_port_by_id(ldb->channel[1]->np, 1);
+ pixel_order =
+ drm_of_lvds_get_dual_link_pixel_order(port1, port2);
+ of_node_put(port1);
+ of_node_put(port2);
+
+ if (pixel_order != DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
+ DRM_DEV_ERROR(dev, "invalid dual link pixel order: %d\n",
+ pixel_order);
+ return -EINVAL;
+ }
+
+ imx8qm_ldb->active_chno = 0;
+ imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
+ ldb_ch = &imx8qm_ldb_ch->base;
+ ldb_ch->link_type = pixel_order;
+ } else {
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ ldb_ch = &imx8qm_ldb_ch->base;
+
+ if (ldb_ch->is_available) {
+ imx8qm_ldb->active_chno = ldb_ch->chno;
+ break;
+ }
+ }
+ }
+
+ ret = imx8qm_ldb_get_phy(imx8qm_ldb);
+ if (ret)
+ return ret;
+
+ ret = ldb_find_next_bridge_helper(ldb);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, imx8qm_ldb);
+ pm_runtime_enable(dev);
+
+ ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
+
+ return ret;
+}
+
+static int imx8qm_ldb_remove(struct platform_device *pdev)
+{
+ struct imx8qm_ldb *imx8qm_ldb = platform_get_drvdata(pdev);
+ struct ldb *ldb = &imx8qm_ldb->base;
+
+ ldb_remove_bridge_helper(ldb);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev)
+{
+ struct imx8qm_ldb *imx8qm_ldb = dev_get_drvdata(dev);
+ struct ldb *ldb = &imx8qm_ldb->base;
+
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(ldb->regmap, ldb->ctrl_reg, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx8qm_ldb_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend,
+ imx8qm_ldb_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx8qm_ldb_dt_ids[] = {
+ { .compatible = "fsl,imx8qm-ldb" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids);
+
+static struct platform_driver imx8qm_ldb_driver = {
+ .probe = imx8qm_ldb_probe,
+ .remove = imx8qm_ldb_remove,
+ .driver = {
+ .pm = &imx8qm_ldb_pm_ops,
+ .name = DRIVER_NAME,
+ .of_match_table = imx8qm_ldb_dt_ids,
+ },
+};
+module_platform_driver(imx8qm_ldb_driver);
+
+MODULE_DESCRIPTION("i.MX8QM LVDS Display Bridge(LDB)/Pixel Mapper bridge driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
new file mode 100644
index 000000000000..63948d5d20fd
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/media-bus-format.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#include "imx-ldb-helper.h"
+
+#define LDB_CH_SEL BIT(28)
+
+#define SS_CTRL 0x20
+#define CH_HSYNC_M(id) BIT(0 + ((id) * 2))
+#define CH_VSYNC_M(id) BIT(1 + ((id) * 2))
+#define CH_PHSYNC(id) BIT(0 + ((id) * 2))
+#define CH_PVSYNC(id) BIT(1 + ((id) * 2))
+
+#define DRIVER_NAME "imx8qxp-ldb"
+
+struct imx8qxp_ldb_channel {
+ struct ldb_channel base;
+ struct phy *phy;
+ unsigned int di_id;
+};
+
+struct imx8qxp_ldb {
+ struct ldb base;
+ struct device *dev;
+ struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct clk *clk_pixel;
+ struct clk *clk_bypass;
+ struct drm_bridge *companion;
+ int active_chno;
+};
+
+static inline struct imx8qxp_ldb_channel *
+base_to_imx8qxp_ldb_channel(struct ldb_channel *base)
+{
+ return container_of(base, struct imx8qxp_ldb_channel, base);
+}
+
+static inline struct imx8qxp_ldb *base_to_imx8qxp_ldb(struct ldb *base)
+{
+ return container_of(base, struct imx8qxp_ldb, base);
+}
+
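+/*
+ * On i.MX8qxp each LDB instance drives a single channel, so dual-link
+ * (split) mode pairs two LDB instances. The instance that references a
+ * companion bridge is the master; the companion has no such reference and
+ * therefore configures its PHY as the slave.
+ */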
+static void imx8qxp_ldb_set_phy_cfg(struct imx8qxp_ldb *imx8qxp_ldb,
+ unsigned long di_clk, bool is_split,
+ struct phy_configure_opts_lvds *phy_cfg)
+{
+ phy_cfg->bits_per_lane_and_dclk_cycle = 7;
+ phy_cfg->lanes = 4;
+
+ if (is_split) {
+ phy_cfg->differential_clk_rate = di_clk / 2;
+ phy_cfg->is_slave = !imx8qxp_ldb->companion;
+ } else {
+ phy_cfg->differential_clk_rate = di_clk;
+ phy_cfg->is_slave = false;
+ }
+}
+
+static int
+imx8qxp_ldb_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
+ base_to_imx8qxp_ldb_channel(ldb_ch);
+ struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb);
+ struct drm_bridge *companion = imx8qxp_ldb->companion;
+ struct drm_display_mode *adj = &crtc_state->adjusted_mode;
+ unsigned long di_clk = adj->clock * 1000;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ union phy_configure_opts opts = { };
+ struct phy_configure_opts_lvds *phy_cfg = &opts.lvds;
+ int ret;
+
+ ret = ldb_bridge_atomic_check_helper(bridge, bridge_state,
+ crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ imx8qxp_ldb_set_phy_cfg(imx8qxp_ldb, di_clk, is_split, phy_cfg);
+ ret = phy_validate(imx8qxp_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
+ if (ret < 0) {
+ DRM_DEV_DEBUG_DRIVER(imx8qxp_ldb->dev,
+ "failed to validate PHY: %d\n", ret);
+ return ret;
+ }
+
+ if (is_split && companion) {
+ ret = companion->funcs->atomic_check(companion,
+ bridge_state, crtc_state, conn_state);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void
+imx8qxp_ldb_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb_channel *companion_ldb_ch;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
+ base_to_imx8qxp_ldb_channel(ldb_ch);
+ struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb);
+ struct drm_bridge *companion = imx8qxp_ldb->companion;
+ struct device *dev = imx8qxp_ldb->dev;
+ unsigned long di_clk = adjusted_mode->clock * 1000;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ union phy_configure_opts opts = { };
+ struct phy_configure_opts_lvds *phy_cfg = &opts.lvds;
+ u32 chno = ldb_ch->chno;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to get runtime PM sync: %d\n", ret);
+
+ ret = phy_init(imx8qxp_ldb_ch->phy);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to initialize PHY: %d\n", ret);
+
+ ret = phy_set_mode(imx8qxp_ldb_ch->phy, PHY_MODE_LVDS);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to set PHY mode: %d\n", ret);
+
+ if (is_split && companion) {
+ companion_ldb_ch = bridge_to_ldb_ch(companion);
+
+ companion_ldb_ch->in_bus_format = ldb_ch->in_bus_format;
+ companion_ldb_ch->out_bus_format = ldb_ch->out_bus_format;
+ }
+
+ clk_set_rate(imx8qxp_ldb->clk_bypass, di_clk);
+ clk_set_rate(imx8qxp_ldb->clk_pixel, di_clk);
+
+ imx8qxp_ldb_set_phy_cfg(imx8qxp_ldb, di_clk, is_split, phy_cfg);
+ ret = phy_configure(imx8qxp_ldb_ch->phy, &opts);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to configure PHY: %d\n", ret);
+
+ if (chno == 0)
+ ldb->ldb_ctrl &= ~LDB_CH_SEL;
+ else
+ ldb->ldb_ctrl |= LDB_CH_SEL;
+
+ /* input VSYNC signal from pixel link is active low */
+ if (imx8qxp_ldb_ch->di_id == 0)
+ ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
+ else
+ ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
+
+ /*
+ * For split mode, program the input VSYNC signal polarity and
+ * channel selection early.
+ */
+ if (is_split)
+ regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
+
+ ldb_bridge_mode_set_helper(bridge, mode, adjusted_mode);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL, CH_VSYNC_M(chno), 0);
+ else if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL,
+ CH_VSYNC_M(chno), CH_PVSYNC(chno));
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL, CH_HSYNC_M(chno), 0);
+ else if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regmap_update_bits(ldb->regmap, SS_CTRL,
+ CH_HSYNC_M(chno), CH_PHSYNC(chno));
+
+ if (is_split && companion)
+ companion->funcs->mode_set(companion, mode, adjusted_mode);
+}
+
+static void
+imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb);
+ struct drm_bridge *companion = imx8qxp_ldb->companion;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+
+ clk_prepare_enable(imx8qxp_ldb->clk_pixel);
+ clk_prepare_enable(imx8qxp_ldb->clk_bypass);
+
+ if (is_split && companion)
+ companion->funcs->atomic_pre_enable(companion, old_bridge_state);
+}
+
+static void
+imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
+ base_to_imx8qxp_ldb_channel(ldb_ch);
+ struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb);
+ struct drm_bridge *companion = imx8qxp_ldb->companion;
+ struct device *dev = imx8qxp_ldb->dev;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ int ret;
+
+ if (ldb_ch->chno == 0 || is_split) {
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ ldb->ldb_ctrl |= imx8qxp_ldb_ch->di_id == 0 ?
+ LDB_CH0_MODE_EN_TO_DI0 : LDB_CH0_MODE_EN_TO_DI1;
+ }
+ if (ldb_ch->chno == 1 || is_split) {
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+ ldb->ldb_ctrl |= imx8qxp_ldb_ch->di_id == 0 ?
+ LDB_CH1_MODE_EN_TO_DI0 : LDB_CH1_MODE_EN_TO_DI1;
+ }
+
+ ldb_bridge_enable_helper(bridge);
+
+ ret = phy_power_on(imx8qxp_ldb_ch->phy);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret);
+
+ if (is_split && companion)
+ companion->funcs->atomic_enable(companion, old_bridge_state);
+}
+
+static void
+imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ struct ldb *ldb = ldb_ch->ldb;
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
+ base_to_imx8qxp_ldb_channel(ldb_ch);
+ struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb);
+ struct drm_bridge *companion = imx8qxp_ldb->companion;
+ struct device *dev = imx8qxp_ldb->dev;
+ bool is_split = ldb_channel_is_split_link(ldb_ch);
+ int ret;
+
+ ret = phy_power_off(imx8qxp_ldb_ch->phy);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to power off PHY: %d\n", ret);
+
+ ret = phy_exit(imx8qxp_ldb_ch->phy);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to teardown PHY: %d\n", ret);
+
+ ldb_bridge_disable_helper(bridge);
+
+ clk_disable_unprepare(imx8qxp_ldb->clk_bypass);
+ clk_disable_unprepare(imx8qxp_ldb->clk_pixel);
+
+ if (is_split && companion)
+ companion->funcs->atomic_disable(companion, old_bridge_state);
+
+ ret = pm_runtime_put(dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to put runtime PM: %d\n", ret);
+}
+
+static const u32 imx8qxp_ldb_bus_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ MEDIA_BUS_FMT_FIXED,
+};
+
+static bool imx8qxp_ldb_bus_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8qxp_ldb_bus_output_fmts); i++) {
+ if (imx8qxp_ldb_bus_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx8qxp_ldb_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct drm_display_info *di;
+ const struct drm_format_info *finfo;
+ u32 *input_fmts;
+
+ if (!imx8qxp_ldb_bus_output_fmt_supported(output_fmt))
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ case MEDIA_BUS_FMT_FIXED:
+ di = &conn_state->connector->display_info;
+
+ /*
+ * Look at the first bus format to determine input format.
+ * Default to MEDIA_BUS_FMT_RGB888_1X24 if there is no match.
+ */
+ if (di->num_bus_formats) {
+ finfo = drm_format_info(di->bus_formats[0]);
+
+ input_fmts[0] = finfo->depth == 18 ?
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI :
+ MEDIA_BUS_FMT_RGB888_1X24;
+ } else {
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ }
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ kfree(input_fmts);
+ input_fmts = NULL;
+ break;
+ }
+
+ return input_fmts;
+}
+
+static u32 *
+imx8qxp_ldb_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ *num_output_fmts = ARRAY_SIZE(imx8qxp_ldb_bus_output_fmts);
+ return kmemdup(imx8qxp_ldb_bus_output_fmts,
+ sizeof(imx8qxp_ldb_bus_output_fmts), GFP_KERNEL);
+}
+
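+/*
+ * A single LVDS channel handles pixel clocks up to 150MHz; dual-link
+ * (split) mode across two LDB instances extends that to 170MHz.
+ */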
+static enum drm_mode_status
+imx8qxp_ldb_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct ldb_channel *ldb_ch = bridge->driver_private;
+ bool is_single = ldb_channel_is_single_link(ldb_ch);
+
+ if (mode->clock > 170000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock > 150000 && is_single)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .mode_valid = imx8qxp_ldb_bridge_mode_valid,
+ .attach = ldb_bridge_attach_helper,
+ .atomic_check = imx8qxp_ldb_bridge_atomic_check,
+ .mode_set = imx8qxp_ldb_bridge_mode_set,
+ .atomic_pre_enable = imx8qxp_ldb_bridge_atomic_pre_enable,
+ .atomic_enable = imx8qxp_ldb_bridge_atomic_enable,
+ .atomic_disable = imx8qxp_ldb_bridge_atomic_disable,
+ .atomic_get_input_bus_fmts =
+ imx8qxp_ldb_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts =
+ imx8qxp_ldb_bridge_atomic_get_output_bus_fmts,
+};
+
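+/*
+ * The DI (display interface) index comes from the id of the remote
+ * endpoint connected to LDB port 0; it selects whether the channel is
+ * driven from DI0 or DI1.
+ */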
+static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
+{
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
+ &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
+ struct device_node *ep, *remote;
+ struct device *dev = imx8qxp_ldb->dev;
+ struct of_endpoint endpoint;
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(ldb_ch->np, 0, -1);
+ if (!ep) {
+ DRM_DEV_ERROR(dev, "failed to get port0 endpoint\n");
+ return -EINVAL;
+ }
+
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote) {
+ DRM_DEV_ERROR(dev, "failed to get port0 remote endpoint\n");
+ return -EINVAL;
+ }
+
+ ret = of_graph_parse_endpoint(remote, &endpoint);
+ of_node_put(remote);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to parse port0 remote endpoint: %d\n",
+ ret);
+ return ret;
+ }
+
+ imx8qxp_ldb_ch->di_id = endpoint.id;
+
+ return 0;
+}
+
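+/*
+ * In dual-link mode channel 0 must carry the odd pixels and channel 1 the
+ * even pixels, so reject any other combination.
+ */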
+static int
+imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
+{
+ if ((link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS && ldb_ch->chno != 0) ||
+ (link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS && ldb_ch->chno != 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
+{
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
+ &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
+ struct ldb_channel *companion_ldb_ch;
+ struct device_node *companion;
+ struct device_node *child;
+ struct device_node *companion_port = NULL;
+ struct device_node *port1, *port2;
+ struct device *dev = imx8qxp_ldb->dev;
+ const struct of_device_id *match;
+ u32 i;
+ int dual_link;
+ int ret;
+
+ /* Locate the companion LDB for dual-link operation, if any. */
+ companion = of_parse_phandle(dev->of_node, "fsl,companion-ldb", 0);
+ if (!companion)
+ return 0;
+
+ if (!of_device_is_available(companion)) {
+ DRM_DEV_ERROR(dev, "companion LDB is not available\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Sanity check: the companion bridge must have the same compatible
+ * string.
+ */
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!of_device_is_compatible(companion, match->compatible)) {
+ DRM_DEV_ERROR(dev, "companion LDB is incompatible\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ for_each_available_child_of_node(companion, child) {
+ ret = of_property_read_u32(child, "reg", &i);
+ if (ret || i > MAX_LDB_CHAN_NUM - 1) {
+ DRM_DEV_ERROR(dev,
+ "invalid channel node address: %u\n", i);
+ ret = -EINVAL;
+ of_node_put(child);
+ goto out;
+ }
+
+ /*
+ * Channel numbers have to be different, because channel0
+ * transmits odd pixels and channel1 transmits even pixels.
+ */
+ if (i == (ldb_ch->chno ^ 0x1)) {
+ companion_port = child;
+ break;
+ }
+ }
+
+ if (!companion_port) {
+ DRM_DEV_ERROR(dev,
+ "failed to find companion LDB channel port\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * We need to work out if the sink is expecting us to function in
+ * dual-link mode. We do this by looking at the DT port nodes we are
+ * connected to. If they are marked as expecting odd pixels and
+ * even pixels then we need to enable LDB split mode.
+ */
+ port1 = of_graph_get_port_by_id(ldb_ch->np, 1);
+ port2 = of_graph_get_port_by_id(companion_port, 1);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
+ of_node_put(port1);
+ of_node_put(port2);
+
+ switch (dual_link) {
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ ldb_ch->link_type = LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS;
+ break;
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ ldb_ch->link_type = LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS;
+ break;
+ default:
+ ret = dual_link;
+ DRM_DEV_ERROR(dev,
+ "failed to get dual link pixel order: %d\n", ret);
+ goto out;
+ }
+
+ ret = imx8qxp_ldb_check_chno_and_dual_link(ldb_ch, dual_link);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev,
+ "unmatched channel number(%u) vs dual link(%d)\n",
+ ldb_ch->chno, dual_link);
+ goto out;
+ }
+
+ imx8qxp_ldb->companion = of_drm_find_bridge(companion_port);
+ if (!imx8qxp_ldb->companion) {
+ ret = -EPROBE_DEFER;
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "failed to find bridge for companion bridge: %d\n",
+ ret);
+ goto out;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "dual-link configuration detected (companion bridge %pOF)\n",
+ companion);
+
+ companion_ldb_ch = bridge_to_ldb_ch(imx8qxp_ldb->companion);
+ companion_ldb_ch->link_type = ldb_ch->link_type;
+out:
+ of_node_put(companion_port);
+ of_node_put(companion);
+ return ret;
+}
+
+static int imx8qxp_ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx8qxp_ldb *imx8qxp_ldb;
+ struct imx8qxp_ldb_channel *imx8qxp_ldb_ch;
+ struct ldb *ldb;
+ struct ldb_channel *ldb_ch;
+ int ret, i;
+
+ imx8qxp_ldb = devm_kzalloc(dev, sizeof(*imx8qxp_ldb), GFP_KERNEL);
+ if (!imx8qxp_ldb)
+ return -ENOMEM;
+
+ imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
+ if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
+ ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "failed to get pixel clock: %d\n", ret);
+ return ret;
+ }
+
+ imx8qxp_ldb->clk_bypass = devm_clk_get(dev, "bypass");
+ if (IS_ERR(imx8qxp_ldb->clk_bypass)) {
+ ret = PTR_ERR(imx8qxp_ldb->clk_bypass);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "failed to get bypass clock: %d\n", ret);
+ return ret;
+ }
+
+ imx8qxp_ldb->dev = dev;
+
+ ldb = &imx8qxp_ldb->base;
+ ldb->dev = dev;
+ ldb->ctrl_reg = 0xe0;
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
+ ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
+
+ ret = ldb_init_helper(ldb);
+ if (ret)
+ return ret;
+
+ if (ldb->available_ch_cnt == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, "no available channel\n");
+ return 0;
+ } else if (ldb->available_ch_cnt > 1) {
+ DRM_DEV_ERROR(dev, "invalid available channel number(%u)\n",
+ ldb->available_ch_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
+ ldb_ch = &imx8qxp_ldb_ch->base;
+
+ if (ldb_ch->is_available) {
+ imx8qxp_ldb->active_chno = ldb_ch->chno;
+ break;
+ }
+ }
+
+ imx8qxp_ldb_ch->phy = devm_of_phy_get(dev, ldb_ch->np, "lvds_phy");
+ if (IS_ERR(imx8qxp_ldb_ch->phy)) {
+ ret = PTR_ERR(imx8qxp_ldb_ch->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get channel%d PHY: %d\n",
+ imx8qxp_ldb->active_chno, ret);
+ return ret;
+ }
+
+ ret = ldb_find_next_bridge_helper(ldb);
+ if (ret)
+ return ret;
+
+ ret = imx8qxp_ldb_set_di_id(imx8qxp_ldb);
+ if (ret)
+ return ret;
+
+ ret = imx8qxp_ldb_parse_dt_companion(imx8qxp_ldb);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, imx8qxp_ldb);
+ pm_runtime_enable(dev);
+
+ ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
+
+ return ret;
+}
+
+static int imx8qxp_ldb_remove(struct platform_device *pdev)
+{
+ struct imx8qxp_ldb *imx8qxp_ldb = platform_get_drvdata(pdev);
+ struct ldb *ldb = &imx8qxp_ldb->base;
+
+ ldb_remove_bridge_helper(ldb);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev)
+{
+ struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);
+ struct ldb *ldb = &imx8qxp_ldb->base;
+
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(ldb->regmap, ldb->ctrl_reg, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend,
+ imx8qxp_ldb_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-ldb" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids);
+
+static struct platform_driver imx8qxp_ldb_driver = {
+ .probe = imx8qxp_ldb_probe,
+ .remove = imx8qxp_ldb_remove,
+ .driver = {
+ .pm = &imx8qxp_ldb_pm_ops,
+ .name = DRIVER_NAME,
+ .of_match_table = imx8qxp_ldb_dt_ids,
+ },
+};
+module_platform_driver(imx8qxp_ldb_driver);
+
+MODULE_DESCRIPTION("i.MX8QXP LVDS Display Bridge(LDB)/Pixel Mapper bridge driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
new file mode 100644
index 000000000000..503bd8db8afe
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_print.h>
+
+#define PC_CTRL_REG 0x0
+#define PC_COMBINE_ENABLE BIT(0)
+#define PC_DISP_BYPASS(n) BIT(1 + 21 * (n))
+#define PC_DISP_HSYNC_POLARITY(n) BIT(2 + 11 * (n))
+#define PC_DISP_HSYNC_POLARITY_POS(n) PC_DISP_HSYNC_POLARITY(n)
+#define PC_DISP_VSYNC_POLARITY(n) BIT(3 + 11 * (n))
+#define PC_DISP_VSYNC_POLARITY_POS(n) PC_DISP_VSYNC_POLARITY(n)
+#define PC_DISP_DVALID_POLARITY(n) BIT(4 + 11 * (n))
+#define PC_DISP_DVALID_POLARITY_POS(n) PC_DISP_DVALID_POLARITY(n)
+#define PC_VSYNC_MASK_ENABLE BIT(5)
+#define PC_SKIP_MODE BIT(6)
+#define PC_SKIP_NUMBER_MASK GENMASK(12, 7)
+#define PC_SKIP_NUMBER(n) FIELD_PREP(PC_SKIP_NUMBER_MASK, (n))
+#define PC_DISP0_PIX_DATA_FORMAT_MASK GENMASK(18, 16)
+#define PC_DISP0_PIX_DATA_FORMAT(fmt) \
+ FIELD_PREP(PC_DISP0_PIX_DATA_FORMAT_MASK, (fmt))
+#define PC_DISP1_PIX_DATA_FORMAT_MASK GENMASK(21, 19)
+#define PC_DISP1_PIX_DATA_FORMAT(fmt) \
+ FIELD_PREP(PC_DISP1_PIX_DATA_FORMAT_MASK, (fmt))
+
+#define PC_SW_RESET_REG 0x20
+#define PC_SW_RESET_N BIT(0)
+#define PC_DISP_SW_RESET_N(n) BIT(1 + (n))
+#define PC_FULL_RESET_N (PC_SW_RESET_N | \
+ PC_DISP_SW_RESET_N(0) | \
+ PC_DISP_SW_RESET_N(1))
+
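+/* set/clear shadow offsets used by the write_set()/write_clr() helpers below */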
+#define PC_REG_SET 0x4
+#define PC_REG_CLR 0x8
+
+#define DRIVER_NAME "imx8qxp-pixel-combiner"
+
+enum imx8qxp_pc_pix_data_format {
+ RGB,
+ YUV444,
+ YUV422,
+ SPLIT_RGB,
+};
+
+struct imx8qxp_pc_channel {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct imx8qxp_pc *pc;
+ unsigned int stream_id;
+ bool is_available;
+};
+
+struct imx8qxp_pc {
+ struct device *dev;
+ struct imx8qxp_pc_channel ch[2];
+ struct clk *clk_apb;
+ void __iomem *base;
+};
+
+static inline u32 imx8qxp_pc_read(struct imx8qxp_pc *pc, unsigned int offset)
+{
+ return readl(pc->base + offset);
+}
+
+static inline void
+imx8qxp_pc_write(struct imx8qxp_pc *pc, unsigned int offset, u32 value)
+{
+ writel(value, pc->base + offset);
+}
+
+static inline void
+imx8qxp_pc_write_set(struct imx8qxp_pc *pc, unsigned int offset, u32 value)
+{
+ imx8qxp_pc_write(pc, offset + PC_REG_SET, value);
+}
+
+static inline void
+imx8qxp_pc_write_clr(struct imx8qxp_pc *pc, unsigned int offset, u32 value)
+{
+ imx8qxp_pc_write(pc, offset + PC_REG_CLR, value);
+}
+
+static enum drm_mode_status
+imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay > 2560)
+ return MODE_BAD_HVALUE;
+
+ return MODE_OK;
+}
+
+static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx8qxp_pc_channel *ch = bridge->driver_private;
+ struct imx8qxp_pc *pc = ch->pc;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ DRM_DEV_ERROR(pc->dev,
+ "do not support creating a drm_connector\n");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_DEV_ERROR(pc->dev, "missing encoder\n");
+ return -ENODEV;
+ }
+
+ return drm_bridge_attach(bridge->encoder,
+ ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
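+/*
+ * Program the per-stream polarities towards the pixel link (HSYNC and
+ * VSYNC active low, data enable active high), mask the first frame,
+ * select the RGB pixel data format and put the stream in bypass mode.
+ */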
+static void
+imx8qxp_pc_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct imx8qxp_pc_channel *ch = bridge->driver_private;
+ struct imx8qxp_pc *pc = ch->pc;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_get_sync(pc->dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(pc->dev,
+ "failed to get runtime PM sync: %d\n", ret);
+
+ ret = clk_prepare_enable(pc->clk_apb);
+ if (ret)
+ DRM_DEV_ERROR(pc->dev, "%s: failed to enable apb clock: %d\n",
+ __func__, ret);
+
+ /* HSYNC to pixel link is active low. */
+ imx8qxp_pc_write_clr(pc, PC_CTRL_REG,
+ PC_DISP_HSYNC_POLARITY(ch->stream_id));
+
+ /* VSYNC to pixel link is active low. */
+ imx8qxp_pc_write_clr(pc, PC_CTRL_REG,
+ PC_DISP_VSYNC_POLARITY(ch->stream_id));
+
+ /* Data enable to pixel link is active high. */
+ imx8qxp_pc_write_set(pc, PC_CTRL_REG,
+ PC_DISP_DVALID_POLARITY(ch->stream_id));
+
+ /* Mask the first frame output which may be incomplete. */
+ imx8qxp_pc_write_set(pc, PC_CTRL_REG, PC_VSYNC_MASK_ENABLE);
+
+ /* Only support RGB currently. */
+ val = imx8qxp_pc_read(pc, PC_CTRL_REG);
+ if (ch->stream_id == 0) {
+ val &= ~PC_DISP0_PIX_DATA_FORMAT_MASK;
+ val |= PC_DISP0_PIX_DATA_FORMAT(RGB);
+ } else {
+ val &= ~PC_DISP1_PIX_DATA_FORMAT_MASK;
+ val |= PC_DISP1_PIX_DATA_FORMAT(RGB);
+ }
+ imx8qxp_pc_write(pc, PC_CTRL_REG, val);
+
+ /* Only support bypass mode currently. */
+ imx8qxp_pc_write_set(pc, PC_CTRL_REG, PC_DISP_BYPASS(ch->stream_id));
+
+ clk_disable_unprepare(pc->clk_apb);
+}
+
+static void
+imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct imx8qxp_pc_channel *ch = bridge->driver_private;
+ struct imx8qxp_pc *pc = ch->pc;
+ int ret;
+
+ ret = pm_runtime_put(pc->dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(pc->dev, "failed to put runtime PM: %d\n", ret);
+}
+
+static const u32 imx8qxp_pc_bus_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X36_CPADLO,
+ MEDIA_BUS_FMT_RGB666_1X36_CPADLO,
+};
+
+static bool imx8qxp_pc_bus_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8qxp_pc_bus_output_fmts); i++) {
+ if (imx8qxp_pc_bus_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx8qxp_pc_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ if (!imx8qxp_pc_bus_output_fmt_supported(output_fmt))
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X36_CPADLO:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X30_CPADLO;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X36_CPADLO:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X30_CPADLO;
+ break;
+ default:
+ kfree(input_fmts);
+ input_fmts = NULL;
+ break;
+ }
+
+ return input_fmts;
+}
+
+static u32 *
+imx8qxp_pc_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ *num_output_fmts = ARRAY_SIZE(imx8qxp_pc_bus_output_fmts);
+ return kmemdup(imx8qxp_pc_bus_output_fmts,
+ sizeof(imx8qxp_pc_bus_output_fmts), GFP_KERNEL);
+}
+
+static const struct drm_bridge_funcs imx8qxp_pc_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .mode_valid = imx8qxp_pc_bridge_mode_valid,
+ .attach = imx8qxp_pc_bridge_attach,
+ .mode_set = imx8qxp_pc_bridge_mode_set,
+ .atomic_disable = imx8qxp_pc_bridge_atomic_disable,
+ .atomic_get_input_bus_fmts =
+ imx8qxp_pc_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts =
+ imx8qxp_pc_bridge_atomic_get_output_bus_fmts,
+};
+
+static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
+{
+ struct imx8qxp_pc *pc;
+ struct imx8qxp_pc_channel *ch;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child, *remote;
+ u32 i;
+ int ret;
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ pc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pc->base))
+ return PTR_ERR(pc->base);
+
+ pc->dev = dev;
+
+ pc->clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(pc->clk_apb)) {
+ ret = PTR_ERR(pc->clk_apb);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get apb clock: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pc);
+ pm_runtime_enable(dev);
+
+ for_each_available_child_of_node(np, child) {
+ ret = of_property_read_u32(child, "reg", &i);
+ if (ret || i > 1) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev,
+ "invalid channel(%u) node address\n", i);
+ goto free_child;
+ }
+
+ ch = &pc->ch[i];
+ ch->pc = pc;
+ ch->stream_id = i;
+
+ remote = of_graph_get_remote_node(child, 1, 0);
+ if (!remote) {
+ ret = -ENODEV;
+ DRM_DEV_ERROR(dev,
+ "channel%u failed to get port1's remote node: %d\n",
+ i, ret);
+ goto free_child;
+ }
+
+ ch->next_bridge = of_drm_find_bridge(remote);
+ if (!ch->next_bridge) {
+ of_node_put(remote);
+ ret = -EPROBE_DEFER;
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "channel%u failed to find next bridge: %d\n",
+ i, ret);
+ goto free_child;
+ }
+
+ of_node_put(remote);
+
+ ch->bridge.driver_private = ch;
+ ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
+ ch->bridge.of_node = child;
+ ch->is_available = true;
+
+ drm_bridge_add(&ch->bridge);
+ }
+
+ return 0;
+
+free_child:
+ of_node_put(child);
+
+ if (i == 1 && pc->ch[0].next_bridge)
+ drm_bridge_remove(&pc->ch[0].bridge);
+
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int imx8qxp_pc_bridge_remove(struct platform_device *pdev)
+{
+ struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
+ struct imx8qxp_pc_channel *ch;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ch = &pc->ch[i];
+
+ if (!ch->is_available)
+ continue;
+
+ drm_bridge_remove(&ch->bridge);
+ ch->is_available = false;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(pc->clk_apb);
+ if (ret)
+ DRM_DEV_ERROR(pc->dev, "%s: failed to enable apb clock: %d\n",
+ __func__, ret);
+
+ /* Disable pixel combiner by full reset. */
+ imx8qxp_pc_write_clr(pc, PC_SW_RESET_REG, PC_FULL_RESET_N);
+
+ clk_disable_unprepare(pc->clk_apb);
+
+ /* Ensure the reset takes effect. */
+ usleep_range(10, 20);
+
+ return ret;
+}
+
+static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(pc->clk_apb);
+ if (ret) {
+ DRM_DEV_ERROR(pc->dev, "%s: failed to enable apb clock: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* out of reset */
+ imx8qxp_pc_write_set(pc, PC_SW_RESET_REG, PC_FULL_RESET_N);
+
+ clk_disable_unprepare(pc->clk_apb);
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx8qxp_pc_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend,
+ imx8qxp_pc_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx8qxp_pc_dt_ids[] = {
+ { .compatible = "fsl,imx8qm-pixel-combiner", },
+ { .compatible = "fsl,imx8qxp-pixel-combiner", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids);
+
+static struct platform_driver imx8qxp_pc_bridge_driver = {
+ .probe = imx8qxp_pc_bridge_probe,
+ .remove = imx8qxp_pc_bridge_remove,
+ .driver = {
+ .pm = &imx8qxp_pc_pm_ops,
+ .name = DRIVER_NAME,
+ .of_match_table = imx8qxp_pc_dt_ids,
+ },
+};
+module_platform_driver(imx8qxp_pc_bridge_driver);
+
+MODULE_DESCRIPTION("i.MX8QM/QXP pixel combiner bridge driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
new file mode 100644
index 000000000000..9e5f2b4dc2e5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_print.h>
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+#define DRIVER_NAME "imx8qxp-display-pixel-link"
+#define PL_MAX_MST_ADDR 3
+#define PL_MAX_NEXT_BRIDGES 2
+
+struct imx8qxp_pixel_link {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct device *dev;
+ struct imx_sc_ipc *ipc_handle;
+ u8 stream_id;
+ u8 dc_id;
+ u32 sink_rsc;
+ u32 mst_addr;
+ u8 mst_addr_ctrl;
+ u8 mst_en_ctrl;
+ u8 mst_vld_ctrl;
+ u8 sync_ctrl;
+};
+
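+/*
+ * The pixel link is controlled through SCU firmware calls on the display
+ * controller resource: mst_en and mst_vld gate the master stream output
+ * and its pixel data valid flag, and sync_ctrl enables the sync signals.
+ */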
+static void imx8qxp_pixel_link_enable_mst_en(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc,
+ pl->mst_en_ctrl, true);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to enable DC%u stream%u pixel link mst_en: %d\n",
+ pl->dc_id, pl->stream_id, ret);
+}
+
+static void imx8qxp_pixel_link_enable_mst_vld(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc,
+ pl->mst_vld_ctrl, true);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to enable DC%u stream%u pixel link mst_vld: %d\n",
+ pl->dc_id, pl->stream_id, ret);
+}
+
+static void imx8qxp_pixel_link_enable_sync(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc,
+ pl->sync_ctrl, true);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to enable DC%u stream%u pixel link sync: %d\n",
+ pl->dc_id, pl->stream_id, ret);
+}
+
+static int imx8qxp_pixel_link_disable_mst_en(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc,
+ pl->mst_en_ctrl, false);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to disable DC%u stream%u pixel link mst_en: %d\n",
+ pl->dc_id, pl->stream_id, ret);
+
+ return ret;
+}
+
+static int imx8qxp_pixel_link_disable_mst_vld(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc,
+ pl->mst_vld_ctrl, false);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to disable DC%u stream%u pixel link mst_vld: %d\n",
+ pl->dc_id, pl->stream_id, ret);
+
+ return ret;
+}
+
+static int imx8qxp_pixel_link_disable_sync(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc,
+ pl->sync_ctrl, false);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to disable DC%u stream%u pixel link sync: %d\n",
+ pl->dc_id, pl->stream_id, ret);
+
+ return ret;
+}
+
+static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx_sc_misc_set_control(pl->ipc_handle,
+ pl->sink_rsc, pl->mst_addr_ctrl,
+ pl->mst_addr);
+ if (ret)
+ DRM_DEV_ERROR(pl->dev,
+ "failed to set DC%u stream%u pixel link mst addr(%u): %d\n",
+ pl->dc_id, pl->stream_id, pl->mst_addr, ret);
+}
+
+static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx8qxp_pixel_link *pl = bridge->driver_private;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ DRM_DEV_ERROR(pl->dev,
+ "do not support creating a drm_connector\n");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_DEV_ERROR(pl->dev, "missing encoder\n");
+ return -ENODEV;
+ }
+
+ return drm_bridge_attach(bridge->encoder,
+ pl->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static void
+imx8qxp_pixel_link_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct imx8qxp_pixel_link *pl = bridge->driver_private;
+
+ imx8qxp_pixel_link_set_mst_addr(pl);
+}
+
+static void
+imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct imx8qxp_pixel_link *pl = bridge->driver_private;
+
+ imx8qxp_pixel_link_enable_mst_en(pl);
+ imx8qxp_pixel_link_enable_mst_vld(pl);
+ imx8qxp_pixel_link_enable_sync(pl);
+}
+
+static void
+imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct imx8qxp_pixel_link *pl = bridge->driver_private;
+
+ imx8qxp_pixel_link_disable_mst_en(pl);
+ imx8qxp_pixel_link_disable_mst_vld(pl);
+ imx8qxp_pixel_link_disable_sync(pl);
+}
+
+static const u32 imx8qxp_pixel_link_bus_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X36_CPADLO,
+ MEDIA_BUS_FMT_RGB666_1X36_CPADLO,
+};
+
+static bool imx8qxp_pixel_link_bus_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8qxp_pixel_link_bus_output_fmts); i++) {
+ if (imx8qxp_pixel_link_bus_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx8qxp_pixel_link_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ if (!imx8qxp_pixel_link_bus_output_fmt_supported(output_fmt))
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+
+ return input_fmts;
+}
+
+static u32 *
+imx8qxp_pixel_link_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ *num_output_fmts = ARRAY_SIZE(imx8qxp_pixel_link_bus_output_fmts);
+ return kmemdup(imx8qxp_pixel_link_bus_output_fmts,
+ sizeof(imx8qxp_pixel_link_bus_output_fmts), GFP_KERNEL);
+}
+
+static const struct drm_bridge_funcs imx8qxp_pixel_link_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = imx8qxp_pixel_link_bridge_attach,
+ .mode_set = imx8qxp_pixel_link_bridge_mode_set,
+ .atomic_enable = imx8qxp_pixel_link_bridge_atomic_enable,
+ .atomic_disable = imx8qxp_pixel_link_bridge_atomic_disable,
+ .atomic_get_input_bus_fmts =
+ imx8qxp_pixel_link_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts =
+ imx8qxp_pixel_link_bridge_atomic_get_output_bus_fmts,
+};
+
+static int imx8qxp_pixel_link_disable_all_controls(struct imx8qxp_pixel_link *pl)
+{
+ int ret;
+
+ ret = imx8qxp_pixel_link_disable_mst_en(pl);
+ if (ret)
+ return ret;
+
+ ret = imx8qxp_pixel_link_disable_mst_vld(pl);
+ if (ret)
+ return ret;
+
+ return imx8qxp_pixel_link_disable_sync(pl);
+}
+
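+/*
+ * Find the first available output port (port 1 and up), derive the pixel
+ * link MST address from its id, and pick a bridge among its remote
+ * endpoints, preferring one with a companion PXL2DPI.
+ */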
+static struct drm_bridge *
+imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl)
+{
+ struct device_node *np = pl->dev->of_node;
+ struct device_node *port, *remote;
+ struct drm_bridge *next_bridge[PL_MAX_NEXT_BRIDGES];
+ u32 port_id;
+ bool found_port = false;
+ int reg, ep_cnt = 0;
+ /* select the first next bridge by default */
+ int bridge_sel = 0;
+
+ for (port_id = 1; port_id <= PL_MAX_MST_ADDR + 1; port_id++) {
+ port = of_graph_get_port_by_id(np, port_id);
+ if (!port)
+ continue;
+
+ if (of_device_is_available(port)) {
+ found_port = true;
+ of_node_put(port);
+ break;
+ }
+
+ of_node_put(port);
+ }
+
+ if (!found_port) {
+ DRM_DEV_ERROR(pl->dev, "no available output port\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ for (reg = 0; reg < PL_MAX_NEXT_BRIDGES; reg++) {
+ remote = of_graph_get_remote_node(np, port_id, reg);
+ if (!remote)
+ continue;
+
+ if (!of_device_is_available(remote->parent)) {
+ DRM_DEV_DEBUG(pl->dev,
+ "port%u endpoint%u remote parent is not available\n",
+ port_id, reg);
+ of_node_put(remote);
+ continue;
+ }
+
+ next_bridge[ep_cnt] = of_drm_find_bridge(remote);
+ if (!next_bridge[ep_cnt]) {
+ of_node_put(remote);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ /* prefer the next bridge that has a companion PXL2DPI */
+ if (of_find_property(remote, "fsl,companion-pxl2dpi", NULL))
+ bridge_sel = ep_cnt;
+
+ ep_cnt++;
+
+ of_node_put(remote);
+ }
+
+ pl->mst_addr = port_id - 1;
+
+ return next_bridge[bridge_sel];
+}
+
+static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
+{
+ struct imx8qxp_pixel_link *pl;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL);
+ if (!pl)
+ return -ENOMEM;
+
+ ret = imx_scu_get_handle(&pl->ipc_handle);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get SCU ipc handle: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = of_property_read_u8(np, "fsl,dc-id", &pl->dc_id);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to get DC index: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u8(np, "fsl,dc-stream-id", &pl->stream_id);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to get DC stream index: %d\n", ret);
+ return ret;
+ }
+
+ pl->dev = dev;
+
+ pl->sink_rsc = pl->dc_id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
+
+ if (pl->stream_id == 0) {
+ pl->mst_addr_ctrl = IMX_SC_C_PXL_LINK_MST1_ADDR;
+ pl->mst_en_ctrl = IMX_SC_C_PXL_LINK_MST1_ENB;
+ pl->mst_vld_ctrl = IMX_SC_C_PXL_LINK_MST1_VLD;
+ pl->sync_ctrl = IMX_SC_C_SYNC_CTRL0;
+ } else {
+ pl->mst_addr_ctrl = IMX_SC_C_PXL_LINK_MST2_ADDR;
+ pl->mst_en_ctrl = IMX_SC_C_PXL_LINK_MST2_ENB;
+ pl->mst_vld_ctrl = IMX_SC_C_PXL_LINK_MST2_VLD;
+ pl->sync_ctrl = IMX_SC_C_SYNC_CTRL1;
+ }
+
+ /* disable all controls to POR default */
+ ret = imx8qxp_pixel_link_disable_all_controls(pl);
+ if (ret)
+ return ret;
+
+ pl->next_bridge = imx8qxp_pixel_link_find_next_bridge(pl);
+ if (IS_ERR(pl->next_bridge)) {
+ ret = PTR_ERR(pl->next_bridge);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to find next bridge: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pl);
+
+ pl->bridge.driver_private = pl;
+ pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs;
+ pl->bridge.of_node = np;
+
+ drm_bridge_add(&pl->bridge);
+
+ return ret;
+}
+
+static int imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev)
+{
+ struct imx8qxp_pixel_link *pl = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&pl->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id imx8qxp_pixel_link_dt_ids[] = {
+ { .compatible = "fsl,imx8qm-dc-pixel-link", },
+ { .compatible = "fsl,imx8qxp-dc-pixel-link", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids);
+
+static struct platform_driver imx8qxp_pixel_link_bridge_driver = {
+ .probe = imx8qxp_pixel_link_bridge_probe,
+ .remove = imx8qxp_pixel_link_bridge_remove,
+ .driver = {
+ .of_match_table = imx8qxp_pixel_link_dt_ids,
+ .name = DRIVER_NAME,
+ },
+};
+module_platform_driver(imx8qxp_pixel_link_bridge_driver);
+
+MODULE_DESCRIPTION("i.MX8QXP/QM display pixel link bridge driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
new file mode 100644
index 000000000000..d0fec82f0cf8
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/media-bus-format.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+#define PXL2DPI_CTRL 0x40
+#define CFG1_16BIT 0x0
+#define CFG2_16BIT 0x1
+#define CFG3_16BIT 0x2
+#define CFG1_18BIT 0x3
+#define CFG2_18BIT 0x4
+#define CFG_24BIT 0x5
+
+#define DRIVER_NAME "imx8qxp-pxl2dpi"
+
+struct imx8qxp_pxl2dpi {
+ struct regmap *regmap;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_bridge *companion;
+ struct device *dev;
+ struct imx_sc_ipc *ipc_handle;
+ u32 sc_resource;
+ u32 in_bus_format;
+ u32 out_bus_format;
+ u32 pl_sel;
+};
+
+#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
+
+static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ DRM_DEV_ERROR(p2d->dev,
+ "do not support creating a drm_connector\n");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_DEV_ERROR(p2d->dev, "missing encoder\n");
+ return -ENODEV;
+ }
+
+ return drm_bridge_attach(bridge->encoder,
+ p2d->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static int
+imx8qxp_pxl2dpi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
+
+ p2d->in_bus_format = bridge_state->input_bus_cfg.format;
+ p2d->out_bus_format = bridge_state->output_bus_cfg.format;
+
+ return 0;
+}
+
+static void
+imx8qxp_pxl2dpi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
+ struct imx8qxp_pxl2dpi *companion_p2d;
+ int ret;
+
+ ret = pm_runtime_get_sync(p2d->dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(p2d->dev,
+ "failed to get runtime PM sync: %d\n", ret);
+
+ ret = imx_sc_misc_set_control(p2d->ipc_handle, p2d->sc_resource,
+ IMX_SC_C_PXL_LINK_SEL, p2d->pl_sel);
+ if (ret)
+ DRM_DEV_ERROR(p2d->dev,
+ "failed to set pixel link selection(%u): %d\n",
+ p2d->pl_sel, ret);
+
+ switch (p2d->out_bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ regmap_write(p2d->regmap, PXL2DPI_CTRL, CFG_24BIT);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ regmap_write(p2d->regmap, PXL2DPI_CTRL, CFG2_18BIT);
+ break;
+ default:
+ DRM_DEV_ERROR(p2d->dev,
+ "unsupported output bus format 0x%08x\n",
+ p2d->out_bus_format);
+ }
+
+ if (p2d->companion) {
+ companion_p2d = bridge_to_p2d(p2d->companion);
+
+ companion_p2d->in_bus_format = p2d->in_bus_format;
+ companion_p2d->out_bus_format = p2d->out_bus_format;
+
+ p2d->companion->funcs->mode_set(p2d->companion, mode,
+ adjusted_mode);
+ }
+}
+
+static void
+imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
+ int ret;
+
+ ret = pm_runtime_put(p2d->dev);
+ if (ret < 0)
+ DRM_DEV_ERROR(p2d->dev, "failed to put runtime PM: %d\n", ret);
+
+ if (p2d->companion)
+ p2d->companion->funcs->atomic_disable(p2d->companion,
+ old_bridge_state);
+}
+
+static const u32 imx8qxp_pxl2dpi_bus_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+};
+
+static bool imx8qxp_pxl2dpi_bus_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8qxp_pxl2dpi_bus_output_fmts); i++) {
+ if (imx8qxp_pxl2dpi_bus_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx8qxp_pxl2dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ if (!imx8qxp_pxl2dpi_bus_output_fmt_supported(output_fmt))
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X36_CPADLO;
+ break;
+ default:
+ kfree(input_fmts);
+ input_fmts = NULL;
+ break;
+ }
+
+ return input_fmts;
+}
+
+static u32 *
+imx8qxp_pxl2dpi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ *num_output_fmts = ARRAY_SIZE(imx8qxp_pxl2dpi_bus_output_fmts);
+ return kmemdup(imx8qxp_pxl2dpi_bus_output_fmts,
+ sizeof(imx8qxp_pxl2dpi_bus_output_fmts), GFP_KERNEL);
+}
+
+static const struct drm_bridge_funcs imx8qxp_pxl2dpi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = imx8qxp_pxl2dpi_bridge_attach,
+ .atomic_check = imx8qxp_pxl2dpi_bridge_atomic_check,
+ .mode_set = imx8qxp_pxl2dpi_bridge_mode_set,
+ .atomic_disable = imx8qxp_pxl2dpi_bridge_atomic_disable,
+ .atomic_get_input_bus_fmts =
+ imx8qxp_pxl2dpi_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts =
+ imx8qxp_pxl2dpi_bridge_atomic_get_output_bus_fmts,
+};
+
+static struct device_node *
+imx8qxp_pxl2dpi_get_available_ep_from_port(struct imx8qxp_pxl2dpi *p2d,
+ u32 port_id)
+{
+ struct device_node *port, *ep;
+ int ep_cnt;
+
+ port = of_graph_get_port_by_id(p2d->dev->of_node, port_id);
+ if (!port) {
+ DRM_DEV_ERROR(p2d->dev, "failed to get port@%u\n", port_id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ ep_cnt = of_get_available_child_count(port);
+ if (ep_cnt == 0) {
+ DRM_DEV_ERROR(p2d->dev, "no available endpoints of port@%u\n",
+ port_id);
+ ep = ERR_PTR(-ENODEV);
+ goto out;
+ } else if (ep_cnt > 1) {
+ DRM_DEV_ERROR(p2d->dev,
+ "invalid available endpoints of port@%u\n",
+ port_id);
+ ep = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ ep = of_get_next_available_child(port, NULL);
+ if (!ep) {
+ DRM_DEV_ERROR(p2d->dev,
+ "failed to get available endpoint of port@%u\n",
+ port_id);
+ ep = ERR_PTR(-ENODEV);
+ goto out;
+ }
+out:
+ of_node_put(port);
+ return ep;
+}
+
+static struct drm_bridge *
+imx8qxp_pxl2dpi_find_next_bridge(struct imx8qxp_pxl2dpi *p2d)
+{
+ struct device_node *ep, *remote;
+ struct drm_bridge *next_bridge;
+ int ret;
+
+ ep = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 1);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ return ERR_PTR(ret);
+ }
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ DRM_DEV_ERROR(p2d->dev, "no available remote\n");
+ next_bridge = ERR_PTR(-ENODEV);
+ goto out;
+ } else if (!of_device_is_available(remote->parent)) {
+ DRM_DEV_ERROR(p2d->dev, "remote parent is not available\n");
+ next_bridge = ERR_PTR(-ENODEV);
+ goto out;
+ }
+
+ next_bridge = of_drm_find_bridge(remote);
+ if (!next_bridge) {
+ next_bridge = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+out:
+ of_node_put(remote);
+ of_node_put(ep);
+
+ return next_bridge;
+}
+
+static int imx8qxp_pxl2dpi_set_pixel_link_sel(struct imx8qxp_pxl2dpi *p2d)
+{
+ struct device_node *ep;
+ struct of_endpoint endpoint;
+ int ret;
+
+ ep = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 0);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+ if (ret) {
+ DRM_DEV_ERROR(p2d->dev,
+ "failed to parse endpoint of port@0: %d\n", ret);
+ goto out;
+ }
+
+ p2d->pl_sel = endpoint.id;
+out:
+ of_node_put(ep);
+
+ return ret;
+}
+
+static int imx8qxp_pxl2dpi_parse_dt_companion(struct imx8qxp_pxl2dpi *p2d)
+{
+ struct imx8qxp_pxl2dpi *companion_p2d;
+ struct device *dev = p2d->dev;
+ struct device_node *companion;
+ struct device_node *port1, *port2;
+ const struct of_device_id *match;
+ int dual_link;
+ int ret = 0;
+
+ /* Locate the companion PXL2DPI for dual-link operation, if any. */
+ companion = of_parse_phandle(dev->of_node, "fsl,companion-pxl2dpi", 0);
+ if (!companion)
+ return 0;
+
+ if (!of_device_is_available(companion)) {
+ DRM_DEV_ERROR(dev, "companion PXL2DPI is not available\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Sanity check: the companion bridge must have the same compatible
+ * string.
+ */
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!of_device_is_compatible(companion, match->compatible)) {
+ DRM_DEV_ERROR(dev, "companion PXL2DPI is incompatible\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ p2d->companion = of_drm_find_bridge(companion);
+ if (!p2d->companion) {
+ ret = -EPROBE_DEFER;
+ DRM_DEV_DEBUG_DRIVER(p2d->dev,
+ "failed to find companion bridge: %d\n",
+ ret);
+ goto out;
+ }
+
+ companion_p2d = bridge_to_p2d(p2d->companion);
+
+ /*
+ * We need to work out if the sink is expecting us to function in
+ * dual-link mode. We do this by looking at the DT port nodes that
+ * the next bridges are connected to. If they are marked as expecting
+ * even pixels and odd pixels then we need to use the companion PXL2DPI.
+ */
+ port1 = of_graph_get_port_by_id(p2d->next_bridge->of_node, 1);
+ port2 = of_graph_get_port_by_id(companion_p2d->next_bridge->of_node, 1);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
+ of_node_put(port1);
+ of_node_put(port2);
+
+ if (dual_link < 0) {
+ ret = dual_link;
+ DRM_DEV_ERROR(dev, "failed to get dual link pixel order: %d\n",
+ ret);
+ goto out;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "dual-link configuration detected (companion bridge %pOF)\n",
+ companion);
+out:
+ of_node_put(companion);
+ return ret;
+}
+
+static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
+{
+ struct imx8qxp_pxl2dpi *p2d;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
+ if (!p2d)
+ return -ENOMEM;
+
+ p2d->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(p2d->regmap)) {
+ ret = PTR_ERR(p2d->regmap);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = imx_scu_get_handle(&p2d->ipc_handle);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get SCU ipc handle: %d\n",
+ ret);
+ return ret;
+ }
+
+ p2d->dev = dev;
+
+ ret = of_property_read_u32(np, "fsl,sc-resource", &p2d->sc_resource);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to get SC resource %d\n", ret);
+ return ret;
+ }
+
+ p2d->next_bridge = imx8qxp_pxl2dpi_find_next_bridge(p2d);
+ if (IS_ERR(p2d->next_bridge)) {
+ ret = PTR_ERR(p2d->next_bridge);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to find next bridge: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = imx8qxp_pxl2dpi_set_pixel_link_sel(p2d);
+ if (ret)
+ return ret;
+
+ ret = imx8qxp_pxl2dpi_parse_dt_companion(p2d);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, p2d);
+ pm_runtime_enable(dev);
+
+ p2d->bridge.driver_private = p2d;
+ p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
+ p2d->bridge.of_node = np;
+
+ drm_bridge_add(&p2d->bridge);
+
+ return ret;
+}
+
+static int imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev)
+{
+ struct imx8qxp_pxl2dpi *p2d = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&p2d->bridge);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx8qxp_pxl2dpi_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-pxl2dpi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids);
+
+static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = {
+ .probe = imx8qxp_pxl2dpi_bridge_probe,
+ .remove = imx8qxp_pxl2dpi_bridge_remove,
+ .driver = {
+ .of_match_table = imx8qxp_pxl2dpi_dt_ids,
+ .name = DRIVER_NAME,
+ },
+};
+module_platform_driver(imx8qxp_pxl2dpi_bridge_driver);
+
+MODULE_DESCRIPTION("i.MX8QXP pixel link to DPI bridge driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
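For reference, a minimal sketch of how a display-controller driver might pick up a bridge chain such as the pixel link and PXL2DPI bridges added above. The encoder-side helper and the port/endpoint numbers are hypothetical; only devm_drm_of_get_bridge() and drm_bridge_attach() mirror calls already used in this series.

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>

/*
 * Hypothetical encoder-side helper: look up the first bridge wired to
 * port@0/endpoint@0 of the encoder's OF node and attach it without a
 * connector, which is what the bridges above require.
 */
static int example_attach_bridge_chain(struct device *dev,
					struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* the bridges above reject attach without DRM_BRIDGE_ATTACH_NO_CONNECTOR */
	return drm_bridge_attach(encoder, bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}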
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 448c58e60c11..44278d54d35d 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index c642d1e02b2f..28bad30dc4e5 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -7,10 +7,12 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/regmap.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -607,7 +609,6 @@ static int lt8912_parse_dt(struct lt8912 *lt)
int ret;
int data_lanes;
struct device_node *port_node;
- struct device_node *endpoint;
gp_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gp_reset)) {
@@ -618,16 +619,12 @@ static int lt8912_parse_dt(struct lt8912 *lt)
}
lt->gp_reset = gp_reset;
- endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
- if (!endpoint)
- return -ENODEV;
-
- data_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
- of_node_put(endpoint);
+ data_lanes = drm_of_get_data_lanes_count_ep(dev->of_node, 0, -1, 1, 4);
if (data_lanes < 0) {
dev_err(lt->dev, "%s: Bad data-lanes property\n", __func__);
return data_lanes;
}
+
lt->data_lanes = data_lanes;
lt->host_node = of_graph_get_remote_node(dev->of_node, 0, -1);
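The lt8912b change above replaces open-coded of_property_count_u32_elems() with the new drm_of_get_data_lanes_count_ep() helper. Below is a minimal sketch of the pattern with a hypothetical caller; the port/endpoint selection and the 1..4 lane range are taken from the hunk above.

#include <linux/device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>

/*
 * Hypothetical probe helper: count and validate "data-lanes" on
 * port@0 (any endpoint), accepting 1 to 4 lanes.
 */
static int example_set_dsi_lanes(struct device *dev,
				 struct mipi_dsi_device *dsi)
{
	int dsi_lanes;

	dsi_lanes = drm_of_get_data_lanes_count_ep(dev->of_node, 0, -1, 1, 4);
	if (dsi_lanes < 0)
		return dsi_lanes;	/* negative errno on bad/missing property */

	dsi->lanes = dsi_lanes;

	return 0;
}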
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index e92821fbc639..9a3e90427d12 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
@@ -686,7 +687,7 @@ static int lt9211_host_attach(struct lt9211 *ctx)
int ret;
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
- dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
host_node = of_graph_get_remote_port_parent(endpoint);
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
@@ -695,8 +696,8 @@ static int lt9211_host_attach(struct lt9211 *ctx)
if (!host)
return -EPROBE_DEFER;
- if (dsi_lanes < 0 || dsi_lanes > 4)
- return -EINVAL;
+ if (dsi_lanes < 0)
+ return dsi_lanes;
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi))
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 7ef8fe5abc12..8a60e83482a0 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -5,7 +5,9 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -578,15 +580,13 @@ static struct lt9611_mode *lt9611_find_mode(const struct drm_display_mode *mode)
}
/* connector funcs */
-static enum drm_connector_status
-lt9611_connector_detect(struct drm_connector *connector, bool force)
+static enum drm_connector_status __lt9611_detect(struct lt9611 *lt9611)
{
- struct lt9611 *lt9611 = connector_to_lt9611(connector);
unsigned int reg_val = 0;
int connected = 0;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
- connected = (reg_val & BIT(0));
+ connected = (reg_val & (BIT(2) | BIT(0)));
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
@@ -594,6 +594,12 @@ lt9611_connector_detect(struct drm_connector *connector, bool force)
return lt9611->status;
}
+static enum drm_connector_status
+lt9611_connector_detect(struct drm_connector *connector, bool force)
+{
+ return __lt9611_detect(connector_to_lt9611(connector));
+}
+
static int lt9611_read_edid(struct lt9611 *lt9611)
{
unsigned int temp;
@@ -893,17 +899,7 @@ static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
{
- struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- unsigned int reg_val = 0;
- int connected;
-
- regmap_read(lt9611->regmap, 0x825e, &reg_val);
- connected = reg_val & BIT(0);
-
- lt9611->status = connected ? connector_status_connected :
- connector_status_disconnected;
-
- return lt9611->status;
+ return __lt9611_detect(bridge_to_lt9611(bridge));
}
static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 3d62e6bf6892..fdf12d4c6416 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -6,6 +6,7 @@
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -982,7 +983,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
- flush_scheduled_work();
+ cancel_work_sync(&lt9611uxc->work);
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index ad74e6558eb3..6dc2a4e191d7 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -12,6 +12,7 @@
#include <linux/irq.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
@@ -665,6 +666,12 @@ static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
return ret;
}
+ ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set DSI phy mode: %d\n", ret);
+ goto uninit_phy;
+ }
+
ret = phy_configure(dsi->phy, phy_cfg);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
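The nwl-dsi hunk above inserts phy_set_mode() before phy_configure(). A sketch of the D-PHY bring-up ordering this implies is shown below; the error handling around it is illustrative, and only the generic PHY API calls are assumed.

#include <linux/phy/phy.h>

/*
 * Illustrative D-PHY bring-up: select the PHY mode before configuring
 * and powering the PHY, as the change above does.
 */
static int example_dphy_bringup(struct phy *phy,
				union phy_configure_opts *cfg)
{
	int ret;

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_set_mode(phy, PHY_MODE_MIPI_DPHY);
	if (ret)
		goto err_exit;

	ret = phy_configure(phy, cfg);
	if (ret)
		goto err_exit;

	ret = phy_power_on(phy);
	if (ret)
		goto err_exit;

	return 0;

err_exit:
	phy_exit(phy);
	return ret;
}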
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 0ee563eb2b6f..4277bf4f032b 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -171,6 +171,19 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
};
/**
+ * drm_bridge_is_panel - Checks if a drm_bridge is a panel_bridge.
+ *
+ * @bridge: The drm_bridge to be checked.
+ *
+ * Returns true if the bridge is a panel bridge, or false otherwise.
+ */
+bool drm_bridge_is_panel(const struct drm_bridge *bridge)
+{
+ return bridge->funcs == &panel_bridge_bridge_funcs;
+}
+EXPORT_SYMBOL(drm_bridge_is_panel);
+
+/**
* drm_panel_bridge_add - Creates a &drm_bridge and &drm_connector that
* just calls the appropriate functions from &drm_panel.
*
@@ -269,6 +282,27 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
+/**
+ * drm_panel_bridge_set_orientation - Set the connector's panel orientation
+ * from the bridge that can be transformed to panel bridge.
+ *
+ * @connector: The connector to be set panel orientation.
+ * @bridge: The drm_bridge to be transformed to panel bridge.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge;
+
+ panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return drm_connector_set_orientation_from_panel(connector,
+ panel_bridge->panel);
+}
+EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
+
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
struct drm_bridge **bridge = res;
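A minimal sketch of how the two helpers added above could be used together by a connector-creating driver; the calling function is hypothetical.

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

/*
 * Hypothetical connector setup step: only a panel bridge knows its
 * panel's orientation, so check with drm_bridge_is_panel() before
 * asking drm_panel_bridge_set_orientation() to propagate it.
 */
static int example_set_orientation(struct drm_connector *connector,
				   struct drm_bridge *last_bridge)
{
	if (!drm_bridge_is_panel(last_bridge))
		return 0;

	return drm_panel_bridge_set_orientation(connector, last_bridge);
}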
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 37b308850b4e..b5750e5f71d7 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -324,11 +324,7 @@ error:
static int ps8622_backlight_update(struct backlight_device *bl)
{
struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev);
- int ret, brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int ret, brightness = backlight_get_brightness(bl);
if (!ps8622->enabled)
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index edb939b14c04..31e88cb39f8a 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -16,6 +16,7 @@
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -168,23 +169,35 @@ static bool ps8640_of_panel_on_aux_bus(struct device *dev)
return true;
}
-static int ps8640_ensure_hpd(struct ps8640 *ps_bridge)
+static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wait_us)
{
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
- struct device *dev = &ps_bridge->page[PAGE2_TOP_CNTL]->dev;
int status;
- int ret;
/*
* Apparently something about the firmware in the chip signals that
* HPD goes high by reporting GPIO9 as high (even though HPD isn't
* actually connected to GPIO9).
*/
- ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
- status & PS_GPIO9, 20 * 1000, 200 * 1000);
+ return regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+ status & PS_GPIO9, wait_us / 10, wait_us);
+}
- if (ret < 0)
- dev_warn(dev, "HPD didn't go high: %d\n", ret);
+static int ps8640_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+{
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ int ret;
+
+ /*
+ * Note that this function is called by code that has already powered
+ * the panel. We have to power ourselves up but we don't need to worry
+ * about powering the panel.
+ */
+ pm_runtime_get_sync(dev);
+ ret = _ps8640_wait_hpd_asserted(ps_bridge, wait_us);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -323,9 +336,7 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
int ret;
pm_runtime_get_sync(dev);
- ret = ps8640_ensure_hpd(ps_bridge);
- if (!ret)
- ret = ps8640_aux_transfer_msg(aux, msg);
+ ret = ps8640_aux_transfer_msg(aux, msg);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -369,8 +380,8 @@ static int __maybe_unused ps8640_resume(struct device *dev)
* Mystery 200 ms delay for the "MCU to be ready". It's unclear if
* this is truly necessary since the MCU will already signal that
* things are "good to go" by signaling HPD on "gpio 9". See
- * ps8640_ensure_hpd(). For now we'll keep this mystery delay just in
- * case.
+ * _ps8640_wait_hpd_asserted(). For now we'll keep this mystery delay
+ * just in case.
*/
msleep(200);
@@ -406,7 +417,9 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
int ret;
pm_runtime_get_sync(dev);
- ps8640_ensure_hpd(ps_bridge);
+ ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
+ if (ret < 0)
+ dev_warn(dev, "HPD didn't go high: %d\n", ret);
/*
* The Manufacturer Command Set (MCS) is a device dependent interface
@@ -537,12 +550,11 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.pre_enable = ps8640_pre_enable,
};
-static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridge)
+static int ps8640_bridge_get_dsi_resources(struct device *dev, struct ps8640 *ps_bridge)
{
struct device_node *in_ep, *dsi_node;
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
- int ret;
const struct mipi_dsi_device_info info = { .type = "ps8640",
.channel = 0,
.node = NULL,
@@ -577,17 +589,40 @@ static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridg
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = NUM_MIPI_LANES;
- ret = devm_mipi_dsi_attach(dev, dsi);
+ return 0;
+}
+
+static int ps8640_bridge_link_panel(struct drm_dp_aux *aux)
+{
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct device *dev = aux->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /*
+ * NOTE about returning -EPROBE_DEFER from this function: if we
+ * return an error (most relevant to -EPROBE_DEFER) it will only
+ * be passed out to ps8640_probe() if it called this directly (AKA the
+ * panel isn't under the "aux-bus" node). That should be fine because
+ * if the panel is under "aux-bus" it's guaranteed to have probed by
+ * the time this function has been called.
+ */
+
+ /* port@1 is ps8640 output port */
+ ps_bridge->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
+ if (IS_ERR(ps_bridge->panel_bridge))
+ return PTR_ERR(ps_bridge->panel_bridge);
+
+ ret = devm_drm_bridge_add(dev, &ps_bridge->bridge);
if (ret)
return ret;
- return 0;
+ return devm_mipi_dsi_attach(dev, ps_bridge->dsi);
}
static int ps8640_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct ps8640 *ps_bridge;
int ret;
u32 i;
@@ -628,6 +663,14 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps8640_of_panel_on_aux_bus(&client->dev))
ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
+ /*
+ * Get MIPI DSI resources early. These can return -EPROBE_DEFER so
+ * we want to get them out of the way sooner.
+ */
+ ret = ps8640_bridge_get_dsi_resources(&client->dev, ps_bridge);
+ if (ret)
+ return ret;
+
ps_bridge->page[PAGE0_DP_CNTL] = client;
ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config);
@@ -652,6 +695,7 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->aux.name = "parade-ps8640-aux";
ps_bridge->aux.dev = dev;
ps_bridge->aux.transfer = ps8640_aux_transfer;
+ ps_bridge->aux.wait_hpd_asserted = ps8640_wait_hpd_asserted;
drm_dp_aux_init(&ps_bridge->aux);
pm_runtime_enable(dev);
@@ -670,35 +714,19 @@ static int ps8640_probe(struct i2c_client *client)
if (ret)
return ret;
- devm_of_dp_aux_populate_ep_devices(&ps_bridge->aux);
-
- /* port@1 is ps8640 output port */
- ps_bridge->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
- if (IS_ERR(ps_bridge->panel_bridge))
- return PTR_ERR(ps_bridge->panel_bridge);
-
- drm_bridge_add(&ps_bridge->bridge);
+ ret = devm_of_dp_aux_populate_bus(&ps_bridge->aux, ps8640_bridge_link_panel);
- ret = ps8640_bridge_host_attach(dev, ps_bridge);
- if (ret)
- goto err_bridge_remove;
-
- return 0;
+ /*
+ * If devm_of_dp_aux_populate_bus() returns -ENODEV then it's up to
+ * us to call ps8640_bridge_link_panel() directly. NOTE: in this case
+ * the function is allowed to return -EPROBE_DEFER.
+ */
+ if (ret == -ENODEV)
+ return ps8640_bridge_link_panel(&ps_bridge->aux);
-err_bridge_remove:
- drm_bridge_remove(&ps_bridge->bridge);
return ret;
}
-static int ps8640_remove(struct i2c_client *client)
-{
- struct ps8640 *ps_bridge = i2c_get_clientdata(client);
-
- drm_bridge_remove(&ps_bridge->bridge);
-
- return 0;
-}
-
static const struct of_device_id ps8640_match[] = {
{ .compatible = "parade,ps8640" },
{ }
@@ -707,7 +735,6 @@ MODULE_DEVICE_TABLE(of, ps8640_match);
static struct i2c_driver ps8640_driver = {
.probe_new = ps8640_probe,
- .remove = ps8640_remove,
.driver = {
.name = "ps8640",
.of_match_table = ps8640_match,
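The ps8640 probe now funnels panel linking through devm_of_dp_aux_populate_bus(). A stripped-down sketch of that flow with hypothetical names follows; only the helper and its -ENODEV convention are taken from the hunk above.

#include <linux/errno.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>

/* runs once any aux-bus child devices (e.g. the panel) have probed */
static int example_link_panel(struct drm_dp_aux *aux)
{
	return 0;
}

static int example_probe_tail(struct drm_dp_aux *aux)
{
	int ret;

	ret = devm_of_dp_aux_populate_bus(aux, example_link_panel);
	if (ret == -ENODEV)	/* no aux-bus node in DT: link the panel ourselves */
		return example_link_panel(aux);

	return ret;
}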
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 65549fbfdc87..281f8a9ba4fd 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c-mux.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index ec7745c31da0..ab0bce4a988c 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -605,7 +605,7 @@ static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len)
u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
int size = len + 2;
- if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ if (ctx->burst.tx_count + size >= ARRAY_SIZE(ctx->burst.tx_buf)) {
dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
ctx->error = -EINVAL;
return NULL;
@@ -622,7 +622,7 @@ static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len)
u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
int size = len + 1;
- if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ if (ctx->burst.rx_count + size >= ARRAY_SIZE(ctx->burst.rx_buf)) {
dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
ctx->error = -EINVAL;
return NULL;
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index d974282c12b2..2c5c5211bdab 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 3e1be9894ed1..25a60eb4d67c 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index dca41ed32f8a..fdfb14aca926 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 485717c8f0b4..02bd757a8987 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -3,10 +3,7 @@
* TC358767/TC358867/TC9595 DSI/DPI-to-DPI/(e)DP bridge driver
*
* The TC358767/TC358867/TC9595 can operate in multiple modes.
- * The following modes are supported:
- * DPI->(e)DP -- supported
- * DSI->DPI .... supported
- * DSI->(e)DP .. NOT supported
+ * All modes are supported -- DPI->(e)DP / DSI->DPI / DSI->(e)DP.
*
* Copyright (C) 2016 CogentEmbedded Inc
* Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
@@ -27,6 +24,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -291,7 +289,6 @@ struct tc_data {
struct drm_connector connector;
struct mipi_dsi_device *dsi;
- u8 dsi_lanes;
/* link settings */
struct tc_edp_link link;
@@ -309,6 +306,9 @@ struct tc_data {
/* do we have IRQ */
bool have_irq;
+ /* Input connector type, DSI and not DPI. */
+ bool input_connector_dsi;
+
/* HPD pin number (0 or 1) or -ENODEV */
int hpd_pin;
};
@@ -1247,11 +1247,60 @@ static int tc_main_link_disable(struct tc_data *tc)
return regmap_write(tc->regmap, DP0CTL, 0);
}
-static int tc_dpi_stream_enable(struct tc_data *tc)
+static int tc_dsi_rx_enable(struct tc_data *tc)
{
+ u32 value;
int ret;
+
+ regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
+ regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
+ regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
+ regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
+ regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
+ regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
+ regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
+ regmap_write(tc->regmap, PPI_LPTXTIMECNT, LPX_PERIOD);
+
+ value = ((LANEENABLE_L0EN << tc->dsi->lanes) - LANEENABLE_L0EN) |
+ LANEENABLE_CLEN;
+ regmap_write(tc->regmap, PPI_LANEENABLE, value);
+ regmap_write(tc->regmap, DSI_LANEENABLE, value);
+
+ /* Set input interface */
+ value = DP0_AUDSRC_NO_INPUT;
+ if (tc_test_pattern)
+ value |= DP0_VIDSRC_COLOR_BAR;
+ else
+ value |= DP0_VIDSRC_DSI_RX;
+ ret = regmap_write(tc->regmap, SYSCTRL, value);
+ if (ret)
+ return ret;
+
+ usleep_range(120, 150);
+
+ regmap_write(tc->regmap, PPI_STARTPPI, PPI_START_FUNCTION);
+ regmap_write(tc->regmap, DSI_STARTDSI, DSI_RX_START);
+
+ return 0;
+}
+
+static int tc_dpi_rx_enable(struct tc_data *tc)
+{
u32 value;
+ /* Set input interface */
+ value = DP0_AUDSRC_NO_INPUT;
+ if (tc_test_pattern)
+ value |= DP0_VIDSRC_COLOR_BAR;
+ else
+ value |= DP0_VIDSRC_DPI_RX;
+ return regmap_write(tc->regmap, SYSCTRL, value);
+}
+
+static int tc_dpi_stream_enable(struct tc_data *tc)
+{
+ int ret;
+
dev_dbg(tc->dev, "enable video stream\n");
/* Setup PLL */
@@ -1277,20 +1326,6 @@ static int tc_dpi_stream_enable(struct tc_data *tc)
if (ret)
return ret;
- regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
- regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
- regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
- regmap_write(tc->regmap, PPI_LPTXTIMECNT, LPX_PERIOD);
-
- value = ((LANEENABLE_L0EN << tc->dsi_lanes) - LANEENABLE_L0EN) |
- LANEENABLE_CLEN;
- regmap_write(tc->regmap, PPI_LANEENABLE, value);
- regmap_write(tc->regmap, DSI_LANEENABLE, value);
-
ret = tc_set_common_video_mode(tc, &tc->mode);
if (ret)
return ret;
@@ -1299,22 +1334,7 @@ static int tc_dpi_stream_enable(struct tc_data *tc)
if (ret)
return ret;
- /* Set input interface */
- value = DP0_AUDSRC_NO_INPUT;
- if (tc_test_pattern)
- value |= DP0_VIDSRC_COLOR_BAR;
- else
- value |= DP0_VIDSRC_DSI_RX;
- ret = regmap_write(tc->regmap, SYSCTRL, value);
- if (ret)
- return ret;
-
- usleep_range(120, 150);
-
- regmap_write(tc->regmap, PPI_STARTPPI, PPI_START_FUNCTION);
- regmap_write(tc->regmap, DSI_STARTDSI, DSI_RX_START);
-
- return 0;
+ return tc_dsi_rx_enable(tc);
}
static int tc_dpi_stream_disable(struct tc_data *tc)
@@ -1333,8 +1353,18 @@ static int tc_edp_stream_enable(struct tc_data *tc)
dev_dbg(tc->dev, "enable video stream\n");
- /* PXL PLL setup */
- if (tc_test_pattern) {
+ /*
+ * Pixel PLL must be enabled for DSI input mode and test pattern.
+ *
+ * Per TC9595XBG datasheet Revision 0.1 2018-12-27 Figure 4.18
+ * "Clock Mode Selection and Clock Sources", either Pixel PLL
+ * or DPI_PCLK supplies StrmClk. DPI_PCLK is only available when
+ * a valid pixel clock is supplied to the chip DPI input.
+ * When the built-in test pattern is desired OR DSI input mode
+ * is used, DPI_PCLK is not available and thus the Pixel PLL must
+ * be used instead.
+ */
+ if (tc->input_connector_dsi || tc_test_pattern) {
ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
1000 * tc->mode.clock);
if (ret)
@@ -1372,17 +1402,12 @@ static int tc_edp_stream_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0CTL, value);
if (ret)
return ret;
+
/* Set input interface */
- value = DP0_AUDSRC_NO_INPUT;
- if (tc_test_pattern)
- value |= DP0_VIDSRC_COLOR_BAR;
+ if (tc->input_connector_dsi)
+ return tc_dsi_rx_enable(tc);
else
- value |= DP0_VIDSRC_DPI_RX;
- ret = regmap_write(tc->regmap, SYSCTRL, value);
- if (ret)
- return ret;
-
- return 0;
+ return tc_dpi_rx_enable(tc);
}
static int tc_edp_stream_disable(struct tc_data *tc)
@@ -1865,18 +1890,18 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
int dsi_lanes, ret;
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
- dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
host_node = of_graph_get_remote_port_parent(endpoint);
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
of_node_put(endpoint);
- if (dsi_lanes < 0 || dsi_lanes > 4)
- return -EINVAL;
-
if (!host)
return -EPROBE_DEFER;
+ if (dsi_lanes < 0)
+ return dsi_lanes;
+
dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi))
return dev_err_probe(dev, PTR_ERR(dsi),
@@ -1884,8 +1909,7 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
tc->dsi = dsi;
- tc->dsi_lanes = dsi_lanes;
- dsi->lanes = tc->dsi_lanes;
+ dsi->lanes = dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
@@ -1992,18 +2016,29 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
mode |= BIT(endpoint.port);
}
- if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp)
+ if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) {
+ tc->input_connector_dsi = false;
return tc_probe_edp_bridge_endpoint(tc);
- else if (mode == mode_dsi_to_dpi)
+ } else if (mode == mode_dsi_to_dpi) {
+ tc->input_connector_dsi = true;
return tc_probe_dpi_bridge_endpoint(tc);
- else if (mode == mode_dsi_to_edp || mode == mode_dsi_to_dp)
- dev_warn(dev, "The mode DSI-to-(e)DP is not supported!\n");
- else
- dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
+ } else if (mode == mode_dsi_to_edp || mode == mode_dsi_to_dp) {
+ tc->input_connector_dsi = true;
+ return tc_probe_edp_bridge_endpoint(tc);
+ }
+
+ dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
return -EINVAL;
}
+static void tc_clk_disable(void *data)
+{
+ struct clk *refclk = data;
+
+ clk_disable_unprepare(refclk);
+}
+
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -2020,6 +2055,24 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (ret)
return ret;
+ tc->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(tc->refclk)) {
+ ret = PTR_ERR(tc->refclk);
+ dev_err(dev, "Failed to get refclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tc->refclk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, tc_clk_disable, tc->refclk);
+ if (ret)
+ return ret;
+
+ /* tRSTW = 100 cycles, at 13 MHz that is ~7.69 us */
+ usleep_range(10, 15);
+
/* Shut down GPIO is optional */
tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
if (IS_ERR(tc->sd_gpio))
@@ -2040,13 +2093,6 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
usleep_range(5000, 10000);
}
- tc->refclk = devm_clk_get(dev, "ref");
- if (IS_ERR(tc->refclk)) {
- ret = PTR_ERR(tc->refclk);
- dev_err(dev, "Failed to get refclk: %d\n", ret);
- return ret;
- }
-
tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
if (IS_ERR(tc->regmap)) {
ret = PTR_ERR(tc->regmap);
@@ -2137,7 +2183,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tc);
- if (tc->bridge.type == DRM_MODE_CONNECTOR_DPI) { /* DPI output */
+ if (tc->input_connector_dsi) { /* DSI input */
ret = tc_mipi_dsi_host_attach(tc);
if (ret) {
drm_bridge_remove(&tc->bridge);
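tc_probe() above now enables the reference clock before the first chip access and registers a devm action to release it. The same pattern in isolation, with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>

static void example_clk_disable(void *data)
{
	clk_disable_unprepare(data);
}

/*
 * Enable a clock early in probe and let devm disable it again on any
 * later probe failure or on unbind, so no error path has to remember it.
 */
static int example_enable_refclk(struct device *dev, struct clk *refclk)
{
	int ret;

	ret = clk_prepare_enable(refclk);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, example_clk_disable, refclk);
}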
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 62a7ef352daa..f1c6e62b0e1d 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -13,6 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -339,6 +340,7 @@ static void d2l_read(struct i2c_client *i2c, u16 addr, u32 *val)
goto fail;
pr_debug("d2l: I2C : addr:%04x value:%08x\n", addr, *val);
+ return;
fail:
dev_err(&i2c->dev, "Error %d reading from subaddress 0x%x\n",
@@ -429,7 +431,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
val = TC358775_VPCTRL_MSF(1);
dsiclk = mode->crtc_clock * 3 * tc->bpc / tc->num_dsi_lanes / 1000;
- clkdiv = dsiclk / DIVIDE_BY_3 * tc->lvds_link;
+ clkdiv = dsiclk / (tc->lvds_link == DUAL_LINK ? DIVIDE_BY_6 : DIVIDE_BY_3);
byteclk = dsiclk / 4;
t1 = hactive * (tc->bpc * 3 / 8) / tc->num_dsi_lanes;
t2 = ((100000 / clkdiv)) * (hactive + hback_porch + hsync_len + hfront_porch) / 1000;
@@ -529,8 +531,7 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
struct device_node *endpoint;
struct device_node *parent;
struct device_node *remote;
- struct property *prop;
- int len = 0;
+ int dsi_lanes = -1;
/*
* To get the data-lanes of dsi, we need to access the dsi0_out of port1
@@ -544,25 +545,15 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
of_node_put(endpoint);
if (parent) {
/* dsi0 port 1 */
- endpoint = of_graph_get_endpoint_by_regs(parent, 1, -1);
+ dsi_lanes = drm_of_get_data_lanes_count_ep(parent, 1, -1, 1, 4);
of_node_put(parent);
- if (endpoint) {
- prop = of_find_property(endpoint, "data-lanes",
- &len);
- of_node_put(endpoint);
- if (!prop) {
- dev_err(tc->dev,
- "failed to find data lane\n");
- return -EPROBE_DEFER;
- }
- }
}
}
- tc->num_dsi_lanes = len / sizeof(u32);
+ if (dsi_lanes < 0)
+ return dsi_lanes;
- if (tc->num_dsi_lanes < 1 || tc->num_dsi_lanes > 4)
- return -EINVAL;
+ tc->num_dsi_lanes = dsi_lanes;
tc->host_node = of_graph_get_remote_node(np, 0, 0);
if (!tc->host_node)
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
new file mode 100644
index 000000000000..cef454862b67
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2021 RenewOutReach
+ * Copyright (C) 2021 Amarula Solutions(India)
+ *
+ * Author:
+ * Jagan Teki <jagan@amarulasolutions.com>
+ * Christopher Vollo <chris@renewoutreach.org>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+enum cmd_registers {
+ WR_INPUT_SOURCE = 0x05, /* Write Input Source Select */
+ WR_EXT_SOURCE_FMT = 0x07, /* Write External Video Source Format */
+ WR_IMAGE_CROP = 0x10, /* Write Image Crop */
+ WR_DISPLAY_SIZE = 0x12, /* Write Display Size */
+ WR_IMAGE_FREEZE = 0x1A, /* Write Image Freeze */
+ WR_INPUT_IMAGE_SIZE = 0x2E, /* Write External Input Image Size */
+ WR_RGB_LED_EN = 0x52, /* Write RGB LED Enable */
+ WR_RGB_LED_CURRENT = 0x54, /* Write RGB LED Current */
+ WR_RGB_LED_MAX_CURRENT = 0x5C, /* Write RGB LED Max Current */
+ WR_DSI_HS_CLK = 0xBD, /* Write DSI HS Clock */
+ RD_DEVICE_ID = 0xD4, /* Read Controller Device ID */
+ WR_DSI_PORT_EN = 0xD7, /* Write DSI Port Enable */
+};
+
+enum input_source {
+ INPUT_EXTERNAL_VIDEO = 0,
+ INPUT_TEST_PATTERN,
+ INPUT_SPLASH_SCREEN,
+};
+
+#define DEV_ID_MASK GENMASK(3, 0)
+#define IMAGE_FREESE_EN BIT(0)
+#define DSI_PORT_EN 0
+#define EXT_SOURCE_FMT_DSI 0
+#define RED_LED_EN BIT(0)
+#define GREEN_LED_EN BIT(1)
+#define BLUE_LED_EN BIT(2)
+#define LED_MASK GENMASK(2, 0)
+#define MAX_BYTE_SIZE 8
+
+struct dlpc {
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ struct drm_display_mode mode;
+
+ struct gpio_desc *enable_gpio;
+ struct regulator *vcc_intf;
+ struct regulator *vcc_flsh;
+ struct regmap *regmap;
+ unsigned int dsi_lanes;
+};
+
+static inline struct dlpc *bridge_to_dlpc(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dlpc, bridge);
+}
+
+static bool dlpc_writeable_noinc_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WR_IMAGE_CROP:
+ case WR_DISPLAY_SIZE:
+ case WR_INPUT_IMAGE_SIZE:
+ case WR_DSI_HS_CLK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_range dlpc_volatile_ranges[] = {
+ { .range_min = 0x10, .range_max = 0xBF },
+};
+
+static const struct regmap_access_table dlpc_volatile_table = {
+ .yes_ranges = dlpc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges),
+};
+
+static struct regmap_config dlpc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = WR_DSI_PORT_EN,
+ .writeable_noinc_reg = dlpc_writeable_noinc_reg,
+ .volatile_table = &dlpc_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
+ .name = "dlpc3433",
+};
+
+static void dlpc_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dlpc *dlpc = bridge_to_dlpc(bridge);
+ struct device *dev = dlpc->dev;
+ struct drm_display_mode *mode = &dlpc->mode;
+ struct regmap *regmap = dlpc->regmap;
+ char buf[MAX_BYTE_SIZE];
+ unsigned int devid;
+
+ regmap_read(regmap, RD_DEVICE_ID, &devid);
+ devid &= DEV_ID_MASK;
+
+ DRM_DEV_DEBUG(dev, "DLPC3433 device id: 0x%02x\n", devid);
+
+ if (devid != 0x01) {
+ DRM_DEV_ERROR(dev, "Unsupported DLPC device id: 0x%02x\n", devid);
+ return;
+ }
+
+ /* enable image freeze */
+ regmap_write(regmap, WR_IMAGE_FREEZE, IMAGE_FREESE_EN);
+
+ /* enable DSI port */
+ regmap_write(regmap, WR_DSI_PORT_EN, DSI_PORT_EN);
+
+ memset(buf, 0, MAX_BYTE_SIZE);
+
+ /* set image crop */
+ buf[4] = mode->hdisplay & 0xff;
+ buf[5] = (mode->hdisplay & 0xff00) >> 8;
+ buf[6] = mode->vdisplay & 0xff;
+ buf[7] = (mode->vdisplay & 0xff00) >> 8;
+ regmap_noinc_write(regmap, WR_IMAGE_CROP, buf, MAX_BYTE_SIZE);
+
+ /* set display size */
+ buf[4] = mode->hdisplay & 0xff;
+ buf[5] = (mode->hdisplay & 0xff00) >> 8;
+ buf[6] = mode->vdisplay & 0xff;
+ buf[7] = (mode->vdisplay & 0xff00) >> 8;
+ regmap_noinc_write(regmap, WR_DISPLAY_SIZE, buf, MAX_BYTE_SIZE);
+
+ /* set input image size */
+ buf[0] = mode->hdisplay & 0xff;
+ buf[1] = (mode->hdisplay & 0xff00) >> 8;
+ buf[2] = mode->vdisplay & 0xff;
+ buf[3] = (mode->vdisplay & 0xff00) >> 8;
+ regmap_noinc_write(regmap, WR_INPUT_IMAGE_SIZE, buf, 4);
+
+ /* set external video port */
+ regmap_write(regmap, WR_INPUT_SOURCE, INPUT_EXTERNAL_VIDEO);
+
+ /* set external video format select as DSI */
+ regmap_write(regmap, WR_EXT_SOURCE_FMT, EXT_SOURCE_FMT_DSI);
+
+ /* disable image freeze */
+ regmap_write(regmap, WR_IMAGE_FREEZE, 0x00);
+
+ /* enable RGB led */
+ regmap_update_bits(regmap, WR_RGB_LED_EN, LED_MASK,
+ RED_LED_EN | GREEN_LED_EN | BLUE_LED_EN);
+
+ msleep(10);
+}
+
+static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dlpc *dlpc = bridge_to_dlpc(bridge);
+ int ret;
+
+ gpiod_set_value(dlpc->enable_gpio, 1);
+
+ msleep(500);
+
+ ret = regulator_enable(dlpc->vcc_intf);
+ if (ret)
+ DRM_DEV_ERROR(dlpc->dev,
+ "failed to enable VCC_INTF regulator: %d\n", ret);
+
+ ret = regulator_enable(dlpc->vcc_flsh);
+ if (ret)
+ DRM_DEV_ERROR(dlpc->dev,
+ "failed to enable VCC_FLSH regulator: %d\n", ret);
+
+ msleep(10);
+}
+
+static void dlpc_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dlpc *dlpc = bridge_to_dlpc(bridge);
+
+ regulator_disable(dlpc->vcc_flsh);
+ regulator_disable(dlpc->vcc_intf);
+
+ msleep(10);
+
+ gpiod_set_value(dlpc->enable_gpio, 0);
+
+ msleep(500);
+}
+
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *
+dlpc_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ /* This is the DSI-end bus format */
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static void dlpc_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dlpc *dlpc = bridge_to_dlpc(bridge);
+
+ drm_mode_copy(&dlpc->mode, adjusted_mode);
+}
+
+static int dlpc_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dlpc *dlpc = bridge_to_dlpc(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs dlpc_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = dlpc_atomic_get_input_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = dlpc_atomic_pre_enable,
+ .atomic_enable = dlpc_atomic_enable,
+ .atomic_post_disable = dlpc_atomic_post_disable,
+ .mode_set = dlpc_mode_set,
+ .attach = dlpc_attach,
+};
+
+static int dlpc3433_parse_dt(struct dlpc *dlpc)
+{
+ struct device *dev = dlpc->dev;
+ struct device_node *endpoint;
+ int ret;
+
+ dlpc->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(dlpc->enable_gpio))
+ return PTR_ERR(dlpc->enable_gpio);
+
+ dlpc->vcc_intf = devm_regulator_get(dlpc->dev, "vcc_intf");
+ if (IS_ERR(dlpc->vcc_intf))
+ return dev_err_probe(dev, PTR_ERR(dlpc->vcc_intf),
+ "failed to get VCC_INTF supply\n");
+
+ dlpc->vcc_flsh = devm_regulator_get(dlpc->dev, "vcc_flsh");
+ if (IS_ERR(dlpc->vcc_flsh))
+ return dev_err_probe(dev, PTR_ERR(dlpc->vcc_flsh),
+ "failed to get VCC_FLSH supply\n");
+
+ dlpc->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(dlpc->next_bridge))
+ return PTR_ERR(dlpc->next_bridge);
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ dlpc->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ if (dlpc->dsi_lanes < 0 || dlpc->dsi_lanes > 4) {
+ ret = -EINVAL;
+ goto err_put_endpoint;
+ }
+
+ dlpc->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!dlpc->host_node) {
+ ret = -ENODEV;
+ goto err_put_host;
+ }
+
+ of_node_put(endpoint);
+
+ return 0;
+
+err_put_host:
+ of_node_put(dlpc->host_node);
+err_put_endpoint:
+ of_node_put(endpoint);
+ return ret;
+}
+
+static int dlpc_host_attach(struct dlpc *dlpc)
+{
+ struct device *dev = dlpc->dev;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device_info info = {
+ .type = "dlpc3433",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
+ if (!host) {
+ DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dlpc->dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dlpc->dsi)) {
+ DRM_DEV_ERROR(dev, "failed to create dsi device\n");
+ return PTR_ERR(dlpc->dsi);
+ }
+
+ dlpc->dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST;
+ dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
+ dlpc->dsi->lanes = dlpc->dsi_lanes;
+
+ return devm_mipi_dsi_attach(dev, dlpc->dsi);
+}
+
+static int dlpc3433_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct dlpc *dlpc;
+ int ret;
+
+ dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL);
+ if (!dlpc)
+ return -ENOMEM;
+
+ dlpc->dev = dev;
+
+ dlpc->regmap = devm_regmap_init_i2c(client, &dlpc_regmap_config);
+ if (IS_ERR(dlpc->regmap))
+ return PTR_ERR(dlpc->regmap);
+
+ ret = dlpc3433_parse_dt(dlpc);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, dlpc);
+ i2c_set_clientdata(client, dlpc);
+
+ dlpc->bridge.funcs = &dlpc_bridge_funcs;
+ dlpc->bridge.of_node = dev->of_node;
+ drm_bridge_add(&dlpc->bridge);
+
+ ret = dlpc_host_attach(dlpc);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+ goto err_remove_bridge;
+ }
+
+ return 0;
+
+err_remove_bridge:
+ drm_bridge_remove(&dlpc->bridge);
+ return ret;
+}
+
+static int dlpc3433_remove(struct i2c_client *client)
+{
+ struct dlpc *dlpc = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&dlpc->bridge);
+ of_node_put(dlpc->host_node);
+
+ return 0;
+}
+
+static const struct i2c_device_id dlpc3433_id[] = {
+ { "ti,dlpc3433", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, dlpc3433_id);
+
+static const struct of_device_id dlpc3433_match_table[] = {
+ { .compatible = "ti,dlpc3433" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dlpc3433_match_table);
+
+static struct i2c_driver dlpc3433_driver = {
+ .probe_new = dlpc3433_probe,
+ .remove = dlpc3433_remove,
+ .id_table = dlpc3433_id,
+ .driver = {
+ .name = "ti-dlpc3433",
+ .of_match_table = dlpc3433_match_table,
+ },
+};
+module_i2c_driver(dlpc3433_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_AUTHOR("Christopher Vollo <chris@renewoutreach.org>");
+MODULE_DESCRIPTION("TI DLPC3433 MIPI DSI Display Controller Bridge");
+MODULE_LICENSE("GPL");
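For the DLPC3433 register writes above, commands such as WR_DISPLAY_SIZE carry a multi-byte payload at a single register address, which is why the driver lists them in writeable_noinc_reg() and writes them with regmap_noinc_write() rather than an auto-incrementing bulk write. A small sketch of one such write; the helper name is hypothetical and the 0x12 command value is copied from the driver above.

#include <linux/regmap.h>
#include <linux/types.h>

static int example_write_display_size(struct regmap *regmap,
				      u16 hdisplay, u16 vdisplay)
{
	u8 buf[8] = { 0 };

	/* width/height in little-endian byte order, as in dlpc_atomic_enable() */
	buf[4] = hdisplay & 0xff;
	buf[5] = hdisplay >> 8;
	buf[6] = vdisplay & 0xff;
	buf[7] = vdisplay >> 8;

	/* 0x12 == WR_DISPLAY_SIZE; no register auto-increment for this command */
	return regmap_noinc_write(regmap, 0x12, buf, sizeof(buf));
}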
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 2831f0813c3a..14e7aa77e758 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
@@ -140,12 +141,10 @@ struct sn65dsi83 {
struct drm_bridge bridge;
struct device *dev;
struct regmap *regmap;
- struct device_node *host_node;
struct mipi_dsi_device *dsi;
struct drm_bridge *panel_bridge;
struct gpio_desc *enable_gpio;
struct regulator *vcc;
- int dsi_lanes;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
};
@@ -306,7 +305,7 @@ static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx,
*/
return DIV_ROUND_UP(clamp((unsigned int)mode->clock *
mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
- ctx->dsi_lanes / 2, 40000U, 500000U), 5000U);
+ ctx->dsi->lanes / 2, 40000U, 500000U), 5000U);
}
static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
@@ -314,7 +313,7 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
/* The divider is (DSI_CLK / LVDS_CLK) - 1, which really is: */
unsigned int dsi_div = mipi_dsi_pixel_format_to_bpp(ctx->dsi->format);
- dsi_div /= ctx->dsi_lanes;
+ dsi_div /= ctx->dsi->lanes;
if (!ctx->lvds_dual_link)
dsi_div /= 2;
@@ -346,7 +345,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
}
/* Deassert reset */
- gpiod_set_value(ctx->enable_gpio, 1);
+ gpiod_set_value_cansleep(ctx->enable_gpio, 1);
usleep_range(1000, 1100);
/* Get the LVDS format from the bridge state. */
@@ -405,7 +404,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
/* Set number of DSI lanes and LVDS link config. */
regmap_write(ctx->regmap, REG_DSI_LANE,
REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE |
- REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi_lanes - 1)) |
+ REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi->lanes - 1)) |
/* CHB is DSI85-only, set to default on DSI83/DSI84 */
REG_DSI_LANE_CHB_DSI_LANES(3));
/* No equalization. */
@@ -502,7 +501,7 @@ static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
int ret;
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
- gpiod_set_value(ctx->enable_gpio, 0);
+ gpiod_set_value_cansleep(ctx->enable_gpio, 0);
usleep_range(10000, 11000);
ret = regulator_disable(ctx->vcc);
@@ -569,22 +568,6 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
- struct device_node *endpoint;
- int ret;
-
- endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
- ctx->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
- ctx->host_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
-
- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
- ret = -EINVAL;
- goto err_put_node;
- }
- if (!ctx->host_node) {
- ret = -ENODEV;
- goto err_put_node;
- }
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -610,10 +593,8 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
}
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
- if (IS_ERR(panel_bridge)) {
- ret = PTR_ERR(panel_bridge);
- goto err_put_node;
- }
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
ctx->panel_bridge = panel_bridge;
@@ -623,15 +604,13 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
"Failed to get supply 'vcc'\n");
return 0;
-
-err_put_node:
- of_node_put(ctx->host_node);
- return ret;
}
static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
{
struct device *dev = ctx->dev;
+ struct device_node *host_node;
+ struct device_node *endpoint;
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
const struct mipi_dsi_device_info info = {
@@ -639,13 +618,20 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
.channel = 0,
.node = NULL,
};
- int ret;
+ int dsi_lanes, ret;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
+ host_node = of_graph_get_remote_port_parent(endpoint);
+ host = of_find_mipi_dsi_host_by_node(host_node);
+ of_node_put(host_node);
+ of_node_put(endpoint);
- host = of_find_mipi_dsi_host_by_node(ctx->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
+ if (!host)
return -EPROBE_DEFER;
- }
+
+ if (dsi_lanes < 0)
+ return dsi_lanes;
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi))
@@ -654,7 +640,7 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
ctx->dsi = dsi;
- dsi->lanes = ctx->dsi_lanes;
+ dsi->lanes = dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
@@ -692,7 +678,7 @@ static int sn65dsi83_probe(struct i2c_client *client,
ctx->enable_gpio = devm_gpiod_get_optional(ctx->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(ctx->enable_gpio))
- return PTR_ERR(ctx->enable_gpio);
+ return dev_err_probe(dev, PTR_ERR(ctx->enable_gpio), "failed to get enable GPIO\n");
usleep_range(10000, 11000);
@@ -701,10 +687,8 @@ static int sn65dsi83_probe(struct i2c_client *client,
return ret;
ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
- if (IS_ERR(ctx->regmap)) {
- ret = PTR_ERR(ctx->regmap);
- goto err_put_node;
- }
+ if (IS_ERR(ctx->regmap))
+ return dev_err_probe(dev, PTR_ERR(ctx->regmap), "failed to get regmap\n");
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
@@ -721,8 +705,6 @@ static int sn65dsi83_probe(struct i2c_client *client,
err_remove_bridge:
drm_bridge_remove(&ctx->bridge);
-err_put_node:
- of_node_put(ctx->host_node);
return ret;
}
@@ -731,7 +713,6 @@ static int sn65dsi83_remove(struct i2c_client *client)
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
- of_node_put(ctx->host_node);
return 0;
}
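
For illustration only, a condensed sketch of the host-attach flow the sn65dsi83 changes above converge on; identifiers prefixed example_ are placeholders, and the lane count and DSI host are both taken from the OF graph endpoint while devm handles the detach:

/* Sketch; assumes <drm/drm_mipi_dsi.h>, <drm/drm_of.h>, <linux/of_graph.h>. */
static int example_bridge_host_attach(struct device *dev,
				      struct mipi_dsi_device **out_dsi)
{
	const struct mipi_dsi_device_info info = {
		.type = "example-bridge",	/* hypothetical name */
		.channel = 0,
		.node = NULL,
	};
	struct device_node *endpoint, *host_node;
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *dsi;
	int dsi_lanes;

	/* Both the lane count and the DSI host come from OF graph port 0. */
	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
	dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
	host_node = of_graph_get_remote_port_parent(endpoint);
	host = of_find_mipi_dsi_host_by_node(host_node);
	of_node_put(host_node);
	of_node_put(endpoint);

	if (!host)
		return -EPROBE_DEFER;	/* host driver not bound yet */
	if (dsi_lanes < 0)
		return dsi_lanes;

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	dsi->lanes = dsi_lanes;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
	*out_dsi = dsi;

	return devm_mipi_dsi_attach(dev, dsi);
}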
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 8cad662de9bb..d6dd4d99a229 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -752,7 +752,8 @@ ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void ti_sn_bridge_disable(struct drm_bridge *bridge)
+static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1011,7 +1012,8 @@ exit:
return ret;
}
-static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
const char *last_err_str = "No supported DP rate";
@@ -1080,7 +1082,8 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
VSTREAM_ENABLE);
}
-static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
+static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1093,7 +1096,8 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(100, 110);
}
-static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
+static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1114,10 +1118,13 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
.mode_valid = ti_sn_bridge_mode_valid,
- .pre_enable = ti_sn_bridge_pre_enable,
- .enable = ti_sn_bridge_enable,
- .disable = ti_sn_bridge_disable,
- .post_disable = ti_sn_bridge_post_disable,
+ .atomic_pre_enable = ti_sn_bridge_atomic_pre_enable,
+ .atomic_enable = ti_sn_bridge_atomic_enable,
+ .atomic_disable = ti_sn_bridge_atomic_disable,
+ .atomic_post_disable = ti_sn_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1142,8 +1149,8 @@ static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
* mappings that the hardware supports.
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
- dp_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
- if (dp_lanes > 0 && dp_lanes <= SN_MAX_DP_LANES) {
+ dp_lanes = drm_of_get_data_lanes_count(endpoint, 1, SN_MAX_DP_LANES);
+ if (dp_lanes > 0) {
of_property_read_u32_array(endpoint, "data-lanes",
lane_assignments, dp_lanes);
of_property_read_u32_array(endpoint, "lane-polarities",
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 756b3e6e776b..401fe61217c7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -6,6 +6,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -14,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 1b6e6af37546..09712b88a5b8 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -3,7 +3,7 @@
config DRM_DP_AUX_BUS
tristate
depends on DRM
- depends on OF
+ depends on OF || COMPILE_TEST
config DRM_DISPLAY_HELPER
tristate
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index dccf3e2ea323..f5741b45ca07 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -3,10 +3,10 @@
* Copyright 2021 Google Inc.
*
* The DP AUX bus is used for devices that are connected over a DisplayPort
- * AUX bus. The devices on the far side of the bus are referred to as
- * endpoints in this code.
+ * AUX bus. The device on the far side of the bus is referred to as an
+ * endpoint in this code.
*
- * Commonly there is only one device connected to the DP AUX bus: a panel.
+ * There is only one device connected to the DP AUX bus: an eDP panel.
* Though historically panels (even DP panels) have been modeled as simple
* platform devices, putting them under the DP AUX bus allows the panel driver
* to perform transactions on that bus.
@@ -22,6 +22,11 @@
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
+struct dp_aux_ep_device_with_data {
+ struct dp_aux_ep_device aux_ep;
+ int (*done_probing)(struct drm_dp_aux *aux);
+};
+
/**
* dp_aux_ep_match() - The match function for the dp_aux_bus.
* @dev: The device to match.
@@ -48,6 +53,8 @@ static int dp_aux_ep_probe(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
+ struct dp_aux_ep_device_with_data *aux_ep_with_data =
+ container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
int ret;
ret = dev_pm_domain_attach(dev, true);
@@ -56,7 +63,32 @@ static int dp_aux_ep_probe(struct device *dev)
ret = aux_ep_drv->probe(aux_ep);
if (ret)
- dev_pm_domain_detach(dev, true);
+ goto err_attached;
+
+ if (aux_ep_with_data->done_probing) {
+ ret = aux_ep_with_data->done_probing(aux_ep->aux);
+ if (ret) {
+ /*
+ * The done_probing() callback should not return
+ * -EPROBE_DEFER to us. If it does, we treat it as an
+ * error. Passing it on as-is would cause the _panel_
+ * to defer.
+ */
+ if (ret == -EPROBE_DEFER) {
+ dev_err(dev,
+ "DP AUX done_probing() can't defer\n");
+ ret = -EINVAL;
+ }
+ goto err_probed;
+ }
+ }
+
+ return 0;
+err_probed:
+ if (aux_ep_drv->remove)
+ aux_ep_drv->remove(aux_ep);
+err_attached:
+ dev_pm_domain_detach(dev, true);
return ret;
}
@@ -66,7 +98,6 @@ static int dp_aux_ep_probe(struct device *dev)
* @dev: The device to remove.
*
* Calls through to the endpoint driver remove.
- *
*/
static void dp_aux_ep_remove(struct device *dev)
{
@@ -120,12 +151,14 @@ ATTRIBUTE_GROUPS(dp_aux_ep_dev);
/**
* dp_aux_ep_dev_release() - Free memory for the dp_aux_ep device
* @dev: The device to free.
- *
- * Return: 0 if no error or negative error code.
*/
static void dp_aux_ep_dev_release(struct device *dev)
{
- kfree(to_dp_aux_ep_dev(dev));
+ struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
+ struct dp_aux_ep_device_with_data *aux_ep_with_data =
+ container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
+
+ kfree(aux_ep_with_data);
}
static struct device_type dp_aux_device_type_type = {
@@ -139,12 +172,14 @@ static struct device_type dp_aux_device_type_type = {
* @dev: The device to destroy.
* @data: Not used
*
- * This is just used as a callback by of_dp_aux_depopulate_ep_devices() and
+ * This is just used as a callback by of_dp_aux_depopulate_bus() and
* is called for _all_ of the child devices of the device providing the AUX bus.
* We'll only act on those that are of type "dp_aux_bus_type".
*
- * This function is effectively an inverse of what's in the loop
- * in of_dp_aux_populate_ep_devices().
+ * This function is effectively an inverse of what's in
+ * of_dp_aux_populate_bus(). NOTE: since we only populate one child,
+ * it's expected that only one device will match all the "if" tests in
+ * this function and get to the device_unregister().
*
* Return: 0 if no error or negative error code.
*/
@@ -167,122 +202,150 @@ static int of_dp_aux_ep_destroy(struct device *dev, void *data)
}
/**
- * of_dp_aux_depopulate_ep_devices() - Undo of_dp_aux_populate_ep_devices
- * @aux: The AUX channel whose devices we want to depopulate
+ * of_dp_aux_depopulate_bus() - Undo of_dp_aux_populate_bus
+ * @aux: The AUX channel whose device we want to depopulate
*
- * This will destroy all devices that were created
- * by of_dp_aux_populate_ep_devices().
+ * This will destroy the device that was created
+ * by of_dp_aux_populate_bus().
*/
-void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux)
+void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux)
{
device_for_each_child_reverse(aux->dev, NULL, of_dp_aux_ep_destroy);
}
-EXPORT_SYMBOL_GPL(of_dp_aux_depopulate_ep_devices);
+EXPORT_SYMBOL_GPL(of_dp_aux_depopulate_bus);
/**
- * of_dp_aux_populate_ep_devices() - Populate the endpoint devices on the DP AUX
- * @aux: The AUX channel whose devices we want to populate. It is required that
+ * of_dp_aux_populate_bus() - Populate the endpoint device on the DP AUX
+ * @aux: The AUX channel whose device we want to populate. It is required that
* drm_dp_aux_init() has already been called for this AUX channel.
+ * @done_probing: Callback function to call after the EP device finishes probing.
+ *                Will not be called if there are no EP devices; in that
+ *                case this function returns -ENODEV.
*
- * This will populate all the devices under the "aux-bus" node of the device
- * providing the AUX channel (AKA aux->dev).
+ * This will populate the device (expected to be an eDP panel) under the
+ * "aux-bus" node of the device providing the AUX channel (AKA aux->dev).
*
* When this function finishes, it is _possible_ (but not guaranteed) that
- * our sub-devices will have finished probing. It should be noted that if our
- * sub-devices return -EPROBE_DEFER that we will not return any error codes
- * ourselves but our sub-devices will _not_ have actually probed successfully
- * yet. There may be other cases (maybe added in the future?) where sub-devices
- * won't have been probed yet when this function returns, so it's best not to
- * rely on that.
+ * our sub-device will have finished probing. It should be noted that if our
+ * sub-device returns -EPROBE_DEFER or is probing asynchronously for some
+ * reason, we will not return any error codes ourselves, but our
+ * sub-device will _not_ have actually probed successfully yet.
+ *
+ * In many cases it's important for the caller of this function to be notified
+ * when our sub-device finishes probing. Our sub-device is expected to be an
+ * eDP panel and the caller is expected to be an eDP controller. The eDP
+ * controller needs to be able to get a reference to the panel when it finishes
+ * probing. For this reason the caller can pass in a function pointer that
+ * will be called when our sub-device finishes probing.
*
* If this function succeeds you should later make sure you call
- * of_dp_aux_depopulate_ep_devices() to undo it, or just use the devm version
+ * of_dp_aux_depopulate_bus() to undo it, or just use the devm version
* of this function.
*
- * Return: 0 if no error or negative error code.
+ * Return: 0 if no error or negative error code; returns -ENODEV if there are
+ * no children. The done_probing() function won't be called in that
+ * case.
*/
-int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux))
{
- struct device_node *bus, *np;
+ struct device_node *bus = NULL, *np = NULL;
struct dp_aux_ep_device *aux_ep;
+ struct dp_aux_ep_device_with_data *aux_ep_with_data;
int ret;
/* drm_dp_aux_init() should have been called already; warn if not */
WARN_ON_ONCE(!aux->ddc.algo);
if (!aux->dev->of_node)
- return 0;
-
+ return -ENODEV;
bus = of_get_child_by_name(aux->dev->of_node, "aux-bus");
if (!bus)
- return 0;
+ return -ENODEV;
- for_each_available_child_of_node(bus, np) {
- if (of_node_test_and_set_flag(np, OF_POPULATED))
- continue;
+ np = of_get_next_available_child(bus, NULL);
+ of_node_put(bus);
+ if (!np)
+ return -ENODEV;
- aux_ep = kzalloc(sizeof(*aux_ep), GFP_KERNEL);
- if (!aux_ep)
- continue;
- aux_ep->aux = aux;
+ if (of_node_test_and_set_flag(np, OF_POPULATED)) {
+ dev_err(aux->dev, "DP AUX EP device already populated\n");
+ ret = -EINVAL;
+ goto err_did_get_np;
+ }
- aux_ep->dev.parent = aux->dev;
- aux_ep->dev.bus = &dp_aux_bus_type;
- aux_ep->dev.type = &dp_aux_device_type_type;
- aux_ep->dev.of_node = of_node_get(np);
- dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev));
+ aux_ep_with_data = kzalloc(sizeof(*aux_ep_with_data), GFP_KERNEL);
+ if (!aux_ep_with_data) {
+ ret = -ENOMEM;
+ goto err_did_set_populated;
+ }
- ret = device_register(&aux_ep->dev);
- if (ret) {
- dev_err(aux->dev, "Failed to create AUX EP for %pOF: %d\n", np, ret);
- of_node_clear_flag(np, OF_POPULATED);
- of_node_put(np);
+ aux_ep_with_data->done_probing = done_probing;
- /*
- * As per docs of device_register(), call this instead
- * of kfree() directly for error cases.
- */
- put_device(&aux_ep->dev);
+ aux_ep = &aux_ep_with_data->aux_ep;
+ aux_ep->aux = aux;
+ aux_ep->dev.parent = aux->dev;
+ aux_ep->dev.bus = &dp_aux_bus_type;
+ aux_ep->dev.type = &dp_aux_device_type_type;
+ aux_ep->dev.of_node = of_node_get(np);
+ dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev));
- /*
- * Following in the footsteps of of_i2c_register_devices(),
- * we won't fail the whole function here--we'll just
- * continue registering any other devices we find.
- */
- }
- }
+ ret = device_register(&aux_ep->dev);
+ if (ret) {
+ dev_err(aux->dev, "Failed to create AUX EP for %pOF: %d\n", np, ret);
- of_node_put(bus);
+ /*
+ * As per docs of device_register(), call this instead
+ * of kfree() directly for error cases.
+ */
+ put_device(&aux_ep->dev);
+
+ goto err_did_set_populated;
+ }
return 0;
+
+err_did_set_populated:
+ of_node_clear_flag(np, OF_POPULATED);
+
+err_did_get_np:
+ of_node_put(np);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(of_dp_aux_populate_bus);
-static void of_dp_aux_depopulate_ep_devices_void(void *data)
+static void of_dp_aux_depopulate_bus_void(void *data)
{
- of_dp_aux_depopulate_ep_devices(data);
+ of_dp_aux_depopulate_bus(data);
}
/**
- * devm_of_dp_aux_populate_ep_devices() - devm wrapper for of_dp_aux_populate_ep_devices()
- * @aux: The AUX channel whose devices we want to populate
+ * devm_of_dp_aux_populate_bus() - devm wrapper for of_dp_aux_populate_bus()
+ * @aux: The AUX channel whose device we want to populate
+ * @done_probing: Callback function to call after the EP device finishes probing.
+ *                Will not be called if there are no EP devices; in that
+ *                case this function returns -ENODEV.
*
* Handles freeing w/ devm on the device "aux->dev".
*
- * Return: 0 if no error or negative error code.
+ * Return: 0 if no error or negative error code; returns -ENODEV if there are
+ * no children. The done_probing() function won't be called in that
+ * case.
*/
-int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux))
{
int ret;
- ret = of_dp_aux_populate_ep_devices(aux);
+ ret = of_dp_aux_populate_bus(aux, done_probing);
if (ret)
return ret;
return devm_add_action_or_reset(aux->dev,
- of_dp_aux_depopulate_ep_devices_void,
- aux);
+ of_dp_aux_depopulate_bus_void, aux);
}
-EXPORT_SYMBOL_GPL(devm_of_dp_aux_populate_ep_devices);
+EXPORT_SYMBOL_GPL(devm_of_dp_aux_populate_bus);
int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *drv, struct module *owner)
{
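
As a usage illustration of the reworked AUX-bus API, a hypothetical eDP controller driver could look like the sketch below; every example_ identifier is a placeholder, and example_edp_register_output() stands in for whatever code looks up the panel and registers the connector:

/* Sketch; assumes <drm/display/drm_dp_aux_bus.h> and <drm/display/drm_dp_helper.h>. */
struct example_edp {
	struct drm_dp_aux aux;
	/* ... controller state ... */
};

static int example_edp_register_output(struct example_edp *edp);	/* hypothetical */

static int example_edp_done_probing(struct drm_dp_aux *aux)
{
	struct example_edp *edp = container_of(aux, struct example_edp, aux);

	/*
	 * The panel on the AUX bus has probed: look it up and register the
	 * connector. Returning -EPROBE_DEFER here is treated as an error.
	 */
	return example_edp_register_output(edp);
}

static int example_edp_probe(struct platform_device *pdev)
{
	struct example_edp *edp;
	int ret;

	edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL);
	if (!edp)
		return -ENOMEM;

	edp->aux.dev = &pdev->dev;
	/* edp->aux.transfer = example_edp_aux_transfer; then: */
	drm_dp_aux_init(&edp->aux);

	ret = devm_of_dp_aux_populate_bus(&edp->aux, example_edp_done_probing);
	if (ret == -ENODEV)	/* no "aux-bus" child in DT: no eDP panel */
		return example_edp_register_output(edp);

	return ret;
}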
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index e7c22c2ca90c..e5bab236b3ae 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -32,6 +33,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_panel.h>
@@ -1597,7 +1599,7 @@ static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
* Calculate the length of the i2c transfer in usec, assuming
* the i2c bus speed is as specified. Gives the "worst"
* case estimate, ie. successful while as long as possible.
- * Doesn't account the the "MOT" bit, and instead assumes each
+ * Doesn't account the "MOT" bit, and instead assumes each
* message includes a START, ADDRESS and STOP. Neither does it
* account for additional random variables such as clock stretching.
*/
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 9aa2c20904e3..57e65423e50d 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -42,6 +42,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -2666,24 +2667,14 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
}
list_for_each_entry(port, &mstb->ports, next) {
- struct drm_dp_mst_branch *mstb_child = NULL;
-
- if (port->input || !port->ddps)
+ if (port->input || !port->ddps || !port->mstb)
continue;
- if (port->mstb)
- mstb_child = drm_dp_mst_topology_get_mstb_validated(
- mgr, port->mstb);
-
- if (mstb_child) {
- ret = drm_dp_check_and_send_link_address(mgr,
- mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- if (ret == 1)
- changed = true;
- else if (ret < 0)
- return ret;
- }
+ ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
}
return changed;
@@ -3860,9 +3851,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->mst_primary)
goto out_fail;
- ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
- DP_RECEIVER_CAP_SIZE);
- if (ret != DP_RECEIVER_CAP_SIZE) {
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -4851,6 +4840,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
+ kfree(mst_edid);
}
/**
@@ -4910,8 +4900,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
u8 buf[DP_PAYLOAD_TABLE_SIZE];
int ret;
- ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
- if (ret) {
+ if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
seq_printf(m, "dpcd read failed\n");
goto out;
}
@@ -5467,8 +5456,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
*
* This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
- * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
- * to care of the locking, so warn if don't hold the connection_mutex.
+ * topology object.
*
* RETURNS:
*
diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
index 74bd4a76b253..fdb7d5c17ba1 100644
--- a/drivers/gpu/drm/drm_aperture.c
+++ b/drivers/gpu/drm/drm_aperture.c
@@ -1,14 +1,7 @@
// SPDX-License-Identifier: MIT
-#include <linux/device.h>
-#include <linux/fb.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h> /* for firmware helpers */
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/vgaarb.h>
+#include <linux/aperture.h>
+#include <linux/platform_device.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
@@ -126,92 +119,6 @@
* afterwards.
*/
-struct drm_aperture {
- struct drm_device *dev;
- resource_size_t base;
- resource_size_t size;
- struct list_head lh;
- void (*detach)(struct drm_device *dev);
-};
-
-static LIST_HEAD(drm_apertures);
-static DEFINE_MUTEX(drm_apertures_lock);
-
-static bool overlap(resource_size_t base1, resource_size_t end1,
- resource_size_t base2, resource_size_t end2)
-{
- return (base1 < end2) && (end1 > base2);
-}
-
-static void devm_aperture_acquire_release(void *data)
-{
- struct drm_aperture *ap = data;
- bool detached = !ap->dev;
-
- if (detached)
- return;
-
- mutex_lock(&drm_apertures_lock);
- list_del(&ap->lh);
- mutex_unlock(&drm_apertures_lock);
-}
-
-static int devm_aperture_acquire(struct drm_device *dev,
- resource_size_t base, resource_size_t size,
- void (*detach)(struct drm_device *))
-{
- size_t end = base + size;
- struct list_head *pos;
- struct drm_aperture *ap;
-
- mutex_lock(&drm_apertures_lock);
-
- list_for_each(pos, &drm_apertures) {
- ap = container_of(pos, struct drm_aperture, lh);
- if (overlap(base, end, ap->base, ap->base + ap->size)) {
- mutex_unlock(&drm_apertures_lock);
- return -EBUSY;
- }
- }
-
- ap = devm_kzalloc(dev->dev, sizeof(*ap), GFP_KERNEL);
- if (!ap) {
- mutex_unlock(&drm_apertures_lock);
- return -ENOMEM;
- }
-
- ap->dev = dev;
- ap->base = base;
- ap->size = size;
- ap->detach = detach;
- INIT_LIST_HEAD(&ap->lh);
-
- list_add(&ap->lh, &drm_apertures);
-
- mutex_unlock(&drm_apertures_lock);
-
- return devm_add_action_or_reset(dev->dev, devm_aperture_acquire_release, ap);
-}
-
-static void drm_aperture_detach_firmware(struct drm_device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev->dev);
-
- /*
- * Remove the device from the device hierarchy. This is the right thing
- * to do for firmware-based DRM drivers, such as EFI, VESA or VGA. After
- * the new driver takes over the hardware, the firmware device's state
- * will be lost.
- *
- * For non-platform devices, a new callback would be required.
- *
- * If the aperture helpers ever need to handle native drivers, this call
- * would only have to unplug the DRM device, so that the hardware device
- * stays around after detachment.
- */
- platform_device_unregister(pdev);
-}
-
/**
* devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer
* on behalf of a DRM driver.
@@ -239,39 +146,16 @@ static void drm_aperture_detach_firmware(struct drm_device *dev)
int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
resource_size_t size)
{
+ struct platform_device *pdev;
+
if (drm_WARN_ON(dev, !dev_is_platform(dev->dev)))
return -EINVAL;
- return devm_aperture_acquire(dev, base, size, drm_aperture_detach_firmware);
-}
-EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
-
-static void drm_aperture_detach_drivers(resource_size_t base, resource_size_t size)
-{
- resource_size_t end = base + size;
- struct list_head *pos, *n;
-
- mutex_lock(&drm_apertures_lock);
-
- list_for_each_safe(pos, n, &drm_apertures) {
- struct drm_aperture *ap =
- container_of(pos, struct drm_aperture, lh);
- struct drm_device *dev = ap->dev;
-
- if (WARN_ON_ONCE(!dev))
- continue;
-
- if (!overlap(base, end, ap->base, ap->base + ap->size))
- continue;
+ pdev = to_platform_device(dev->dev);
- ap->dev = NULL; /* detach from device */
- list_del(&ap->lh);
-
- ap->detach(dev);
- }
-
- mutex_unlock(&drm_apertures_lock);
+ return devm_aperture_acquire_for_platform_device(pdev, base, size);
}
+EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
/**
* drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
@@ -289,27 +173,7 @@ static void drm_aperture_detach_drivers(resource_size_t base, resource_size_t si
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
bool primary, const struct drm_driver *req_driver)
{
-#if IS_REACHABLE(CONFIG_FB)
- struct apertures_struct *a;
- int ret;
-
- a = alloc_apertures(1);
- if (!a)
- return -ENOMEM;
-
- a->ranges[0].base = base;
- a->ranges[0].size = size;
-
- ret = remove_conflicting_framebuffers(a, req_driver->name, primary);
- kfree(a);
-
- if (ret)
- return ret;
-#endif
-
- drm_aperture_detach_drivers(base, size);
-
- return 0;
+ return aperture_remove_conflicting_devices(base, size, primary, req_driver->name);
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
@@ -328,26 +192,6 @@ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const struct drm_driver *req_driver)
{
- resource_size_t base, size;
- int bar, ret = 0;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
- continue;
- base = pci_resource_start(pdev, bar);
- size = pci_resource_len(pdev, bar);
- drm_aperture_detach_drivers(base, size);
- }
-
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
-#endif
- if (ret == 0)
- ret = vga_remove_vgacon(pdev);
- return ret;
+ return aperture_remove_conflicting_pci_devices(pdev, req_driver->name);
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
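
For reference, the typical caller is unchanged by this rework, since only the implementation now forwards to the generic aperture helpers; a minimal sketch with hypothetical driver names:

/* Sketch; assumes <drm/drm_aperture.h> and <drm/drm_drv.h>. */
static const struct drm_driver example_driver;	/* hypothetical drm_driver */

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int ret;

	/* Kick out firmware framebuffers (and vgacon) before taking over. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &example_driver);
	if (ret)
		return ret;

	/* ... pcim_enable_device(), devm_drm_dev_alloc(), drm_dev_register() ... */
	return 0;
}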
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 58c0283fb6b0..f197f59f6d99 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -31,12 +31,14 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 9603193d2fa1..8bf41aa24068 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -31,10 +31,12 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
@@ -876,6 +878,61 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
/**
+ * drm_atomic_helper_check_crtc_state() - Check CRTC state for validity
+ * @crtc_state: CRTC state to check
+ * @can_disable_primary_planes: can the CRTC be enabled without a primary plane?
+ *
+ * Checks that a desired CRTC update is valid. Drivers that provide
+ * their own CRTC handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * Note that @can_disable_primary_planes only tests if the CRTC can be
+ * enabled without a primary plane. To test if a primary plane can be updated
+ * without a CRTC, use drm_atomic_helper_check_plane_state() in the plane's
+ * atomic check.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
+ bool can_disable_primary_planes)
+{
+ struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_atomic_state *state = crtc_state->state;
+
+ if (!crtc_state->enable)
+ return 0;
+
+ /* needs at least one primary plane to be enabled */
+ if (!can_disable_primary_planes) {
+ bool has_primary_plane = false;
+ struct drm_plane *plane;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ struct drm_plane_state *plane_state;
+
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+ continue;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ if (plane_state->fb && plane_state->crtc) {
+ has_primary_plane = true;
+ break;
+ }
+ }
+ if (!has_primary_plane) {
+ drm_dbg_kms(dev, "Cannot enable CRTC without a primary plane.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_crtc_state);
+
+/**
* drm_atomic_helper_check_planes - validate state object for planes changes
* @dev: DRM device
* @state: the driver state object
@@ -1011,9 +1068,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
return drm_atomic_crtc_effectively_active(old_state);
/*
- * We need to run through the crtc_funcs->disable() function if the CRTC
- * is currently on, if it's transitioning to self refresh mode, or if
- * it's in self refresh mode and needs to be fully disabled.
+ * We need to disable bridge(s) and CRTC if we're transitioning out of
+ * self-refresh and changing CRTCs at the same time, because the
+ * bridge tracks self-refresh status via CRTC state.
+ */
+ if (old_state->self_refresh_active &&
+ old_state->crtc != new_state->crtc)
+ return true;
+
+ /*
+ * We also need to run through the crtc_funcs->disable() function if
+ * the CRTC is currently on, if it's transitioning to self refresh
+ * mode, or if it's in self refresh mode and needs to be fully
+ * disabled.
*/
return old_state->active ||
(old_state->self_refresh_active && !new_state->active) ||
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 3b6d3bdbd099..bf31b9d92094 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -26,10 +26,12 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 434f3d4cb8a2..79730fa1dd8e 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 46ee5d5df6b4..b4c8cab7158c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -80,7 +80,7 @@
*
* Note that the source rectangle must fully lie within the bounds of the
* &drm_framebuffer. The destination rectangle can lie outside of the visible
- * area of the current mode of the CRTC. It must be apprpriately clipped by the
+ * area of the current mode of the CRTC. It must be appropriately clipped by the
* driver, which can be done by calling drm_plane_helper_check_update(). Drivers
* are also allowed to round the subpixel sampling positions appropriately, but
* only to the next full pixel. No pixel outside of the source rectangle may
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c96847fc0ebc..6abf7a2407e9 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -22,6 +22,7 @@
*/
#include <linux/err.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -170,6 +171,29 @@ void drm_bridge_add(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_add);
+static void drm_bridge_remove_void(void *bridge)
+{
+ drm_bridge_remove(bridge);
+}
+
+/**
+ * devm_drm_bridge_add - devm managed version of drm_bridge_add()
+ *
+ * @dev: device to tie the bridge lifetime to
+ * @bridge: bridge control structure
+ *
+ * This is the managed version of drm_bridge_add() which automatically
+ * calls drm_bridge_remove() when @dev is unbound.
+ *
+ * Return: 0 if no error or negative error code.
+ */
+int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
+{
+ drm_bridge_add(bridge);
+ return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
+}
+EXPORT_SYMBOL(devm_drm_bridge_add);
+
/**
* drm_bridge_remove - remove the given bridge from the global bridge list
*
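
As a usage note, the devm variant lets a bridge driver drop its drm_bridge_remove() calls entirely; a hypothetical probe might reduce to the sketch below:

/* Sketch; example_ identifiers are placeholders. */
struct example_bridge {
	struct drm_bridge bridge;
};

static const struct drm_bridge_funcs example_bridge_funcs;	/* hypothetical ops */

static int example_bridge_probe(struct platform_device *pdev)
{
	struct example_bridge *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bridge.funcs = &example_bridge_funcs;
	priv->bridge.of_node = pdev->dev.of_node;

	/* drm_bridge_remove() runs automatically when the device unbinds. */
	return devm_drm_bridge_add(&pdev->dev, &priv->bridge);
}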
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 6b3dad03d77d..1c7d936523df 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -331,7 +331,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge_connector *bridge_connector;
struct drm_connector *connector;
struct i2c_adapter *ddc = NULL;
- struct drm_bridge *bridge;
+ struct drm_bridge *bridge, *panel_bridge = NULL;
int connector_type;
bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
@@ -373,6 +373,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge->ddc)
ddc = bridge->ddc;
+
+ if (drm_bridge_is_panel(bridge))
+ panel_bridge = bridge;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
@@ -392,6 +395,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
+ if (panel_bridge)
+ drm_panel_bridge_set_orientation(connector, panel_bridge);
+
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index e6346a67cd98..bbc535cc50dd 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
@@ -158,22 +159,31 @@ drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int
return NULL;
}
-static struct drm_display_mode *
-drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
bool prefer_non_interlace;
+ /*
+ * Find a user-defined mode. If the user gave us a valid
+ * mode on the kernel command line, it will show up in this
+ * list.
+ */
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->type & DRM_MODE_TYPE_USERDEF)
+ return mode;
+ }
+
cmdline_mode = &connector->cmdline_mode;
if (cmdline_mode->specified == false)
return NULL;
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
+ /*
+ * Attempt to find a matching mode in the list of modes we
+ * have gotten so far.
*/
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
prefer_non_interlace = !cmdline_mode->interlace;
again:
@@ -207,12 +217,7 @@ again:
goto again;
}
-create_mode:
- mode = drm_mode_create_from_cmdline_mode(connector->dev, cmdline_mode);
- if (mode)
- list_add(&mode->head, &connector->modes);
-
- return mode;
+ return NULL;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 1c48d162c77e..1ab083b35e3b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -24,6 +24,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_panel.h>
#include <drm/drm_utils.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
@@ -31,6 +32,7 @@
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_sysfs.h>
+#include <linux/fb.h>
#include <linux/uaccess.h>
#include "drm_crtc_internal.h"
@@ -250,7 +252,7 @@ int drm_connector_init(struct drm_device *dev,
connector->funcs = funcs;
/* connector index is used with 32bit bitmasks */
- ret = ida_simple_get(&config->connector_ida, 0, 32, GFP_KERNEL);
+ ret = ida_alloc_max(&config->connector_ida, 31, GFP_KERNEL);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to allocate %s connector index: %d\n",
drm_connector_enum_list[connector_type].name,
@@ -262,7 +264,7 @@ int drm_connector_init(struct drm_device *dev,
connector->connector_type = connector_type;
connector->connector_type_id =
- ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+ ida_alloc_min(connector_ida, 1, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
goto out_put_id;
@@ -322,10 +324,10 @@ int drm_connector_init(struct drm_device *dev,
connector->debugfs_entry = NULL;
out_put_type_id:
if (ret)
- ida_simple_remove(connector_ida, connector->connector_type_id);
+ ida_free(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
- ida_simple_remove(&config->connector_ida, connector->index);
+ ida_free(&config->connector_ida, connector->index);
out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
@@ -479,11 +481,10 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
- ida_simple_remove(&drm_connector_enum_list[connector->connector_type].ida,
+ ida_free(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
- ida_simple_remove(&dev->mode_config.connector_ida,
- connector->index);
+ ida_free(&dev->mode_config.connector_ida, connector->index);
kfree(connector->display_info.bus_formats);
drm_mode_object_unregister(dev, &connector->base);
@@ -2078,80 +2079,6 @@ int drm_connector_set_tile_property(struct drm_connector *connector)
EXPORT_SYMBOL(drm_connector_set_tile_property);
/**
- * drm_connector_update_edid_property - update the edid property of a connector
- * @connector: drm connector
- * @edid: new value of the edid property
- *
- * This function creates a new blob modeset object and assigns its id to the
- * connector's edid property.
- * Since we also parse tile information from EDID's displayID block, we also
- * set the connector's tile property here. See drm_connector_set_tile_property()
- * for more details.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- size_t size = 0;
- int ret;
- const struct edid *old_edid;
-
- /* ignore requests to set edid when overridden */
- if (connector->override_edid)
- return 0;
-
- if (edid)
- size = EDID_LENGTH * (1 + edid->extensions);
-
- /* Set the display info, using edid if available, otherwise
- * resetting the values to defaults. This duplicates the work
- * done in drm_add_edid_modes, but that function is not
- * consistently called before this one in all drivers and the
- * computation is cheap enough that it seems better to
- * duplicate it rather than attempt to ensure some arbitrary
- * ordering of calls.
- */
- if (edid)
- drm_add_display_info(connector, edid);
- else
- drm_reset_display_info(connector);
-
- drm_update_tile_info(connector, edid);
-
- if (connector->edid_blob_ptr) {
- old_edid = (const struct edid *)connector->edid_blob_ptr->data;
- if (old_edid) {
- if (!drm_edid_are_equal(edid, old_edid)) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Edid was changed.\n",
- connector->base.id, connector->name);
-
- connector->epoch_counter += 1;
- DRM_DEBUG_KMS("Updating change counter to %llu\n",
- connector->epoch_counter);
- }
- }
- }
-
- drm_object_property_set_value(&connector->base,
- dev->mode_config.non_desktop_property,
- connector->display_info.non_desktop);
-
- ret = drm_property_replace_global_blob(dev,
- &connector->edid_blob_ptr,
- size,
- edid,
- &connector->base,
- dev->mode_config.edid_property);
- if (ret)
- return ret;
- return drm_connector_set_tile_property(connector);
-}
-EXPORT_SYMBOL(drm_connector_update_edid_property);
-
-/**
* drm_connector_set_link_status_property - Set link status property of a connector
* @connector: drm connector
* @link_status: new value of link status property (0: Good, 1: Bad)
@@ -2320,6 +2247,9 @@ EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
* It is allowed to call this function with a panel_orientation of
* DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
*
+ * This function shouldn't be called by the panel driver after the DRM device
+ * has been registered (i.e. after drm_dev_register() has been called).
+ *
* Returns:
* Zero on success, negative errno on failure.
*/
@@ -2389,6 +2319,33 @@ int drm_connector_set_panel_orientation_with_quirk(
}
EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
+/**
+ * drm_connector_set_orientation_from_panel -
+ * set the connector's panel_orientation from the panel's callback.
+ * @connector: connector for which to init the panel-orientation property.
+ * @panel: panel that can provide orientation information.
+ *
+ * DRM drivers should call this function before drm_dev_register().
+ * The orientation is obtained from the panel's .get_orientation() callback.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_orientation_from_panel(
+ struct drm_connector *connector,
+ struct drm_panel *panel)
+{
+ enum drm_panel_orientation orientation;
+
+ if (panel && panel->funcs && panel->funcs->get_orientation)
+ orientation = panel->funcs->get_orientation(panel);
+ else
+ orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+ return drm_connector_set_panel_orientation(connector, orientation);
+}
+EXPORT_SYMBOL(drm_connector_set_orientation_from_panel);
+
static const struct drm_prop_enum_list privacy_screen_enum[] = {
{ PRIVACY_SCREEN_DISABLED, "Disabled" },
{ PRIVACY_SCREEN_ENABLED, "Enabled" },
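
A short sketch of the intended use, with hypothetical panel/driver names: the panel reports its mounting orientation via the new .get_orientation hook, and the KMS driver applies it to the connector before drm_dev_register():

/* Sketch; assumes <drm/drm_connector.h> and <drm/drm_panel.h>. */
static enum drm_panel_orientation
example_panel_get_orientation(struct drm_panel *panel)
{
	return DRM_MODE_PANEL_ORIENTATION_LEFT_UP;	/* e.g. from DT or a quirk */
}

static const struct drm_panel_funcs example_panel_funcs = {
	.get_orientation = example_panel_get_orientation,
	/* .prepare, .enable, .get_modes, ... */
};

/* In the KMS driver, after creating the connector and finding the panel: */
static int example_connector_init(struct drm_connector *connector,
				  struct drm_panel *panel)
{
	return drm_connector_set_orientation_from_panel(connector, panel);
}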
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 26a77a735905..cad2a7e5166f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -35,9 +35,11 @@
#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index b632825654a9..8a6d54515f92 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -44,6 +44,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 63279e984342..56041b604881 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -286,6 +286,5 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
/* drm_edid.c */
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
-void drm_reset_display_info(struct drm_connector *connector);
-u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
-void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
+int drm_edid_override_set(struct drm_connector *connector, const void *edid, size_t size);
+int drm_edid_override_reset(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 8eeff0c7bdd4..937b699ac2a8 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -33,6 +33,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
struct drm_mode_rect *dest,
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 7f1b82dbaebb..493922069c90 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -350,31 +350,20 @@ static ssize_t edid_write(struct file *file, const char __user *ubuf,
struct seq_file *m = file->private_data;
struct drm_connector *connector = m->private;
char *buf;
- struct edid *edid;
int ret;
buf = memdup_user(ubuf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
- edid = (struct edid *) buf;
-
- if (len == 5 && !strncmp(buf, "reset", 5)) {
- connector->override_edid = false;
- ret = drm_connector_update_edid_property(connector, NULL);
- } else if (len < EDID_LENGTH ||
- EDID_LENGTH * (1 + edid->extensions) > len)
- ret = -EINVAL;
- else {
- connector->override_edid = false;
- ret = drm_connector_update_edid_property(connector, edid);
- if (!ret)
- connector->override_edid = true;
- }
+ if (len == 5 && !strncmp(buf, "reset", 5))
+ ret = drm_edid_override_reset(connector);
+ else
+ ret = drm_edid_override_set(connector, buf, len);
kfree(buf);
- return (ret) ? ret : len;
+ return ret ? ret : len;
}
/*
@@ -395,6 +384,23 @@ static int vrr_range_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(vrr_range);
+/*
+ * Returns the connector's maximum supported bpc through a debugfs file.
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
+ */
+static int output_bpc_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(output_bpc);
+
static const struct file_operations drm_edid_fops = {
.owner = THIS_MODULE,
.open = edid_open,
@@ -437,6 +443,10 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
debugfs_create_file("vrr_range", S_IRUGO, root, connector,
&vrr_range_fops);
+ /* max bpc */
+ debugfs_create_file("output_bpc", 0444, root, connector,
+ &output_bpc_fops);
+
if (connector->funcs->debugfs_init)
connector->funcs->debugfs_init(connector, root);
}
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 32da557b960f..38ea8203df45 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -33,11 +33,11 @@ static int validate_displayid(const u8 *displayid, int length, int idx)
return 0;
}
-static const u8 *drm_find_displayid_extension(const struct edid *edid,
+static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
int *length, int *idx,
int *ext_index)
{
- const u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT, ext_index);
+ const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
int ret;
@@ -58,12 +58,12 @@ static const u8 *drm_find_displayid_extension(const struct edid *edid,
return displayid;
}
-void displayid_iter_edid_begin(const struct edid *edid,
+void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
- iter->edid = edid;
+ iter->drm_edid = drm_edid;
}
static const struct displayid_block *
@@ -88,7 +88,7 @@ __displayid_iter_next(struct displayid_iter *iter)
{
const struct displayid_block *block;
- if (!iter->edid)
+ if (!iter->drm_edid)
return NULL;
if (iter->section) {
@@ -96,7 +96,7 @@ __displayid_iter_next(struct displayid_iter *iter)
block = displayid_iter_block(iter);
if (WARN_ON(!block)) {
iter->section = NULL;
- iter->edid = NULL;
+ iter->drm_edid = NULL;
return NULL;
}
@@ -109,12 +109,12 @@ __displayid_iter_next(struct displayid_iter *iter)
}
for (;;) {
- iter->section = drm_find_displayid_extension(iter->edid,
+ iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
&iter->ext_index);
if (!iter->section) {
- iter->edid = NULL;
+ iter->drm_edid = NULL;
return NULL;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index bc43e1b32092..bbc25e3b7220 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -45,10 +45,6 @@
#include "drm_crtc_internal.h"
-#define version_greater(edid, maj, min) \
- (((edid)->version > (maj)) || \
- ((edid)->version == (maj) && (edid)->revision > (min)))
-
static int oui(u8 first, u8 second, u8 third)
{
return (first << 16) | (second << 8) | third;
@@ -96,7 +92,7 @@ static int oui(u8 first, u8 second, u8 third)
struct detailed_mode_closure {
struct drm_connector *connector;
- const struct edid *edid;
+ const struct drm_edid *drm_edid;
bool preferred;
u32 quirks;
int modes;
@@ -1567,6 +1563,33 @@ static const struct drm_display_mode edid_4k_modes[] = {
/*** DDC fetch and block validation ***/
+/*
+ * The opaque EDID type, internal to drm_edid.c.
+ */
+struct drm_edid {
+ /* Size allocated for edid */
+ size_t size;
+ const struct edid *edid;
+};
+
+static bool version_greater(const struct drm_edid *drm_edid,
+ u8 version, u8 revision)
+{
+ const struct edid *edid = drm_edid->edid;
+
+ return edid->version > version ||
+ (edid->version == version && edid->revision > revision);
+}
+
+static int edid_hfeeodb_extension_block_count(const struct edid *edid);
+
+static int edid_hfeeodb_block_count(const struct edid *edid)
+{
+ int eeodb = edid_hfeeodb_extension_block_count(edid);
+
+ return eeodb ? eeodb + 1 : 0;
+}
+
static int edid_extension_block_count(const struct edid *edid)
{
return edid->extensions;
@@ -1599,6 +1622,114 @@ static const void *edid_extension_block_data(const struct edid *edid, int index)
return edid_block_data(edid, index + 1);
}
+static int drm_edid_block_count(const struct drm_edid *drm_edid)
+{
+ int num_blocks;
+
+ /* Starting point */
+ num_blocks = edid_block_count(drm_edid->edid);
+
+ /* HF-EEODB override */
+ if (drm_edid->size >= edid_size_by_blocks(2)) {
+ int eeodb;
+
+ /*
+ * Note: HF-EEODB may specify a smaller extension count than the
+ * regular one. Unlike in buffer allocation, here we can use it.
+ */
+ eeodb = edid_hfeeodb_block_count(drm_edid->edid);
+ if (eeodb)
+ num_blocks = eeodb;
+ }
+
+ /* Limit by allocated size */
+ num_blocks = min(num_blocks, (int)drm_edid->size / EDID_LENGTH);
+
+ return num_blocks;
+}
+
+static int drm_edid_extension_block_count(const struct drm_edid *drm_edid)
+{
+ return drm_edid_block_count(drm_edid) - 1;
+}
+
+static const void *drm_edid_block_data(const struct drm_edid *drm_edid, int index)
+{
+ return edid_block_data(drm_edid->edid, index);
+}
+
+static const void *drm_edid_extension_block_data(const struct drm_edid *drm_edid,
+ int index)
+{
+ return edid_extension_block_data(drm_edid->edid, index);
+}
+
+/*
+ * Initializer helper for legacy interfaces, where we have no choice but to
+ * trust edid size. Not for general purpose use.
+ */
+static const struct drm_edid *drm_edid_legacy_init(struct drm_edid *drm_edid,
+ const struct edid *edid)
+{
+ if (!edid)
+ return NULL;
+
+ memset(drm_edid, 0, sizeof(*drm_edid));
+
+ drm_edid->edid = edid;
+ drm_edid->size = edid_size(edid);
+
+ return drm_edid;
+}
+
+/*
+ * EDID base and extension block iterator.
+ *
+ * struct drm_edid_iter iter;
+ * const u8 *block;
+ *
+ * drm_edid_iter_begin(drm_edid, &iter);
+ * drm_edid_iter_for_each(block, &iter) {
+ * // do stuff with block
+ * }
+ * drm_edid_iter_end(&iter);
+ */
+struct drm_edid_iter {
+ const struct drm_edid *drm_edid;
+
+ /* Current block index. */
+ int index;
+};
+
+static void drm_edid_iter_begin(const struct drm_edid *drm_edid,
+ struct drm_edid_iter *iter)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ iter->drm_edid = drm_edid;
+}
+
+static const void *__drm_edid_iter_next(struct drm_edid_iter *iter)
+{
+ const void *block = NULL;
+
+ if (!iter->drm_edid)
+ return NULL;
+
+ if (iter->index < drm_edid_block_count(iter->drm_edid))
+ block = drm_edid_block_data(iter->drm_edid, iter->index++);
+
+ return block;
+}
+
+#define drm_edid_iter_for_each(__block, __iter) \
+ while (((__block) = __drm_edid_iter_next(__iter)))
+
+static void drm_edid_iter_end(struct drm_edid_iter *iter)
+{
+ memset(iter, 0, sizeof(*iter));
+}
+
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
@@ -1911,30 +2042,42 @@ bool drm_edid_is_valid(struct edid *edid)
}
EXPORT_SYMBOL(drm_edid_is_valid);
-static struct edid *edid_filter_invalid_blocks(const struct edid *edid,
- int invalid_blocks)
+static struct edid *edid_filter_invalid_blocks(struct edid *edid,
+ size_t *alloc_size)
{
- struct edid *new, *dest_block;
- int valid_extensions = edid->extensions - invalid_blocks;
- int i;
+ struct edid *new;
+ int i, valid_blocks = 0;
- new = kmalloc(edid_size_by_blocks(valid_extensions + 1), GFP_KERNEL);
- if (!new)
- goto out;
-
- dest_block = new;
+ /*
+ * Note: If the EDID uses HF-EEODB, but has invalid blocks, we'll revert
+ * to the regular extension count here. We don't want to start
+ * modifying the HF-EEODB extension too.
+ */
for (i = 0; i < edid_block_count(edid); i++) {
- const void *block = edid_block_data(edid, i);
+ const void *src_block = edid_block_data(edid, i);
+
+ if (edid_block_valid(src_block, i == 0)) {
+ void *dst_block = (void *)edid_block_data(edid, valid_blocks);
- if (edid_block_valid(block, i == 0))
- memcpy(dest_block++, block, EDID_LENGTH);
+ memmove(dst_block, src_block, EDID_LENGTH);
+ valid_blocks++;
+ }
}
- new->extensions = valid_extensions;
- new->checksum = edid_block_compute_checksum(new);
+ /* We already trusted the base block to be valid here... */
+ if (WARN_ON(!valid_blocks)) {
+ kfree(edid);
+ return NULL;
+ }
-out:
- kfree(edid);
+ edid->extensions = valid_blocks - 1;
+ edid->checksum = edid_block_compute_checksum(edid);
+
+ *alloc_size = edid_size_by_blocks(valid_blocks);
+
+ new = krealloc(edid, *alloc_size, GFP_KERNEL);
+ if (!new)
+ kfree(edid);
return new;
}
@@ -2031,7 +2174,8 @@ static void connector_bad_edid(struct drm_connector *connector,
}
/* Get override or firmware EDID */
-static struct edid *drm_get_override_edid(struct drm_connector *connector)
+static struct edid *drm_get_override_edid(struct drm_connector *connector,
+ size_t *alloc_size)
{
struct edid *override = NULL;
@@ -2041,9 +2185,39 @@ static struct edid *drm_get_override_edid(struct drm_connector *connector)
if (!override)
override = drm_load_edid_firmware(connector);
+ /* FIXME: Get alloc size from deeper down the stack */
+ if (!IS_ERR_OR_NULL(override) && alloc_size)
+ *alloc_size = edid_size(override);
+
return IS_ERR(override) ? NULL : override;
}
+/* For debugfs edid_override implementation */
+int drm_edid_override_set(struct drm_connector *connector, const void *edid,
+ size_t size)
+{
+ int ret;
+
+ if (size < EDID_LENGTH || edid_size(edid) > size)
+ return -EINVAL;
+
+ connector->override_edid = false;
+
+ ret = drm_connector_update_edid_property(connector, edid);
+ if (!ret)
+ connector->override_edid = true;
+
+ return ret;
+}
+
+/* For debugfs edid_override implementation */
+int drm_edid_override_reset(struct drm_connector *connector)
+{
+ connector->override_edid = false;
+
+ return drm_connector_update_edid_property(connector, NULL);
+}
+
/**
* drm_add_override_edid_modes - add modes from override/firmware EDID
* @connector: connector we're probing
@@ -2060,7 +2234,7 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
struct edid *override;
int num_modes = 0;
- override = drm_get_override_edid(connector);
+ override = drm_get_override_edid(connector, NULL);
if (override) {
drm_connector_update_edid_property(connector, override);
num_modes = drm_add_edid_modes(connector, override);
@@ -2109,39 +2283,20 @@ static enum edid_block_status edid_block_read(void *block, unsigned int block_nu
return status;
}
-/**
- * drm_do_get_edid - get EDID data using a custom EDID block read function
- * @connector: connector we're probing
- * @read_block: EDID block read function
- * @context: private data passed to the block read function
- *
- * When the I2C adapter connected to the DDC bus is hidden behind a device that
- * exposes a different interface to read EDID blocks this function can be used
- * to get EDID data using a custom block read function.
- *
- * As in the general case the DDC bus is accessible by the kernel at the I2C
- * level, drivers must make all reasonable efforts to expose it as an I2C
- * adapter and use drm_get_edid() instead of abusing this function.
- *
- * The EDID may be overridden using debugfs override_edid or firmware EDID
- * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
- * order. Having either of them bypasses actual EDID reads.
- *
- * Return: Pointer to valid EDID or NULL if we couldn't find any.
- */
-struct edid *drm_do_get_edid(struct drm_connector *connector,
- read_block_fn read_block,
- void *context)
+static struct edid *_drm_do_get_edid(struct drm_connector *connector,
+ read_block_fn read_block, void *context,
+ size_t *size)
{
enum edid_block_status status;
- int i, invalid_blocks = 0;
+ int i, num_blocks, invalid_blocks = 0;
struct edid *edid, *new;
+ size_t alloc_size = EDID_LENGTH;
- edid = drm_get_override_edid(connector);
+ edid = drm_get_override_edid(connector, &alloc_size);
if (edid)
goto ok;
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ edid = kmalloc(alloc_size, GFP_KERNEL);
if (!edid)
return NULL;
@@ -2169,12 +2324,14 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
if (!edid_extension_block_count(edid))
goto ok;
- new = krealloc(edid, edid_size(edid), GFP_KERNEL);
+ alloc_size = edid_size(edid);
+ new = krealloc(edid, alloc_size, GFP_KERNEL);
if (!new)
goto fail;
edid = new;
- for (i = 1; i < edid_block_count(edid); i++) {
+ num_blocks = edid_block_count(edid);
+ for (i = 1; i < num_blocks; i++) {
void *block = (void *)edid_block_data(edid, i);
status = edid_block_read(block, i, read_block, context);
@@ -2185,25 +2342,182 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
if (status == EDID_BLOCK_READ_FAIL)
goto fail;
invalid_blocks++;
+ } else if (i == 1) {
+ /*
+ * If the first EDID extension is a CTA extension, and
+ * the first Data Block is HF-EEODB, override the
+ * extension block count.
+ *
+ * Note: HF-EEODB could specify a smaller extension
+ * count too, but we can't risk allocating a smaller
+ * amount.
+ */
+ int eeodb = edid_hfeeodb_block_count(edid);
+
+ if (eeodb > num_blocks) {
+ num_blocks = eeodb;
+ alloc_size = edid_size_by_blocks(num_blocks);
+ new = krealloc(edid, alloc_size, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ edid = new;
+ }
}
}
if (invalid_blocks) {
- connector_bad_edid(connector, edid, edid_block_count(edid));
+ connector_bad_edid(connector, edid, num_blocks);
- edid = edid_filter_invalid_blocks(edid, invalid_blocks);
+ edid = edid_filter_invalid_blocks(edid, &alloc_size);
}
ok:
+ if (size)
+ *size = alloc_size;
+
return edid;
fail:
kfree(edid);
return NULL;
}
+
+/**
+ * drm_do_get_edid - get EDID data using a custom EDID block read function
+ * @connector: connector we're probing
+ * @read_block: EDID block read function
+ * @context: private data passed to the block read function
+ *
+ * When the I2C adapter connected to the DDC bus is hidden behind a device that
+ * exposes a different interface to read EDID blocks this function can be used
+ * to get EDID data using a custom block read function.
+ *
+ * As in the general case the DDC bus is accessible by the kernel at the I2C
+ * level, drivers must make all reasonable efforts to expose it as an I2C
+ * adapter and use drm_get_edid() instead of abusing this function.
+ *
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ read_block_fn read_block,
+ void *context)
+{
+ return _drm_do_get_edid(connector, read_block, context, NULL);
+}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
/**
+ * drm_edid_raw - Get a pointer to the raw EDID data.
+ * @drm_edid: drm_edid container
+ *
+ * Get a pointer to the raw EDID data.
+ *
+ * This is for transition only. Avoid using this like the plague.
+ *
+ * Return: Pointer to raw EDID data.
+ */
+const struct edid *drm_edid_raw(const struct drm_edid *drm_edid)
+{
+ if (!drm_edid || !drm_edid->size)
+ return NULL;
+
+ /*
+ * Do not return pointers where relying on EDID extension count would
+ * lead to buffer overflow.
+ */
+ if (WARN_ON(edid_size(drm_edid->edid) > drm_edid->size))
+ return NULL;
+
+ return drm_edid->edid;
+}
+EXPORT_SYMBOL(drm_edid_raw);
+
+/* Allocate struct drm_edid container *without* duplicating the edid data */
+static const struct drm_edid *_drm_edid_alloc(const void *edid, size_t size)
+{
+ struct drm_edid *drm_edid;
+
+ if (!edid || !size || size < EDID_LENGTH)
+ return NULL;
+
+ drm_edid = kzalloc(sizeof(*drm_edid), GFP_KERNEL);
+ if (drm_edid) {
+ drm_edid->edid = edid;
+ drm_edid->size = size;
+ }
+
+ return drm_edid;
+}
+
+/**
+ * drm_edid_alloc - Allocate a new drm_edid container
+ * @edid: Pointer to raw EDID data
+ * @size: Size of memory allocated for EDID
+ *
+ * Allocate a new drm_edid container. Do not calculate the EDID size from the
+ * data; pass the actual size that has been allocated for it. There is no
+ * validation of the raw EDID data against the size, but at least the EDID
+ * base block must fit in the buffer.
+ *
+ * The returned pointer must be freed using drm_edid_free().
+ *
+ * Return: drm_edid container, or NULL on errors
+ */
+const struct drm_edid *drm_edid_alloc(const void *edid, size_t size)
+{
+ const struct drm_edid *drm_edid;
+
+ if (!edid || !size || size < EDID_LENGTH)
+ return NULL;
+
+ edid = kmemdup(edid, size, GFP_KERNEL);
+ if (!edid)
+ return NULL;
+
+ drm_edid = _drm_edid_alloc(edid, size);
+ if (!drm_edid)
+ kfree(edid);
+
+ return drm_edid;
+}
+EXPORT_SYMBOL(drm_edid_alloc);
+
+/**
+ * drm_edid_dup - Duplicate a drm_edid container
+ * @drm_edid: EDID to duplicate
+ *
+ * The returned pointer must be freed using drm_edid_free().
+ *
+ * Return: drm_edid container copy, or NULL on errors
+ */
+const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid)
+{
+ if (!drm_edid)
+ return NULL;
+
+ return drm_edid_alloc(drm_edid->edid, drm_edid->size);
+}
+EXPORT_SYMBOL(drm_edid_dup);
+
+/**
+ * drm_edid_free - Free the drm_edid container
+ * @drm_edid: EDID to free
+ */
+void drm_edid_free(const struct drm_edid *drm_edid)
+{
+ if (!drm_edid)
+ return;
+
+ kfree(drm_edid->edid);
+ kfree(drm_edid);
+}
+EXPORT_SYMBOL(drm_edid_free);
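
The alloc/dup/free trio above defines the container's lifetime; a short illustrative sketch (not part of this patch, function name hypothetical):

static void example_edid_lifetime(const void *raw, size_t size)
{
	const struct drm_edid *drm_edid, *copy;

	/* Duplicates the raw buffer; size is what was allocated, not computed. */
	drm_edid = drm_edid_alloc(raw, size);
	if (!drm_edid)
		return;

	/* Independent copy with its own data. */
	copy = drm_edid_dup(drm_edid);

	drm_edid_free(copy);
	drm_edid_free(drm_edid);	/* Frees both the container and the data. */
}
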
+
+/**
* drm_probe_ddc() - probe DDC presence
* @adapter: I2C adapter to probe
*
@@ -2239,12 +2553,118 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
return NULL;
- edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+ edid = _drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter, NULL);
drm_connector_update_edid_property(connector, edid);
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
+/**
+ * drm_edid_read_custom - Read EDID data using given EDID block read function
+ * @connector: Connector to use
+ * @read_block: EDID block read function
+ * @context: Private data passed to the block read function
+ *
+ * When the I2C adapter connected to the DDC bus is hidden behind a device that
+ * exposes a different interface to read EDID blocks this function can be used
+ * to get EDID data using a custom block read function.
+ *
+ * As in the general case the DDC bus is accessible by the kernel at the I2C
+ * level, drivers must make all reasonable efforts to expose it as an I2C
+ * adapter and use drm_edid_read() or drm_edid_read_ddc() instead of abusing
+ * this function.
+ *
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
+ * The returned pointer must be freed using drm_edid_free().
+ *
+ * Return: Pointer to EDID, or NULL if probe/read failed.
+ */
+const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
+ read_block_fn read_block,
+ void *context)
+{
+ const struct drm_edid *drm_edid;
+ struct edid *edid;
+ size_t size = 0;
+
+ edid = _drm_do_get_edid(connector, read_block, context, &size);
+ if (!edid)
+ return NULL;
+
+ /* Sanity check for now */
+ drm_WARN_ON(connector->dev, !size);
+
+ drm_edid = _drm_edid_alloc(edid, size);
+ if (!drm_edid)
+ kfree(edid);
+
+ return drm_edid;
+}
+EXPORT_SYMBOL(drm_edid_read_custom);
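
Illustrative use of drm_edid_read_custom() from a driver that can only reach the EDID through its own hardware interface. The read_block_fn signature is assumed to match the drm_do_get_edid() callback (context, buffer, block number, length), and all example_* names are hypothetical:

static int example_read_block(void *context, u8 *buf, unsigned int block,
			      size_t len)
{
	struct example_encoder *enc = context;	/* hypothetical driver data */

	/* Hypothetical hardware access; returns 0 on success. */
	return example_hw_read(enc, block * EDID_LENGTH, buf, len);
}

static const struct drm_edid *example_read_edid(struct drm_connector *connector,
						struct example_encoder *enc)
{
	return drm_edid_read_custom(connector, example_read_block, enc);
}
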
+
+/**
+ * drm_edid_read_ddc - Read EDID data using given I2C adapter
+ * @connector: Connector to use
+ * @adapter: I2C adapter to use for DDC
+ *
+ * Read EDID using the given I2C adapter.
+ *
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
+ * Prefer initializing connector->ddc with drm_connector_init_with_ddc() and
+ * using drm_edid_read() instead of this function.
+ *
+ * The returned pointer must be freed using drm_edid_free().
+ *
+ * Return: Pointer to EDID, or NULL if probe/read failed.
+ */
+const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ const struct drm_edid *drm_edid;
+
+ if (connector->force == DRM_FORCE_OFF)
+ return NULL;
+
+ if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
+ return NULL;
+
+ drm_edid = drm_edid_read_custom(connector, drm_do_probe_ddc_edid, adapter);
+
+ /* Note: Do *not* call connector updates here. */
+
+ return drm_edid;
+}
+EXPORT_SYMBOL(drm_edid_read_ddc);
+
+/**
+ * drm_edid_read - Read EDID data using connector's I2C adapter
+ * @connector: Connector to use
+ *
+ * Read EDID using the connector's I2C adapter.
+ *
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
+ * The returned pointer must be freed using drm_edid_free().
+ *
+ * Return: Pointer to EDID, or NULL if probe/read failed.
+ */
+const struct drm_edid *drm_edid_read(struct drm_connector *connector)
+{
+ if (drm_WARN_ON(connector->dev, !connector->ddc))
+ return NULL;
+
+ return drm_edid_read_ddc(connector, connector->ddc);
+}
+EXPORT_SYMBOL(drm_edid_read);
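
A sketch of a connector ->get_modes() hook using the new read API while still feeding the legacy helpers through drm_edid_raw() during the transition (illustrative only; the cast is needed because drm_add_edid_modes() still takes a non-const pointer):

static int example_get_modes(struct drm_connector *connector)
{
	const struct drm_edid *drm_edid;
	int count = 0;

	drm_edid = drm_edid_read(connector);	/* Uses connector->ddc. */

	/* A NULL drm_edid clears the property; drm_edid_raw(NULL) returns NULL. */
	drm_connector_update_edid_property(connector, drm_edid_raw(drm_edid));

	if (drm_edid)
		count = drm_add_edid_modes(connector,
					   (struct edid *)drm_edid_raw(drm_edid));

	drm_edid_free(drm_edid);

	return count;
}
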
+
static u32 edid_extract_panel_id(const struct edid *edid)
{
/*
@@ -2362,13 +2782,13 @@ EXPORT_SYMBOL(drm_edid_duplicate);
/**
* edid_get_quirks - return quirk flags for a given EDID
- * @edid: EDID to process
+ * @drm_edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
*/
-static u32 edid_get_quirks(const struct edid *edid)
+static u32 edid_get_quirks(const struct drm_edid *drm_edid)
{
- u32 panel_id = edid_extract_panel_id(edid);
+ u32 panel_id = edid_extract_panel_id(drm_edid->edid);
const struct edid_quirk *quirk;
int i;
@@ -2523,20 +2943,21 @@ vtb_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure)
cb((const struct detailed_timing *)(det_base + 18 * i), closure);
}
-static void
-drm_for_each_detailed_block(const struct edid *edid, detailed_cb *cb, void *closure)
+static void drm_for_each_detailed_block(const struct drm_edid *drm_edid,
+ detailed_cb *cb, void *closure)
{
+ struct drm_edid_iter edid_iter;
+ const u8 *ext;
int i;
- if (edid == NULL)
+ if (!drm_edid)
return;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
- cb(&(edid->detailed_timings[i]), closure);
-
- for (i = 0; i < edid_extension_block_count(edid); i++) {
- const u8 *ext = edid_extension_block_data(edid, i);
+ cb(&drm_edid->edid->detailed_timings[i], closure);
+ drm_edid_iter_begin(drm_edid, &edid_iter);
+ drm_edid_iter_for_each(ext, &edid_iter) {
switch (*ext) {
case CEA_EXT:
cea_for_each_detailed_block(ext, cb, closure);
@@ -2548,6 +2969,7 @@ drm_for_each_detailed_block(const struct edid *edid, detailed_cb *cb, void *clos
break;
}
}
+ drm_edid_iter_end(&edid_iter);
}
static void
@@ -2568,16 +2990,16 @@ is_rb(const struct detailed_timing *descriptor, void *data)
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
static bool
-drm_monitor_supports_rb(const struct edid *edid)
+drm_monitor_supports_rb(const struct drm_edid *drm_edid)
{
- if (edid->revision >= 4) {
+ if (drm_edid->edid->revision >= 4) {
bool ret = false;
- drm_for_each_detailed_block(edid, is_rb, &ret);
+ drm_for_each_detailed_block(drm_edid, is_rb, &ret);
return ret;
}
- return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+ return ((drm_edid->edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
}
static void
@@ -2596,11 +3018,11 @@ find_gtf2(const struct detailed_timing *descriptor, void *data)
/* Secondary GTF curve kicks in above some break frequency */
static int
-drm_gtf2_hbreak(const struct edid *edid)
+drm_gtf2_hbreak(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
- drm_for_each_detailed_block(edid, find_gtf2, &descriptor);
+ drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.hfreq_start_khz) != 12);
@@ -2608,11 +3030,11 @@ drm_gtf2_hbreak(const struct edid *edid)
}
static int
-drm_gtf2_2c(const struct edid *edid)
+drm_gtf2_2c(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
- drm_for_each_detailed_block(edid, find_gtf2, &descriptor);
+ drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.c) != 13);
@@ -2620,11 +3042,11 @@ drm_gtf2_2c(const struct edid *edid)
}
static int
-drm_gtf2_m(const struct edid *edid)
+drm_gtf2_m(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
- drm_for_each_detailed_block(edid, find_gtf2, &descriptor);
+ drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.m) != 14);
@@ -2632,11 +3054,11 @@ drm_gtf2_m(const struct edid *edid)
}
static int
-drm_gtf2_k(const struct edid *edid)
+drm_gtf2_k(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
- drm_for_each_detailed_block(edid, find_gtf2, &descriptor);
+ drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.k) != 16);
@@ -2644,11 +3066,11 @@ drm_gtf2_k(const struct edid *edid)
}
static int
-drm_gtf2_2j(const struct edid *edid)
+drm_gtf2_2j(const struct drm_edid *drm_edid)
{
const struct detailed_timing *descriptor = NULL;
- drm_for_each_detailed_block(edid, find_gtf2, &descriptor);
+ drm_for_each_detailed_block(drm_edid, find_gtf2, &descriptor);
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.gtf2.j) != 17);
@@ -2656,12 +3078,14 @@ drm_gtf2_2j(const struct edid *edid)
}
/* Get standard timing level (CVT/GTF/DMT). */
-static int standard_timing_level(const struct edid *edid)
+static int standard_timing_level(const struct drm_edid *drm_edid)
{
+ const struct edid *edid = drm_edid->edid;
+
if (edid->revision >= 2) {
if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
return LEVEL_CVT;
- if (drm_gtf2_hbreak(edid))
+ if (drm_gtf2_hbreak(drm_edid))
return LEVEL_GTF2;
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
return LEVEL_GTF;
@@ -2693,9 +3117,9 @@ static int drm_mode_hsync(const struct drm_display_mode *mode)
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
*/
-static struct drm_display_mode *
-drm_mode_std(struct drm_connector *connector, const struct edid *edid,
- const struct std_timing *t)
+static struct drm_display_mode *drm_mode_std(struct drm_connector *connector,
+ const struct drm_edid *drm_edid,
+ const struct std_timing *t)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
@@ -2705,7 +3129,7 @@ drm_mode_std(struct drm_connector *connector, const struct edid *edid,
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
- int timing_level = standard_timing_level(edid);
+ int timing_level = standard_timing_level(drm_edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -2716,7 +3140,7 @@ drm_mode_std(struct drm_connector *connector, const struct edid *edid,
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
- if (edid->revision < 3)
+ if (drm_edid->edid->revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
@@ -2759,7 +3183,7 @@ drm_mode_std(struct drm_connector *connector, const struct edid *edid,
}
/* check whether it can be found in default mode table */
- if (drm_monitor_supports_rb(edid)) {
+ if (drm_monitor_supports_rb(drm_edid)) {
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
true);
if (mode)
@@ -2785,14 +3209,14 @@ drm_mode_std(struct drm_connector *connector, const struct edid *edid,
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
- if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(drm_edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
- drm_gtf2_m(edid),
- drm_gtf2_2c(edid),
- drm_gtf2_k(edid),
- drm_gtf2_2j(edid));
+ drm_gtf2_m(drm_edid),
+ drm_gtf2_2c(drm_edid),
+ drm_gtf2_k(drm_edid),
+ drm_gtf2_2j(drm_edid));
}
break;
case LEVEL_CVT:
@@ -2851,7 +3275,7 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
* drm_display_mode.
*/
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
- const struct edid *edid,
+ const struct drm_edid *drm_edid,
const struct detailed_timing *timing,
u32 quirks)
{
@@ -2939,8 +3363,8 @@ set_size:
}
if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
- mode->width_mm = edid->width_cm * 10;
- mode->height_mm = edid->height_cm * 10;
+ mode->width_mm = drm_edid->edid->width_cm * 10;
+ mode->height_mm = drm_edid->edid->height_cm * 10;
}
mode->type = DRM_MODE_TYPE_DRIVER;
@@ -2998,10 +3422,11 @@ range_pixel_clock(const struct edid *edid, const u8 *t)
return t[9] * 10000 + 5001;
}
-static bool
-mode_in_range(const struct drm_display_mode *mode, const struct edid *edid,
- const struct detailed_timing *timing)
+static bool mode_in_range(const struct drm_display_mode *mode,
+ const struct drm_edid *drm_edid,
+ const struct detailed_timing *timing)
{
+ const struct edid *edid = drm_edid->edid;
u32 max_clock;
const u8 *t = (const u8 *)timing;
@@ -3020,7 +3445,7 @@ mode_in_range(const struct drm_display_mode *mode, const struct edid *edid,
if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
return false;
- if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(drm_edid))
return false;
return true;
@@ -3044,16 +3469,16 @@ static bool valid_inferred_mode(const struct drm_connector *connector,
return ok;
}
-static int
-drm_dmt_modes_for_range(struct drm_connector *connector, const struct edid *edid,
- const struct detailed_timing *timing)
+static int drm_dmt_modes_for_range(struct drm_connector *connector,
+ const struct drm_edid *drm_edid,
+ const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
- if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ if (mode_in_range(drm_dmt_modes + i, drm_edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
@@ -3079,9 +3504,9 @@ void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
}
}
-static int
-drm_gtf_modes_for_range(struct drm_connector *connector, const struct edid *edid,
- const struct detailed_timing *timing)
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+ const struct drm_edid *drm_edid,
+ const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
@@ -3095,7 +3520,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, const struct edid *edid
return modes;
drm_mode_fixup_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing) ||
+ if (!mode_in_range(newmode, drm_edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
@@ -3108,14 +3533,14 @@ drm_gtf_modes_for_range(struct drm_connector *connector, const struct edid *edid
return modes;
}
-static int
-drm_cvt_modes_for_range(struct drm_connector *connector, const struct edid *edid,
- const struct detailed_timing *timing)
+static int drm_cvt_modes_for_range(struct drm_connector *connector,
+ const struct drm_edid *drm_edid,
+ const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- bool rb = drm_monitor_supports_rb(edid);
+ bool rb = drm_monitor_supports_rb(drm_edid);
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
@@ -3125,7 +3550,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, const struct edid *edid
return modes;
drm_mode_fixup_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing) ||
+ if (!mode_in_range(newmode, drm_edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
@@ -3149,25 +3574,25 @@ do_inferred_modes(const struct detailed_timing *timing, void *c)
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
- closure->edid,
+ closure->drm_edid,
timing);
- if (!version_greater(closure->edid, 1, 1))
+ if (!version_greater(closure->drm_edid, 1, 1))
return; /* GTF not defined yet */
switch (range->flags) {
case 0x02: /* secondary gtf, XXX could do more */
case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
- closure->edid,
+ closure->drm_edid,
timing);
break;
case 0x04: /* cvt, only in 1.4+ */
- if (!version_greater(closure->edid, 1, 3))
+ if (!version_greater(closure->drm_edid, 1, 3))
break;
closure->modes += drm_cvt_modes_for_range(closure->connector,
- closure->edid,
+ closure->drm_edid,
timing);
break;
case 0x01: /* just the ranges, no formula */
@@ -3176,16 +3601,16 @@ do_inferred_modes(const struct detailed_timing *timing, void *c)
}
}
-static int
-add_inferred_modes(struct drm_connector *connector, const struct edid *edid)
+static int add_inferred_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
- .edid = edid,
+ .drm_edid = drm_edid,
};
- if (version_greater(edid, 1, 0))
- drm_for_each_detailed_block(edid, do_inferred_modes, &closure);
+ if (version_greater(drm_edid, 1, 0))
+ drm_for_each_detailed_block(drm_edid, do_inferred_modes, &closure);
return closure.modes;
}
@@ -3235,17 +3660,18 @@ do_established_modes(const struct detailed_timing *timing, void *c)
* bitmap of the supported "established modes" list (defined above). Tease them
* out and add them to the global modes list.
*/
-static int
-add_established_modes(struct drm_connector *connector, const struct edid *edid)
+static int add_established_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct drm_device *dev = connector->dev;
+ const struct edid *edid = drm_edid->edid;
unsigned long est_bits = edid->established_timings.t1 |
(edid->established_timings.t2 << 8) |
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
struct detailed_mode_closure closure = {
.connector = connector,
- .edid = edid,
+ .drm_edid = drm_edid,
};
for (i = 0; i <= EDID_EST_TIMINGS; i++) {
@@ -3260,8 +3686,8 @@ add_established_modes(struct drm_connector *connector, const struct edid *edid)
}
}
- if (version_greater(edid, 1, 0))
- drm_for_each_detailed_block(edid, do_established_modes,
+ if (version_greater(drm_edid, 1, 0))
+ drm_for_each_detailed_block(drm_edid, do_established_modes,
&closure);
return modes + closure.modes;
@@ -3273,7 +3699,6 @@ do_standard_modes(const struct detailed_timing *timing, void *c)
struct detailed_mode_closure *closure = c;
const struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
- const struct edid *edid = closure->edid;
int i;
if (!is_display_descriptor(timing, EDID_DETAIL_STD_MODES))
@@ -3283,7 +3708,7 @@ do_standard_modes(const struct detailed_timing *timing, void *c)
const struct std_timing *std = &data->data.timings[i];
struct drm_display_mode *newmode;
- newmode = drm_mode_std(connector, edid, std);
+ newmode = drm_mode_std(connector, closure->drm_edid, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
closure->modes++;
@@ -3296,28 +3721,28 @@ do_standard_modes(const struct detailed_timing *timing, void *c)
* using the appropriate standard (DMT, GTF, or CVT). Grab them from EDID and
* add them to the list.
*/
-static int
-add_standard_modes(struct drm_connector *connector, const struct edid *edid)
+static int add_standard_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
int i, modes = 0;
struct detailed_mode_closure closure = {
.connector = connector,
- .edid = edid,
+ .drm_edid = drm_edid,
};
for (i = 0; i < EDID_STD_TIMINGS; i++) {
struct drm_display_mode *newmode;
- newmode = drm_mode_std(connector, edid,
- &edid->standard_timings[i]);
+ newmode = drm_mode_std(connector, drm_edid,
+ &drm_edid->edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
- if (version_greater(edid, 1, 0))
- drm_for_each_detailed_block(edid, do_standard_modes,
+ if (version_greater(drm_edid, 1, 0))
+ drm_for_each_detailed_block(drm_edid, do_standard_modes,
&closure);
/* XXX should also look for standard codes in VTB blocks */
@@ -3389,15 +3814,15 @@ do_cvt_mode(const struct detailed_timing *timing, void *c)
}
static int
-add_cvt_modes(struct drm_connector *connector, const struct edid *edid)
+add_cvt_modes(struct drm_connector *connector, const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
- .edid = edid,
+ .drm_edid = drm_edid,
};
- if (version_greater(edid, 1, 2))
- drm_for_each_detailed_block(edid, do_cvt_mode, &closure);
+ if (version_greater(drm_edid, 1, 2))
+ drm_for_each_detailed_block(drm_edid, do_cvt_mode, &closure);
/* XXX should also look for CVT codes in VTB blocks */
@@ -3416,7 +3841,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
return;
newmode = drm_mode_detailed(closure->connector->dev,
- closure->edid, timing,
+ closure->drm_edid, timing,
closure->quirks);
if (!newmode)
return;
@@ -3439,38 +3864,44 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
/*
* add_detailed_modes - Add modes from detailed timings
* @connector: attached connector
- * @edid: EDID block to scan
+ * @drm_edid: EDID block to scan
* @quirks: quirks to apply
*/
-static int
-add_detailed_modes(struct drm_connector *connector, const struct edid *edid,
- u32 quirks)
+static int add_detailed_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid, u32 quirks)
{
struct detailed_mode_closure closure = {
.connector = connector,
- .edid = edid,
+ .drm_edid = drm_edid,
.preferred = true,
.quirks = quirks,
};
- if (closure.preferred && !version_greater(edid, 1, 3))
+ if (closure.preferred && !version_greater(drm_edid, 1, 3))
closure.preferred =
- (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ (drm_edid->edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
- drm_for_each_detailed_block(edid, do_detailed_mode, &closure);
+ drm_for_each_detailed_block(drm_edid, do_detailed_mode, &closure);
return closure.modes;
}
-#define AUDIO_BLOCK 0x01
-#define VIDEO_BLOCK 0x02
-#define VENDOR_BLOCK 0x03
-#define SPEAKER_BLOCK 0x04
-#define HDR_STATIC_METADATA_BLOCK 0x6
-#define USE_EXTENDED_TAG 0x07
-#define EXT_VIDEO_CAPABILITY_BLOCK 0x00
-#define EXT_VIDEO_DATA_BLOCK_420 0x0E
-#define EXT_VIDEO_CAP_BLOCK_Y420CMDB 0x0F
+/* CTA-861-H Table 60 - CTA Tag Codes */
+#define CTA_DB_AUDIO 1
+#define CTA_DB_VIDEO 2
+#define CTA_DB_VENDOR 3
+#define CTA_DB_SPEAKER 4
+#define CTA_DB_EXTENDED_TAG 7
+
+/* CTA-861-H Table 62 - CTA Extended Tag Codes */
+#define CTA_EXT_DB_VIDEO_CAP 0
+#define CTA_EXT_DB_VENDOR 1
+#define CTA_EXT_DB_HDR_STATIC_METADATA 6
+#define CTA_EXT_DB_420_VIDEO_DATA 14
+#define CTA_EXT_DB_420_VIDEO_CAP_MAP 15
+#define CTA_EXT_DB_HF_EEODB 0x78
+#define CTA_EXT_DB_HF_SCDB 0x79
+
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
@@ -3478,25 +3909,27 @@ add_detailed_modes(struct drm_connector *connector, const struct edid *edid,
/*
* Search EDID for CEA extension block.
+ *
+ * FIXME: Prefer not returning pointers to raw EDID data.
*/
-const u8 *drm_find_edid_extension(const struct edid *edid,
+const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
int ext_id, int *ext_index)
{
const u8 *edid_ext = NULL;
int i;
/* No EDID or EDID extensions */
- if (!edid || !edid_extension_block_count(edid))
+ if (!drm_edid || !drm_edid_extension_block_count(drm_edid))
return NULL;
/* Find CEA extension */
- for (i = *ext_index; i < edid_extension_block_count(edid); i++) {
- edid_ext = edid_extension_block_data(edid, i);
+ for (i = *ext_index; i < drm_edid_extension_block_count(drm_edid); i++) {
+ edid_ext = drm_edid_extension_block_data(drm_edid, i);
if (edid_block_tag(edid_ext) == ext_id)
break;
}
- if (i >= edid_extension_block_count(edid))
+ if (i >= drm_edid_extension_block_count(drm_edid))
return NULL;
*ext_index = i + 1;
@@ -3504,30 +3937,29 @@ const u8 *drm_find_edid_extension(const struct edid *edid,
return edid_ext;
}
-static const u8 *drm_find_cea_extension(const struct edid *edid)
+/* Return true if the EDID has a CTA extension or a DisplayID CTA data block */
+static bool drm_edid_has_cta_extension(const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
- const u8 *cea;
int ext_index = 0;
+ bool found = false;
/* Look for a top level CEA extension block */
- /* FIXME: make callers iterate through multiple CEA ext blocks? */
- cea = drm_find_edid_extension(edid, CEA_EXT, &ext_index);
- if (cea)
- return cea;
+ if (drm_find_edid_extension(drm_edid, CEA_EXT, &ext_index))
+ return true;
/* CEA blocks can also be found embedded in a DisplayID block */
- displayid_iter_edid_begin(edid, &iter);
+ displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_CTA) {
- cea = (const u8 *)block;
+ found = true;
break;
}
}
displayid_iter_end(&iter);
- return cea;
+ return found;
}
static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
@@ -3792,16 +4224,16 @@ static bool drm_valid_hdmi_vic(u8 vic)
return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes);
}
-static int
-add_alternate_cea_modes(struct drm_connector *connector, const struct edid *edid)
+static int add_alternate_cea_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *tmp;
LIST_HEAD(list);
int modes = 0;
- /* Don't add CEA modes if the CEA extension block is missing */
- if (!drm_find_cea_extension(edid))
+ /* Don't add CTA modes if the CTA extension block is missing */
+ if (!drm_edid_has_cta_extension(drm_edid))
return 0;
/*
@@ -4283,24 +4715,6 @@ out:
}
static int
-cea_db_payload_len(const u8 *db)
-{
- return db[0] & 0x1f;
-}
-
-static int
-cea_db_extended_tag(const u8 *db)
-{
- return db[1];
-}
-
-static int
-cea_db_tag(const u8 *db)
-{
- return db[0] >> 5;
-}
-
-static int
cea_revision(const u8 *cea)
{
/*
@@ -4313,125 +4727,318 @@ cea_revision(const u8 *cea)
return cea[1];
}
-static int
-cea_db_offsets(const u8 *cea, int *start, int *end)
+/*
+ * CTA Data Block iterator.
+ *
+ * Iterate through all CTA Data Blocks in both EDID CTA Extensions and DisplayID
+ * CTA Data Blocks.
+ *
+ * struct cea_db *db;
+ * struct cea_db_iter iter;
+ *
+ * cea_db_iter_edid_begin(drm_edid, &iter);
+ * cea_db_iter_for_each(db, &iter) {
+ * // do stuff with db
+ * }
+ * cea_db_iter_end(&iter);
+ */
+struct cea_db_iter {
+ struct drm_edid_iter edid_iter;
+ struct displayid_iter displayid_iter;
+
+ /* Current Data Block Collection. */
+ const u8 *collection;
+
+ /* Current Data Block index in current collection. */
+ int index;
+
+ /* End index in current collection. */
+ int end;
+};
+
+/* CTA-861-H section 7.4 CTA Data Block Collection */
+struct cea_db {
+ u8 tag_length;
+ u8 data[];
+} __packed;
+
+static int cea_db_tag(const struct cea_db *db)
{
- /* DisplayID CTA extension blocks and top-level CEA EDID
- * block header definitions differ in the following bytes:
- * 1) Byte 2 of the header specifies length differently,
- * 2) Byte 3 is only present in the CEA top level block.
- *
- * The different definitions for byte 2 follow.
- *
- * DisplayID CTA extension block defines byte 2 as:
- * Number of payload bytes
- *
- * CEA EDID block defines byte 2 as:
- * Byte number (decimal) within this block where the 18-byte
- * DTDs begin. If no non-DTD data is present in this extension
- * block, the value should be set to 04h (the byte after next).
- * If set to 00h, there are no DTDs present in this block and
- * no non-DTD data.
- */
- if (cea[0] == DATA_BLOCK_CTA) {
- /*
- * for_each_displayid_db() has already verified
- * that these stay within expected bounds.
- */
- *start = 3;
- *end = *start + cea[2];
- } else if (cea[0] == CEA_EXT) {
- /* Data block offset in CEA extension block */
- *start = 4;
- *end = cea[2];
- if (*end == 0)
- *end = 127;
- if (*end < 4 || *end > 127)
- return -ERANGE;
- } else {
- return -EOPNOTSUPP;
- }
+ return db->tag_length >> 5;
+}
- return 0;
+static int cea_db_payload_len(const void *_db)
+{
+ /* FIXME: Transition to passing struct cea_db * everywhere. */
+ const struct cea_db *db = _db;
+
+ return db->tag_length & 0x1f;
}
-static bool cea_db_is_hdmi_vsdb(const u8 *db)
+static const void *cea_db_data(const struct cea_db *db)
{
- if (cea_db_tag(db) != VENDOR_BLOCK)
- return false;
+ return db->data;
+}
- if (cea_db_payload_len(db) < 5)
- return false;
+static bool cea_db_is_extended_tag(const struct cea_db *db, int tag)
+{
+ return cea_db_tag(db) == CTA_DB_EXTENDED_TAG &&
+ cea_db_payload_len(db) >= 1 &&
+ db->data[0] == tag;
+}
- return oui(db[3], db[2], db[1]) == HDMI_IEEE_OUI;
+static bool cea_db_is_vendor(const struct cea_db *db, int vendor_oui)
+{
+ const u8 *data = cea_db_data(db);
+
+ return cea_db_tag(db) == CTA_DB_VENDOR &&
+ cea_db_payload_len(db) >= 3 &&
+ oui(data[2], data[1], data[0]) == vendor_oui;
}
-static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
+static void cea_db_iter_edid_begin(const struct drm_edid *drm_edid,
+ struct cea_db_iter *iter)
{
- if (cea_db_tag(db) != VENDOR_BLOCK)
- return false;
+ memset(iter, 0, sizeof(*iter));
- if (cea_db_payload_len(db) < 7)
- return false;
+ drm_edid_iter_begin(drm_edid, &iter->edid_iter);
+ displayid_iter_edid_begin(drm_edid, &iter->displayid_iter);
+}
+
+static const struct cea_db *
+__cea_db_iter_current_block(const struct cea_db_iter *iter)
+{
+ const struct cea_db *db;
+
+ if (!iter->collection)
+ return NULL;
+
+ db = (const struct cea_db *)&iter->collection[iter->index];
+
+ if (iter->index + sizeof(*db) <= iter->end &&
+ iter->index + sizeof(*db) + cea_db_payload_len(db) <= iter->end)
+ return db;
- return oui(db[3], db[2], db[1]) == HDMI_FORUM_IEEE_OUI;
+ return NULL;
}
-static bool cea_db_is_microsoft_vsdb(const u8 *db)
+/*
+ * References:
+ * - CTA-861-H section 7.3.3 CTA Extension Version 3
+ */
+static int cea_db_collection_size(const u8 *cta)
{
- if (cea_db_tag(db) != VENDOR_BLOCK)
- return false;
+ u8 d = cta[2];
- if (cea_db_payload_len(db) != 21)
- return false;
+ if (d < 4 || d > 127)
+ return 0;
- return oui(db[3], db[2], db[1]) == MICROSOFT_IEEE_OUI;
+ return d - 4;
}
-static bool cea_db_is_vcdb(const u8 *db)
+/*
+ * References:
+ * - VESA E-EDID v1.4
+ * - CTA-861-H section 7.3.3 CTA Extension Version 3
+ */
+static const void *__cea_db_iter_edid_next(struct cea_db_iter *iter)
{
- if (cea_db_tag(db) != USE_EXTENDED_TAG)
- return false;
+ const u8 *ext;
- if (cea_db_payload_len(db) != 2)
- return false;
+ drm_edid_iter_for_each(ext, &iter->edid_iter) {
+ int size;
- if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK)
- return false;
+ /* Only support CTA Extension revision 3+ */
+ if (ext[0] != CEA_EXT || cea_revision(ext) < 3)
+ continue;
- return true;
+ size = cea_db_collection_size(ext);
+ if (!size)
+ continue;
+
+ iter->index = 4;
+ iter->end = iter->index + size;
+
+ return ext;
+ }
+
+ return NULL;
}
-static bool cea_db_is_y420cmdb(const u8 *db)
+/*
+ * References:
+ * - DisplayID v1.3 Appendix C: CEA Data Block within a DisplayID Data Block
+ * - DisplayID v2.0 section 4.10 CTA DisplayID Data Block
+ *
+ * Note that the above do not specify any connection between DisplayID Data
+ * Block revision and CTA Extension versions.
+ */
+static const void *__cea_db_iter_displayid_next(struct cea_db_iter *iter)
{
- if (cea_db_tag(db) != USE_EXTENDED_TAG)
- return false;
+ const struct displayid_block *block;
- if (!cea_db_payload_len(db))
- return false;
+ displayid_iter_for_each(block, &iter->displayid_iter) {
+ if (block->tag != DATA_BLOCK_CTA)
+ continue;
- if (cea_db_extended_tag(db) != EXT_VIDEO_CAP_BLOCK_Y420CMDB)
- return false;
+ /*
+ * The displayid iterator has already verified the block bounds
+ * in displayid_iter_block().
+ */
+ iter->index = sizeof(*block);
+ iter->end = iter->index + block->num_bytes;
- return true;
+ return block;
+ }
+
+ return NULL;
}
-static bool cea_db_is_y420vdb(const u8 *db)
+static const struct cea_db *__cea_db_iter_next(struct cea_db_iter *iter)
{
- if (cea_db_tag(db) != USE_EXTENDED_TAG)
- return false;
+ const struct cea_db *db;
- if (!cea_db_payload_len(db))
- return false;
+ if (iter->collection) {
+ /* Current collection should always be valid. */
+ db = __cea_db_iter_current_block(iter);
+ if (WARN_ON(!db)) {
+ iter->collection = NULL;
+ return NULL;
+ }
- if (cea_db_extended_tag(db) != EXT_VIDEO_DATA_BLOCK_420)
- return false;
+ /* Next block in CTA Data Block Collection */
+ iter->index += sizeof(*db) + cea_db_payload_len(db);
- return true;
+ db = __cea_db_iter_current_block(iter);
+ if (db)
+ return db;
+ }
+
+ for (;;) {
+ /*
+ * Find the next CTA Data Block Collection. First iterate all
+ * the EDID CTA Extensions, then all the DisplayID CTA blocks.
+ *
+ * Per DisplayID v1.3 Appendix B: DisplayID as an EDID
+ * Extension, it's recommended that DisplayID extensions are
+ * exposed after all of the CTA Extensions.
+ */
+ iter->collection = __cea_db_iter_edid_next(iter);
+ if (!iter->collection)
+ iter->collection = __cea_db_iter_displayid_next(iter);
+
+ if (!iter->collection)
+ return NULL;
+
+ db = __cea_db_iter_current_block(iter);
+ if (db)
+ return db;
+ }
+}
+
+#define cea_db_iter_for_each(__db, __iter) \
+ while (((__db) = __cea_db_iter_next(__iter)))
+
+static void cea_db_iter_end(struct cea_db_iter *iter)
+{
+ displayid_iter_end(&iter->displayid_iter);
+ drm_edid_iter_end(&iter->edid_iter);
+
+ memset(iter, 0, sizeof(*iter));
+}
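
An illustrative walk over every CTA Data Block, from both EDID CTA extensions and DisplayID CTA blocks, using the iterator above (not part of this patch; the function name and OUI parameter are placeholders):

static bool example_has_vendor_db(const struct drm_edid *drm_edid, int vendor_oui)
{
	struct cea_db_iter iter;
	const struct cea_db *db;
	bool found = false;

	cea_db_iter_edid_begin(drm_edid, &iter);
	cea_db_iter_for_each(db, &iter) {
		if (cea_db_is_vendor(db, vendor_oui)) {
			found = true;
			break;
		}
	}
	cea_db_iter_end(&iter);

	return found;
}
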
+
+static bool cea_db_is_hdmi_vsdb(const struct cea_db *db)
+{
+ return cea_db_is_vendor(db, HDMI_IEEE_OUI) &&
+ cea_db_payload_len(db) >= 5;
+}
+
+static bool cea_db_is_hdmi_forum_vsdb(const struct cea_db *db)
+{
+ return cea_db_is_vendor(db, HDMI_FORUM_IEEE_OUI) &&
+ cea_db_payload_len(db) >= 7;
+}
+
+static bool cea_db_is_hdmi_forum_eeodb(const void *db)
+{
+ return cea_db_is_extended_tag(db, CTA_EXT_DB_HF_EEODB) &&
+ cea_db_payload_len(db) >= 2;
+}
+
+static bool cea_db_is_microsoft_vsdb(const struct cea_db *db)
+{
+ return cea_db_is_vendor(db, MICROSOFT_IEEE_OUI) &&
+ cea_db_payload_len(db) == 21;
}
-#define for_each_cea_db(cea, i, start, end) \
- for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+static bool cea_db_is_vcdb(const struct cea_db *db)
+{
+ return cea_db_is_extended_tag(db, CTA_EXT_DB_VIDEO_CAP) &&
+ cea_db_payload_len(db) == 2;
+}
+
+static bool cea_db_is_hdmi_forum_scdb(const struct cea_db *db)
+{
+ return cea_db_is_extended_tag(db, CTA_EXT_DB_HF_SCDB) &&
+ cea_db_payload_len(db) >= 7;
+}
+
+static bool cea_db_is_y420cmdb(const struct cea_db *db)
+{
+ return cea_db_is_extended_tag(db, CTA_EXT_DB_420_VIDEO_CAP_MAP);
+}
+
+static bool cea_db_is_y420vdb(const struct cea_db *db)
+{
+ return cea_db_is_extended_tag(db, CTA_EXT_DB_420_VIDEO_DATA);
+}
+
+static bool cea_db_is_hdmi_hdr_metadata_block(const struct cea_db *db)
+{
+ return cea_db_is_extended_tag(db, CTA_EXT_DB_HDR_STATIC_METADATA) &&
+ cea_db_payload_len(db) >= 3;
+}
+
+/*
+ * Get the HF-EEODB override extension block count from EDID.
+ *
+ * The passed-in EDID may be partially read, as long as it has at least two
+ * blocks (the base block and one extension block) if the EDID extension count
+ * is > 0.
+ *
+ * Note that this is *not* how you should parse CTA Data Blocks in general; this
+ * is only to handle partially read EDIDs. Normally, use the CTA Data Block
+ * iterators instead.
+ *
+ * References:
+ * - HDMI 2.1 section 10.3.6 HDMI Forum EDID Extension Override Data Block
+ */
+static int edid_hfeeodb_extension_block_count(const struct edid *edid)
+{
+ const u8 *cta;
+
+ /* No extensions according to base block, no HF-EEODB. */
+ if (!edid_extension_block_count(edid))
+ return 0;
+
+ /* The HF-EEODB is only ever present in the first EDID extension block */
+ cta = edid_extension_block_data(edid, 0);
+ if (edid_block_tag(cta) != CEA_EXT || cea_revision(cta) < 3)
+ return 0;
+
+ /* Need to have the data block collection, and at least 3 bytes. */
+ if (cea_db_collection_size(cta) < 3)
+ return 0;
+
+ /*
+ * Sinks that include the HF-EEODB in their E-EDID shall include one and
+ * only one instance of the HF-EEODB in the E-EDID, occupying bytes 4
+ * through 6 of Block 1 of the E-EDID.
+ */
+ if (!cea_db_is_hdmi_forum_eeodb(&cta[4]))
+ return 0;
+
+ return cta[4 + 2];
+}
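
As a worked example of the override, assuming the count is carried in byte 6 of Block 1 as read above (cta[4 + 2]): a sink reporting a value of 3 there describes a base block plus three extension blocks, so the full E-EDID is 4 * EDID_LENGTH = 512 bytes, even though the base block's legacy extension field still reads 1.
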
static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
const u8 *db)
@@ -4473,49 +5080,44 @@ static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
hdmi->y420_cmdb_map = map;
}
-static int
-add_cea_modes(struct drm_connector *connector, const struct edid *edid)
+static int add_cea_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
- const u8 *cea = drm_find_cea_extension(edid);
- const u8 *db, *hdmi = NULL, *video = NULL;
- u8 dbl, hdmi_len, video_len = 0;
+ const struct cea_db *db;
+ struct cea_db_iter iter;
int modes = 0;
- if (cea && cea_revision(cea) >= 3) {
- int i, start, end;
-
- if (cea_db_offsets(cea, &start, &end))
- return 0;
-
- for_each_cea_db(cea, i, start, end) {
- db = &cea[i];
- dbl = cea_db_payload_len(db);
-
- if (cea_db_tag(db) == VIDEO_BLOCK) {
- video = db + 1;
- video_len = dbl;
- modes += do_cea_modes(connector, video, dbl);
- } else if (cea_db_is_hdmi_vsdb(db)) {
- hdmi = db;
- hdmi_len = dbl;
- } else if (cea_db_is_y420vdb(db)) {
- const u8 *vdb420 = &db[2];
-
- /* Add 4:2:0(only) modes present in EDID */
- modes += do_y420vdb_modes(connector,
- vdb420,
- dbl - 1);
- }
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ const u8 *hdmi = NULL, *video = NULL;
+ u8 hdmi_len = 0, video_len = 0;
+
+ if (cea_db_tag(db) == CTA_DB_VIDEO) {
+ video = cea_db_data(db);
+ video_len = cea_db_payload_len(db);
+ modes += do_cea_modes(connector, video, video_len);
+ } else if (cea_db_is_hdmi_vsdb(db)) {
+ /* FIXME: Switch to use cea_db_data() */
+ hdmi = (const u8 *)db;
+ hdmi_len = cea_db_payload_len(db);
+ } else if (cea_db_is_y420vdb(db)) {
+ const u8 *vdb420 = cea_db_data(db) + 1;
+
+ /* Add 4:2:0(only) modes present in EDID */
+ modes += do_y420vdb_modes(connector, vdb420,
+ cea_db_payload_len(db) - 1);
}
- }
- /*
- * We parse the HDMI VSDB after having added the cea modes as we will
- * be patching their flags when the sink supports stereo 3D.
- */
- if (hdmi)
- modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
- video_len);
+ /*
+ * We parse the HDMI VSDB after having added the cea modes as we
+ * will be patching their flags when the sink supports stereo
+ * 3D.
+ */
+ if (hdmi)
+ modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
+ video, video_len);
+ }
+ cea_db_iter_end(&iter);
return modes;
}
@@ -4563,20 +5165,6 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
mode->clock = clock;
}
-static bool cea_db_is_hdmi_hdr_metadata_block(const u8 *db)
-{
- if (cea_db_tag(db) != USE_EXTENDED_TAG)
- return false;
-
- if (db[1] != HDR_STATIC_METADATA_BLOCK)
- return false;
-
- if (cea_db_payload_len(db) < 3)
- return false;
-
- return true;
-}
-
static uint8_t eotf_supported(const u8 *edid_ext)
{
return edid_ext[2] &
@@ -4654,15 +5242,15 @@ monitor_name(const struct detailed_timing *timing, void *data)
*res = timing->data.other_data.data.str.str;
}
-static int get_monitor_name(const struct edid *edid, char name[13])
+static int get_monitor_name(const struct drm_edid *drm_edid, char name[13])
{
const char *edid_name = NULL;
int mnl;
- if (!edid || !name)
+ if (!drm_edid || !name)
return 0;
- drm_for_each_detailed_block(edid, monitor_name, &edid_name);
+ drm_for_each_detailed_block(drm_edid, monitor_name, &edid_name);
for (mnl = 0; edid_name && mnl < 13; mnl++) {
if (edid_name[mnl] == 0x0a)
break;
@@ -4682,14 +5270,22 @@ static int get_monitor_name(const struct edid *edid, char name[13])
*/
void drm_edid_get_monitor_name(const struct edid *edid, char *name, int bufsize)
{
- int name_length;
- char buf[13];
+ int name_length = 0;
if (bufsize <= 0)
return;
- name_length = min(get_monitor_name(edid, buf), bufsize - 1);
- memcpy(name, buf, name_length);
+ if (edid) {
+ char buf[13];
+ struct drm_edid drm_edid = {
+ .edid = edid,
+ .size = edid_size(edid),
+ };
+
+ name_length = min(get_monitor_name(&drm_edid, buf), bufsize - 1);
+ memcpy(name, buf, name_length);
+ }
+
name[name_length] = '\0';
}
EXPORT_SYMBOL(drm_edid_get_monitor_name);
@@ -4709,82 +5305,70 @@ static void clear_eld(struct drm_connector *connector)
/*
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
- * @edid: EDID to parse
+ * @drm_edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
* HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
*/
static void drm_edid_to_eld(struct drm_connector *connector,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct cea_db *db;
+ struct cea_db_iter iter;
uint8_t *eld = connector->eld;
- const u8 *cea;
- const u8 *db;
int total_sad_count = 0;
int mnl;
- int dbl;
clear_eld(connector);
- if (!edid)
+ if (!drm_edid)
return;
- cea = drm_find_cea_extension(edid);
- if (!cea) {
- DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
- return;
- }
-
- mnl = get_monitor_name(edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
+ mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
DRM_DEBUG_KMS("ELD monitor %s\n", &eld[DRM_ELD_MONITOR_NAME_STRING]);
- eld[DRM_ELD_CEA_EDID_VER_MNL] = cea[1] << DRM_ELD_CEA_EDID_VER_SHIFT;
+ eld[DRM_ELD_CEA_EDID_VER_MNL] = info->cea_rev << DRM_ELD_CEA_EDID_VER_SHIFT;
eld[DRM_ELD_CEA_EDID_VER_MNL] |= mnl;
eld[DRM_ELD_VER] = DRM_ELD_VER_CEA861D;
- eld[DRM_ELD_MANUFACTURER_NAME0] = edid->mfg_id[0];
- eld[DRM_ELD_MANUFACTURER_NAME1] = edid->mfg_id[1];
- eld[DRM_ELD_PRODUCT_CODE0] = edid->prod_code[0];
- eld[DRM_ELD_PRODUCT_CODE1] = edid->prod_code[1];
+ eld[DRM_ELD_MANUFACTURER_NAME0] = drm_edid->edid->mfg_id[0];
+ eld[DRM_ELD_MANUFACTURER_NAME1] = drm_edid->edid->mfg_id[1];
+ eld[DRM_ELD_PRODUCT_CODE0] = drm_edid->edid->prod_code[0];
+ eld[DRM_ELD_PRODUCT_CODE1] = drm_edid->edid->prod_code[1];
- if (cea_revision(cea) >= 3) {
- int i, start, end;
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ const u8 *data = cea_db_data(db);
+ int len = cea_db_payload_len(db);
int sad_count;
- if (cea_db_offsets(cea, &start, &end)) {
- start = 0;
- end = 0;
- }
-
- for_each_cea_db(cea, i, start, end) {
- db = &cea[i];
- dbl = cea_db_payload_len(db);
-
- switch (cea_db_tag(db)) {
- case AUDIO_BLOCK:
- /* Audio Data Block, contains SADs */
- sad_count = min(dbl / 3, 15 - total_sad_count);
- if (sad_count >= 1)
- memcpy(&eld[DRM_ELD_CEA_SAD(mnl, total_sad_count)],
- &db[1], sad_count * 3);
- total_sad_count += sad_count;
- break;
- case SPEAKER_BLOCK:
- /* Speaker Allocation Data Block */
- if (dbl >= 1)
- eld[DRM_ELD_SPEAKER] = db[1];
- break;
- case VENDOR_BLOCK:
- /* HDMI Vendor-Specific Data Block */
- if (cea_db_is_hdmi_vsdb(db))
- drm_parse_hdmi_vsdb_audio(connector, db);
- break;
- default:
- break;
- }
+ switch (cea_db_tag(db)) {
+ case CTA_DB_AUDIO:
+ /* Audio Data Block, contains SADs */
+ sad_count = min(len / 3, 15 - total_sad_count);
+ if (sad_count >= 1)
+ memcpy(&eld[DRM_ELD_CEA_SAD(mnl, total_sad_count)],
+ data, sad_count * 3);
+ total_sad_count += sad_count;
+ break;
+ case CTA_DB_SPEAKER:
+ /* Speaker Allocation Data Block */
+ if (len >= 1)
+ eld[DRM_ELD_SPEAKER] = data[0];
+ break;
+ case CTA_DB_VENDOR:
+ /* HDMI Vendor-Specific Data Block */
+ if (cea_db_is_hdmi_vsdb(db))
+ drm_parse_hdmi_vsdb_audio(connector, (const u8 *)db);
+ break;
+ default:
+ break;
}
}
+ cea_db_iter_end(&iter);
+
eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= total_sad_count << DRM_ELD_SAD_COUNT_SHIFT;
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
@@ -4800,6 +5384,40 @@ static void drm_edid_to_eld(struct drm_connector *connector,
drm_eld_size(eld), total_sad_count);
}
+static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
+ struct cea_sad **sads)
+{
+ const struct cea_db *db;
+ struct cea_db_iter iter;
+ int count = 0;
+
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ if (cea_db_tag(db) == CTA_DB_AUDIO) {
+ int j;
+
+ count = cea_db_payload_len(db) / 3; /* SAD is 3B */
+ *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+ if (!*sads)
+ return -ENOMEM;
+ for (j = 0; j < count; j++) {
+ const u8 *sad = &db->data[j * 3];
+
+ (*sads)[j].format = (sad[0] & 0x78) >> 3;
+ (*sads)[j].channels = sad[0] & 0x7;
+ (*sads)[j].freq = sad[1] & 0x7F;
+ (*sads)[j].byte2 = sad[2];
+ }
+ break;
+ }
+ }
+ cea_db_iter_end(&iter);
+
+ DRM_DEBUG_KMS("Found %d Short Audio Descriptors\n", count);
+
+ return count;
+}
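
As a worked example of the decode above (per the CTA-861 Short Audio Descriptor layout): SAD bytes 0x09 0x07 0x07 yield format (0x09 & 0x78) >> 3 = 1 (LPCM), channels 0x09 & 0x7 = 1 (i.e. a 2-channel maximum), freq 0x07 (32/44.1/48 kHz) and byte2 0x07 (16/20/24-bit for LPCM).
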
+
/**
* drm_edid_to_sad - extracts SADs from EDID
* @edid: EDID to parse
@@ -4813,53 +5431,37 @@ static void drm_edid_to_eld(struct drm_connector *connector,
*/
int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads)
{
- int count = 0;
- int i, start, end, dbl;
- const u8 *cea;
-
- cea = drm_find_cea_extension(edid);
- if (!cea) {
- DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
- return 0;
- }
-
- if (cea_revision(cea) < 3) {
- DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return 0;
- }
-
- if (cea_db_offsets(cea, &start, &end)) {
- DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
- return -EPROTO;
- }
-
- for_each_cea_db(cea, i, start, end) {
- const u8 *db = &cea[i];
+ struct drm_edid drm_edid;
- if (cea_db_tag(db) == AUDIO_BLOCK) {
- int j;
+ return _drm_edid_to_sad(drm_edid_legacy_init(&drm_edid, edid), sads);
+}
+EXPORT_SYMBOL(drm_edid_to_sad);
- dbl = cea_db_payload_len(db);
+static int _drm_edid_to_speaker_allocation(const struct drm_edid *drm_edid,
+ u8 **sadb)
+{
+ const struct cea_db *db;
+ struct cea_db_iter iter;
+ int count = 0;
- count = dbl / 3; /* SAD is 3B */
- *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
- if (!*sads)
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ if (cea_db_tag(db) == CTA_DB_SPEAKER &&
+ cea_db_payload_len(db) == 3) {
+ *sadb = kmemdup(db->data, cea_db_payload_len(db),
+ GFP_KERNEL);
+ if (!*sadb)
return -ENOMEM;
- for (j = 0; j < count; j++) {
- const u8 *sad = &db[1 + j * 3];
-
- (*sads)[j].format = (sad[0] & 0x78) >> 3;
- (*sads)[j].channels = sad[0] & 0x7;
- (*sads)[j].freq = sad[1] & 0x7F;
- (*sads)[j].byte2 = sad[2];
- }
+ count = cea_db_payload_len(db);
break;
}
}
+ cea_db_iter_end(&iter);
+
+ DRM_DEBUG_KMS("Found %d Speaker Allocation Data Blocks\n", count);
return count;
}
-EXPORT_SYMBOL(drm_edid_to_sad);
/**
* drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
@@ -4875,44 +5477,10 @@ EXPORT_SYMBOL(drm_edid_to_sad);
*/
int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb)
{
- int count = 0;
- int i, start, end, dbl;
- const u8 *cea;
-
- cea = drm_find_cea_extension(edid);
- if (!cea) {
- DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
- return 0;
- }
-
- if (cea_revision(cea) < 3) {
- DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return 0;
- }
-
- if (cea_db_offsets(cea, &start, &end)) {
- DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
- return -EPROTO;
- }
-
- for_each_cea_db(cea, i, start, end) {
- const u8 *db = &cea[i];
-
- if (cea_db_tag(db) == SPEAKER_BLOCK) {
- dbl = cea_db_payload_len(db);
+ struct drm_edid drm_edid;
- /* Speaker Allocation Data Block */
- if (dbl == 3) {
- *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
- if (!*sadb)
- return -ENOMEM;
- count = dbl;
- break;
- }
- }
- }
-
- return count;
+ return _drm_edid_to_speaker_allocation(drm_edid_legacy_init(&drm_edid, edid),
+ sadb);
}
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
@@ -4957,6 +5525,28 @@ int drm_av_sync_delay(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_av_sync_delay);
+static bool _drm_detect_hdmi_monitor(const struct drm_edid *drm_edid)
+{
+ const struct cea_db *db;
+ struct cea_db_iter iter;
+ bool hdmi = false;
+
+ /*
+ * Because the HDMI identifier is in the Vendor-Specific Data Block, search
+ * for it in all CTA Data Blocks.
+ */
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ if (cea_db_is_hdmi_vsdb(db)) {
+ hdmi = true;
+ break;
+ }
+ }
+ cea_db_iter_end(&iter);
+
+ return hdmi;
+}
+
/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
@@ -4970,29 +5560,53 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*/
bool drm_detect_hdmi_monitor(const struct edid *edid)
{
+ struct drm_edid drm_edid;
+
+ return _drm_detect_hdmi_monitor(drm_edid_legacy_init(&drm_edid, edid));
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+static bool _drm_detect_monitor_audio(const struct drm_edid *drm_edid)
+{
+ struct drm_edid_iter edid_iter;
+ const struct cea_db *db;
+ struct cea_db_iter iter;
const u8 *edid_ext;
- int i;
- int start_offset, end_offset;
+ bool has_audio = false;
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return false;
+ drm_edid_iter_begin(drm_edid, &edid_iter);
+ drm_edid_iter_for_each(edid_ext, &edid_iter) {
+ if (edid_ext[0] == CEA_EXT) {
+ has_audio = edid_ext[3] & EDID_BASIC_AUDIO;
+ if (has_audio)
+ break;
+ }
+ }
+ drm_edid_iter_end(&edid_iter);
- if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
- return false;
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
- /*
- * Because HDMI identifier is in Vendor Specific Block,
- * search it from all data blocks of CEA extension.
- */
- for_each_cea_db(edid_ext, i, start_offset, end_offset) {
- if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
- return true;
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ if (cea_db_tag(db) == CTA_DB_AUDIO) {
+ const u8 *data = cea_db_data(db);
+ int i;
+
+ for (i = 0; i < cea_db_payload_len(db); i += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (data[i] >> 3) & 0xf);
+ has_audio = true;
+ break;
+ }
}
+ cea_db_iter_end(&iter);
- return false;
+end:
+ return has_audio;
}
-EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_detect_monitor_audio - check monitor audio capability
@@ -5008,37 +5622,9 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
*/
bool drm_detect_monitor_audio(const struct edid *edid)
{
- const u8 *edid_ext;
- int i, j;
- bool has_audio = false;
- int start_offset, end_offset;
-
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- goto end;
+ struct drm_edid drm_edid;
- has_audio = (edid_ext[0] == CEA_EXT &&
- (edid_ext[3] & EDID_BASIC_AUDIO) != 0);
-
- if (has_audio) {
- DRM_DEBUG_KMS("Monitor has basic audio support\n");
- goto end;
- }
-
- if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
- goto end;
-
- for_each_cea_db(edid_ext, i, start_offset, end_offset) {
- if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
- has_audio = true;
- for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
- DRM_DEBUG_KMS("CEA audio format %d\n",
- (edid_ext[i + j] >> 3) & 0xf);
- goto end;
- }
- }
-end:
- return has_audio;
+ return _drm_detect_monitor_audio(drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
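A rough sketch of how a driver might combine the two sink-capability helpers after reading an EDID; example_probe_sink_caps() and its debug message are illustrative only:

static void example_probe_sink_caps(struct drm_connector *connector,
				    const struct edid *edid)
{
	bool is_hdmi = drm_detect_hdmi_monitor(edid);
	bool has_audio = drm_detect_monitor_audio(edid);

	drm_dbg_kms(connector->dev, "sink is %s, audio %ssupported\n",
		    is_hdmi ? "HDMI" : "DVI", has_audio ? "" : "not ");
}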
@@ -5117,17 +5703,18 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
hdmi->y420_dc_modes = dc_mask;
}
-static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
- const u8 *hf_vsdb)
+/* Sink Capability Data Structure */
+static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
+ const u8 *hf_scds)
{
struct drm_display_info *display = &connector->display_info;
struct drm_hdmi_info *hdmi = &display->hdmi;
display->has_hdmi_infoframe = true;
- if (hf_vsdb[6] & 0x80) {
+ if (hf_scds[6] & 0x80) {
hdmi->scdc.supported = true;
- if (hf_vsdb[6] & 0x40)
+ if (hf_scds[6] & 0x40)
hdmi->scdc.read_request = true;
}
@@ -5140,9 +5727,9 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
* Lets check it out.
*/
- if (hf_vsdb[5]) {
+ if (hf_scds[5]) {
/* max clock is 5000 KHz times block value */
- u32 max_tmds_clock = hf_vsdb[5] * 5000;
+ u32 max_tmds_clock = hf_scds[5] * 5000;
struct drm_scdc *scdc = &hdmi->scdc;
if (max_tmds_clock > 340000) {
@@ -5155,42 +5742,42 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
scdc->scrambling.supported = true;
/* Few sinks support scrambling for clocks < 340M */
- if ((hf_vsdb[6] & 0x8))
+ if ((hf_scds[6] & 0x8))
scdc->scrambling.low_rates = true;
}
}
- if (hf_vsdb[7]) {
+ if (hf_scds[7]) {
u8 max_frl_rate;
u8 dsc_max_frl_rate;
u8 dsc_max_slices;
struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
- max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
+ max_frl_rate = (hf_scds[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
&hdmi->max_frl_rate_per_lane);
- hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2;
+ hdmi_dsc->v_1p2 = hf_scds[11] & DRM_EDID_DSC_1P2;
if (hdmi_dsc->v_1p2) {
- hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420;
- hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP;
+ hdmi_dsc->native_420 = hf_scds[11] & DRM_EDID_DSC_NATIVE_420;
+ hdmi_dsc->all_bpp = hf_scds[11] & DRM_EDID_DSC_ALL_BPP;
- if (hf_vsdb[11] & DRM_EDID_DSC_16BPC)
+ if (hf_scds[11] & DRM_EDID_DSC_16BPC)
hdmi_dsc->bpc_supported = 16;
- else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC)
+ else if (hf_scds[11] & DRM_EDID_DSC_12BPC)
hdmi_dsc->bpc_supported = 12;
- else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC)
+ else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
hdmi_dsc->bpc_supported = 10;
else
hdmi_dsc->bpc_supported = 0;
- dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
&hdmi_dsc->max_frl_rate_per_lane);
- hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
+ hdmi_dsc->total_chunk_kbytes = hf_scds[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
- dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES;
+ dsc_max_slices = hf_scds[12] & DRM_EDID_DSC_MAX_SLICES;
switch (dsc_max_slices) {
case 1:
hdmi_dsc->max_slices = 1;
@@ -5228,7 +5815,7 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
}
}
- drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
+ drm_parse_ycbcr420_deep_color_info(connector, hf_scds);
}
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
@@ -5332,48 +5919,55 @@ static void drm_parse_microsoft_vsdb(struct drm_connector *connector,
}
static void drm_parse_cea_ext(struct drm_connector *connector,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
+ struct drm_edid_iter edid_iter;
+ const struct cea_db *db;
+ struct cea_db_iter iter;
const u8 *edid_ext;
- int i, start, end;
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return;
+ drm_edid_iter_begin(drm_edid, &edid_iter);
+ drm_edid_iter_for_each(edid_ext, &edid_iter) {
+ if (edid_ext[0] != CEA_EXT)
+ continue;
- info->cea_rev = edid_ext[1];
+ if (!info->cea_rev)
+ info->cea_rev = edid_ext[1];
- /* The existence of a CEA block should imply RGB support */
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (info->cea_rev != edid_ext[1])
+ DRM_DEBUG_KMS("CEA extension version mismatch %u != %u\n",
+ info->cea_rev, edid_ext[1]);
- /* CTA DisplayID Data Block does not have byte #3 */
- if (edid_ext[0] == CEA_EXT) {
+ /* The existence of a CTA extension should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid_ext[3] & EDID_CEA_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
if (edid_ext[3] & EDID_CEA_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR422;
}
+ drm_edid_iter_end(&edid_iter);
- if (cea_db_offsets(edid_ext, &start, &end))
- return;
-
- for_each_cea_db(edid_ext, i, start, end) {
- const u8 *db = &edid_ext[i];
+ cea_db_iter_edid_begin(drm_edid, &iter);
+ cea_db_iter_for_each(db, &iter) {
+ /* FIXME: convert parsers to use struct cea_db */
+ const u8 *data = (const u8 *)db;
if (cea_db_is_hdmi_vsdb(db))
- drm_parse_hdmi_vsdb_video(connector, db);
- if (cea_db_is_hdmi_forum_vsdb(db))
- drm_parse_hdmi_forum_vsdb(connector, db);
- if (cea_db_is_microsoft_vsdb(db))
- drm_parse_microsoft_vsdb(connector, db);
- if (cea_db_is_y420cmdb(db))
- drm_parse_y420cmdb_bitmap(connector, db);
- if (cea_db_is_vcdb(db))
- drm_parse_vcdb(connector, db);
- if (cea_db_is_hdmi_hdr_metadata_block(db))
- drm_parse_hdr_metadata_block(connector, db);
+ drm_parse_hdmi_vsdb_video(connector, data);
+ else if (cea_db_is_hdmi_forum_vsdb(db) ||
+ cea_db_is_hdmi_forum_scdb(db))
+ drm_parse_hdmi_forum_scds(connector, data);
+ else if (cea_db_is_microsoft_vsdb(db))
+ drm_parse_microsoft_vsdb(connector, data);
+ else if (cea_db_is_y420cmdb(db))
+ drm_parse_y420cmdb_bitmap(connector, data);
+ else if (cea_db_is_vcdb(db))
+ drm_parse_vcdb(connector, data);
+ else if (cea_db_is_hdmi_hdr_metadata_block(db))
+ drm_parse_hdr_metadata_block(connector, data);
}
+ cea_db_iter_end(&iter);
}
static
@@ -5400,16 +5994,15 @@ void get_monitor_range(const struct detailed_timing *timing,
monitor_range->max_vfreq = range->max_vfreq;
}
-static
-void drm_get_monitor_range(struct drm_connector *connector,
- const struct edid *edid)
+static void drm_get_monitor_range(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
- if (!version_greater(edid, 1, 1))
+ if (!version_greater(drm_edid, 1, 1))
return;
- drm_for_each_detailed_block(edid, get_monitor_range,
+ drm_for_each_detailed_block(drm_edid, get_monitor_range,
&info->monitor_range);
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
@@ -5469,12 +6062,13 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
info->mso_stream_count, info->mso_pixel_overlap);
}
-static void drm_update_mso(struct drm_connector *connector, const struct edid *edid)
+static void drm_update_mso(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
- displayid_iter_edid_begin(edid, &iter);
+ displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_2_VENDOR_SPECIFIC)
drm_parse_vesa_mso_data(connector, block);
@@ -5485,8 +6079,7 @@ static void drm_update_mso(struct drm_connector *connector, const struct edid *e
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
-void
-drm_reset_display_info(struct drm_connector *connector)
+static void drm_reset_display_info(struct drm_connector *connector)
{
struct drm_display_info *info = &connector->display_info;
@@ -5513,18 +6106,20 @@ drm_reset_display_info(struct drm_connector *connector)
info->mso_pixel_overlap = 0;
}
-u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
+static u32 update_display_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
+ const struct edid *edid = drm_edid->edid;
- u32 quirks = edid_get_quirks(edid);
+ u32 quirks = edid_get_quirks(drm_edid);
drm_reset_display_info(connector);
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
- drm_get_monitor_range(connector, edid);
+ drm_get_monitor_range(connector, drm_edid);
if (edid->revision < 3)
goto out;
@@ -5533,7 +6128,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
goto out;
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
- drm_parse_cea_ext(connector, edid);
+ drm_parse_cea_ext(connector, drm_edid);
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
@@ -5586,7 +6181,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR422;
- drm_update_mso(connector, edid);
+ drm_update_mso(connector, drm_edid);
out:
if (quirks & EDID_QUIRK_NON_DESKTOP) {
@@ -5673,13 +6268,13 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
}
static int add_displayid_detailed_modes(struct drm_connector *connector,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
int num_modes = 0;
- displayid_iter_edid_begin(edid, &iter);
+ displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
@@ -5690,25 +6285,27 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
return num_modes;
}
-static int drm_edid_connector_update(struct drm_connector *connector,
- const struct edid *edid)
+static int _drm_edid_connector_update(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
int num_modes = 0;
u32 quirks;
- if (edid == NULL) {
+ if (!drm_edid) {
+ drm_reset_display_info(connector);
clear_eld(connector);
return 0;
}
- drm_edid_to_eld(connector, edid);
-
/*
* CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- quirks = drm_add_display_info(connector, edid);
+ quirks = update_display_info(connector, drm_edid);
+
+ /* Depends on info->cea_rev set by update_display_info() above */
+ drm_edid_to_eld(connector, drm_edid);
/*
* EDID spec says modes should be preferred in this order:
@@ -5724,15 +6321,15 @@ static int drm_edid_connector_update(struct drm_connector *connector,
*
* XXX order for additional mode types in extension blocks?
*/
- num_modes += add_detailed_modes(connector, edid, quirks);
- num_modes += add_cvt_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_cea_modes(connector, edid);
- num_modes += add_alternate_cea_modes(connector, edid);
- num_modes += add_displayid_detailed_modes(connector, edid);
- if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
- num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_detailed_modes(connector, drm_edid, quirks);
+ num_modes += add_cvt_modes(connector, drm_edid);
+ num_modes += add_standard_modes(connector, drm_edid);
+ num_modes += add_established_modes(connector, drm_edid);
+ num_modes += add_cea_modes(connector, drm_edid);
+ num_modes += add_alternate_cea_modes(connector, drm_edid);
+ num_modes += add_displayid_detailed_modes(connector, drm_edid);
+ if (drm_edid->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, drm_edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
@@ -5752,6 +6349,156 @@ static int drm_edid_connector_update(struct drm_connector *connector,
return num_modes;
}
+static void _drm_update_tile_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid);
+
+static int _drm_edid_connector_property_update(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
+{
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ if (connector->edid_blob_ptr) {
+ const struct edid *old_edid = connector->edid_blob_ptr->data;
+
+ if (old_edid) {
+ if (!drm_edid_are_equal(drm_edid ? drm_edid->edid : NULL, old_edid)) {
+ connector->epoch_counter++;
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n",
+ connector->base.id, connector->name,
+ connector->epoch_counter);
+ }
+ }
+ }
+
+ ret = drm_property_replace_global_blob(dev,
+ &connector->edid_blob_ptr,
+ drm_edid ? drm_edid->size : 0,
+ drm_edid ? drm_edid->edid : NULL,
+ &connector->base,
+ dev->mode_config.edid_property);
+ if (ret) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID property update failed (%d)\n",
+ connector->base.id, connector->name, ret);
+ goto out;
+ }
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+ if (ret) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Non-desktop property update failed (%d)\n",
+ connector->base.id, connector->name, ret);
+ goto out;
+ }
+
+ ret = drm_connector_set_tile_property(connector);
+ if (ret) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Tile property update failed (%d)\n",
+ connector->base.id, connector->name, ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * drm_edid_connector_update - Update connector information from EDID
+ * @connector: Connector
+ * @drm_edid: EDID
+ *
+ * Update the connector mode list, display info, ELD, HDR metadata, relevant
+ * properties, etc. from the passed in EDID.
+ *
+ * If EDID is NULL, reset the information.
+ *
+ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+int drm_edid_connector_update(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
+{
+ int count;
+
+ /*
+ * FIXME: Reconcile the differences in override_edid handling between
+ * this and drm_connector_update_edid_property().
+ *
+ * If override_edid is set, and the EDID passed in here originates from
+ * drm_edid_read() and friends, it will be the override EDID, and there
+ * are no issues. drm_connector_update_edid_property() ignoring requests
+ * to set the EDID dates back to a time when override EDID was not
+ * handled at the low level EDID read.
+ *
+ * The only way the EDID passed in here can be different from the
+ * override EDID is when a driver passes in an EDID that does *not*
+ * originate from drm_edid_read() and friends, or passes in a stale
+ * cached version. This, in turn, is a question of when an override EDID
+ * set via debugfs should take effect.
+ */
+
+ count = _drm_edid_connector_update(connector, drm_edid);
+
+ _drm_update_tile_info(connector, drm_edid);
+
+ /* Note: Ignore errors for now. */
+ _drm_edid_connector_property_update(connector, drm_edid);
+
+ return count;
+}
+EXPORT_SYMBOL(drm_edid_connector_update);
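As a sketch of the intended calling convention (example_get_modes() is hypothetical; drm_edid_read() and drm_edid_free() are added earlier in this series and require connector->ddc), a driver's .get_modes() hook reduces to:

static int example_get_modes(struct drm_connector *connector)
{
	const struct drm_edid *drm_edid;
	int count;

	drm_edid = drm_edid_read(connector);

	/* A NULL drm_edid resets the display info, ELD and properties. */
	count = drm_edid_connector_update(connector, drm_edid);

	drm_edid_free(drm_edid);

	return count;
}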
+
+static int _drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
+{
+ /* ignore requests to set edid when overridden */
+ if (connector->override_edid)
+ return 0;
+
+ /*
+ * Set the display info, using edid if available, otherwise resetting
+ * the values to defaults. This duplicates the work done in
+ * drm_add_edid_modes, but that function is not consistently called
+ * before this one in all drivers and the computation is cheap enough
+ * that it seems better to duplicate it rather than attempt to ensure
+ * some arbitrary ordering of calls.
+ */
+ if (drm_edid)
+ update_display_info(connector, drm_edid);
+ else
+ drm_reset_display_info(connector);
+
+ _drm_update_tile_info(connector, drm_edid);
+
+ return _drm_edid_connector_property_update(connector, drm_edid);
+}
+
+/**
+ * drm_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ * Since tile information is also parsed from the EDID's DisplayID block, the
+ * connector's tile property is set here as well. See drm_connector_set_tile_property()
+ * for more details.
+ *
+ * This function is deprecated. Use drm_edid_connector_update() instead.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid)
+{
+ struct drm_edid drm_edid;
+
+ return _drm_connector_update_edid_property(connector,
+ drm_edid_legacy_init(&drm_edid, edid));
+}
+EXPORT_SYMBOL(drm_connector_update_edid_property);
+
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
@@ -5761,17 +6508,22 @@ static int drm_edid_connector_update(struct drm_connector *connector,
* &drm_display_info structure and ELD in @connector with any information which
* can be derived from the edid.
*
+ * This function is deprecated. Use drm_edid_connector_update() instead.
+ *
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
+ struct drm_edid drm_edid;
+
if (edid && !drm_edid_is_valid(edid)) {
drm_warn(connector->dev, "%s: EDID invalid.\n",
connector->name);
edid = NULL;
}
- return drm_edid_connector_update(connector, edid);
+ return _drm_edid_connector_update(connector,
+ drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_add_edid_modes);
@@ -6166,15 +6918,15 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
}
}
-void drm_update_tile_info(struct drm_connector *connector,
- const struct edid *edid)
+static void _drm_update_tile_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
connector->has_tile = false;
- displayid_iter_edid_begin(edid, &iter);
+ displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_TILED_DISPLAY)
drm_parse_tiled_block(connector, block);
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 72e982323a5e..a940024c8087 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -226,7 +226,7 @@ void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOMEM);
encoder = container + offset;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5ad2b6a2778c..2d4cee6a10ff 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -43,6 +43,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -680,7 +681,11 @@ static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
schedule_work(&helper->damage_work);
}
-/* Convert memory region into area of scanlines and pixels per scanline */
+/*
+ * Convert memory region into area of scanlines and pixels per
+ * scanline. The parameters off and len must not reach beyond
+ * the end of the framebuffer.
+ */
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
@@ -715,22 +720,29 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
*/
void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
- unsigned long start, end, min, max;
+ unsigned long start, end, min_off, max_off;
struct fb_deferred_io_pageref *pageref;
struct drm_rect damage_area;
- min = ULONG_MAX;
- max = 0;
+ min_off = ULONG_MAX;
+ max_off = 0;
list_for_each_entry(pageref, pagereflist, list) {
start = pageref->offset;
end = start + PAGE_SIZE;
- min = min(min, start);
- max = max(max, end);
+ min_off = min(min_off, start);
+ max_off = max(max_off, end);
}
- if (min >= max)
+ if (min_off >= max_off)
return;
- drm_fb_helper_memory_range_to_clip(info, min, max - min, &damage_area);
+ /*
+ * As we can only track pages, we might reach beyond the end
+ * of the screen and account for non-existing scanlines. Hence,
+ * keep the covered memory area within the screen buffer.
+ */
+ max_off = min(max_off, info->screen_size);
+
+ drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
drm_rect_width(&damage_area),
drm_rect_height(&damage_area));
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index a5026f617739..b6a0110eb64a 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -169,8 +170,10 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
struct dma_fence *new;
- if (WARN_ON_ONCE(!obj))
- continue;
+ if (!obj) {
+ ret = -EINVAL;
+ goto error;
+ }
ret = dma_resv_get_singleton(obj->resv, usage, &new);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index f36734c2c9e1..42abee9a0f4f 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -26,12 +26,22 @@
/**
* DOC: cma helpers
*
- * The Contiguous Memory Allocator reserves a pool of memory at early boot
- * that is used to service requests for large blocks of contiguous memory.
+ * The DRM GEM/CMA helpers are a means to provide buffer objects that are
+ * presented to the device as a contiguous chunk of memory. This is useful
+ * for devices that do not support scatter-gather DMA (either directly or
+ * by using an intimately attached IOMMU).
*
- * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
- * objects that are physically contiguous in memory. This is useful for
- * display drivers that are unable to map scattered buffers via an IOMMU.
+ * Despite the name, the DRM GEM/CMA helpers are not hardwired to use the
+ * Contiguous Memory Allocator (CMA).
+ *
+ * For devices that access the memory bus through an (external) IOMMU, the
+ * buffer objects are allocated using a traditional page-based allocator and
+ * may be scattered through physical memory. However, they are contiguous in
+ * the IOVA space, so they appear contiguous to the devices using them.
+ *
+ * For all other devices, the helpers rely on CMA to provide buffer objects
+ * that are physically contiguous in memory.
*
* For GEM callback helpers in struct &drm_gem_object functions, see likewise
* named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
@@ -111,8 +121,14 @@ error:
* @drm: DRM device
* @size: size of the object to allocate
*
- * This function creates a CMA GEM object and allocates a contiguous chunk of
- * memory as backing store.
+ * This function creates a CMA GEM object and allocates memory as backing store.
+ * The allocated memory will occupy a contiguous chunk of bus address space.
+ *
+ * For devices that are directly connected to the memory bus, the allocated
+ * memory will be physically contiguous. For devices that access memory
+ * through an IOMMU, the allocated memory is not expected to be physically
+ * contiguous, because contiguous IOVAs are sufficient to meet a device's
+ * DMA requirements.
*
* Returns:
* A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
@@ -162,9 +178,12 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_create);
* @size: size of the object to allocate
* @handle: return location for the GEM handle
*
- * This function creates a CMA GEM object, allocating a physically contiguous
- * chunk of memory as backing store. The GEM object is then added to the list
- * of object associated with the given file and a handle to it is returned.
+ * This function creates a CMA GEM object, allocating a chunk of memory as
+ * backing store. The GEM object is then added to the list of objects associated
+ * with the given file, and a handle to it is returned.
+ *
+ * The allocated memory will occupy a contiguous chunk of bus address space.
+ * See drm_gem_cma_create() for more details.
*
* Returns:
* A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
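A minimal allocation sketch under the updated semantics; example_create_scanout_bo() is hypothetical and assumes the vaddr/paddr members of struct drm_gem_cma_object:

static struct drm_gem_cma_object *
example_create_scanout_bo(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	/*
	 * cma_obj->vaddr is the kernel mapping, cma_obj->paddr the bus/DMA
	 * address; both refer to one chunk that is contiguous in bus
	 * address space, but not necessarily in physical memory.
	 */
	return cma_obj;
}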
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index f4619803acd0..61339a9cd010 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -53,7 +53,11 @@ MODULE_IMPORT_NS(DMA_BUF);
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
- if (plane >= ARRAY_SIZE(fb->obj))
+ struct drm_device *dev = fb->dev;
+
+ if (drm_WARN_ON_ONCE(dev, plane >= ARRAY_SIZE(fb->obj)))
+ return NULL;
+ else if (drm_WARN_ON_ONCE(dev, !fb->obj[plane]))
return NULL;
return fb->obj[plane];
@@ -92,9 +96,9 @@ drm_gem_fb_init(struct drm_device *dev,
*/
void drm_gem_fb_destroy(struct drm_framebuffer *fb)
{
- size_t i;
+ unsigned int i;
- for (i = 0; i < ARRAY_SIZE(fb->obj); i++)
+ for (i = 0; i < fb->format->num_planes; i++)
drm_gem_object_put(fb->obj[i]);
drm_framebuffer_cleanup(fb);
@@ -329,24 +333,26 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
* The argument returns the addresses of the data stored in each BO. This
* is different from @map if the framebuffer's offsets field is non-zero.
*
+ * Both, @map and @data, must each refer to arrays with at least
+ * fb->format->num_planes elements.
+ *
* See drm_gem_fb_vunmap() for unmapping.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int drm_gem_fb_vmap(struct drm_framebuffer *fb,
- struct iosys_map map[static DRM_FORMAT_MAX_PLANES],
- struct iosys_map data[DRM_FORMAT_MAX_PLANES])
+int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
+ struct iosys_map *data)
{
struct drm_gem_object *obj;
unsigned int i;
int ret;
- for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) {
+ for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj) {
- iosys_map_clear(&map[i]);
- continue;
+ ret = -EINVAL;
+ goto err_drm_gem_vunmap;
}
ret = drm_gem_vmap(obj, &map[i]);
if (ret)
@@ -354,7 +360,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb,
}
if (data) {
- for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) {
+ for (i = 0; i < fb->format->num_planes; ++i) {
memcpy(&data[i], &map[i], sizeof(data[i]));
if (iosys_map_is_null(&data[i]))
continue;
@@ -385,10 +391,9 @@ EXPORT_SYMBOL(drm_gem_fb_vmap);
*
* See drm_gem_fb_vmap() for more information.
*/
-void drm_gem_fb_vunmap(struct drm_framebuffer *fb,
- struct iosys_map map[static DRM_FORMAT_MAX_PLANES])
+void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
{
- unsigned int i = DRM_FORMAT_MAX_PLANES;
+ unsigned int i = fb->format->num_planes;
struct drm_gem_object *obj;
while (i) {
@@ -403,6 +408,28 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
+static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
+ unsigned int num_planes)
+{
+ struct dma_buf_attachment *import_attach;
+ struct drm_gem_object *obj;
+ int ret;
+
+ while (num_planes) {
+ --num_planes;
+ obj = drm_gem_fb_get_obj(fb, num_planes);
+ if (!obj)
+ continue;
+ import_attach = obj->import_attach;
+ if (!import_attach)
+ continue;
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
+ if (ret)
+ drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
+ num_planes, dir, ret);
+ }
+}
+
/**
* drm_gem_fb_begin_cpu_access - prepares GEM buffer objects for CPU access
* @fb: the framebuffer
@@ -421,40 +448,27 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
{
struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
- size_t i;
- int ret, ret2;
+ unsigned int i;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(fb->obj); ++i) {
+ for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
- if (!obj)
- continue;
+ if (!obj) {
+ ret = -EINVAL;
+ goto err___drm_gem_fb_end_cpu_access;
+ }
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
- goto err_dma_buf_end_cpu_access;
+ goto err___drm_gem_fb_end_cpu_access;
}
return 0;
-err_dma_buf_end_cpu_access:
- while (i) {
- --i;
- obj = drm_gem_fb_get_obj(fb, i);
- if (!obj)
- continue;
- import_attach = obj->import_attach;
- if (!import_attach)
- continue;
- ret2 = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
- if (ret2) {
- drm_err(fb->dev,
- "dma_buf_end_cpu_access() failed during error handling: %d\n",
- ret2);
- }
- }
-
+err___drm_gem_fb_end_cpu_access:
+ __drm_gem_fb_end_cpu_access(fb, dir, i);
return ret;
}
EXPORT_SYMBOL(drm_gem_fb_begin_cpu_access);
@@ -472,23 +486,7 @@ EXPORT_SYMBOL(drm_gem_fb_begin_cpu_access);
*/
void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
- size_t i = ARRAY_SIZE(fb->obj);
- struct dma_buf_attachment *import_attach;
- struct drm_gem_object *obj;
- int ret;
-
- while (i) {
- --i;
- obj = drm_gem_fb_get_obj(fb, i);
- if (!obj)
- continue;
- import_attach = obj->import_attach;
- if (!import_attach)
- continue;
- ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
- if (ret)
- drm_err(fb->dev, "dma_buf_end_cpu_access() failed: %d\n", ret);
- }
+ __drm_gem_fb_end_cpu_access(fb, dir, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
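A rough sketch of how the reworked vmap and CPU-access helpers pair up for a CPU read of a framebuffer backed by imported buffers; example_read_fb_on_cpu() is illustrative and keeps error handling minimal:

static int example_read_fb_on_cpu(struct drm_framebuffer *fb)
{
	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
	struct iosys_map data[DRM_FORMAT_MAX_PLANES];
	int ret;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	ret = drm_gem_fb_vmap(fb, map, data);
	if (ret)
		goto out_end_cpu_access;

	/* data[i] points at the pixel data of plane i, offsets applied. */

	drm_gem_fb_vunmap(fb, map);

out_end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
	return ret;
}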
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 123045b58fec..d607043716d3 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -9,6 +9,7 @@
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
@@ -630,6 +631,24 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
* Helpers for struct drm_plane_helper_funcs
*/
+static void __drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ unsigned int num_planes)
+{
+ struct drm_gem_object *obj;
+ struct drm_gem_vram_object *gbo;
+ struct drm_framebuffer *fb = state->fb;
+
+ while (num_planes) {
+ --num_planes;
+ obj = drm_gem_fb_get_obj(fb, num_planes);
+ if (!obj)
+ continue;
+ gbo = drm_gem_vram_of_gem(obj);
+ drm_gem_vram_unpin(gbo);
+ }
+}
+
/**
* drm_gem_vram_plane_helper_prepare_fb() - \
* Implements &struct drm_plane_helper_funcs.prepare_fb
@@ -648,17 +667,22 @@ int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- size_t i;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_vram_object *gbo;
+ struct drm_gem_object *obj;
+ unsigned int i;
int ret;
- if (!new_state->fb)
+ if (!fb)
return 0;
- for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
- if (!new_state->fb->obj[i])
- continue;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ obj = drm_gem_fb_get_obj(fb, i);
+ if (!obj) {
+ ret = -EINVAL;
+ goto err_drm_gem_vram_unpin;
+ }
+ gbo = drm_gem_vram_of_gem(obj);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto err_drm_gem_vram_unpin;
@@ -671,11 +695,7 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
return 0;
err_drm_gem_vram_unpin:
- while (i) {
- --i;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
- drm_gem_vram_unpin(gbo);
- }
+ __drm_gem_vram_plane_helper_cleanup_fb(plane, new_state, i);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
@@ -694,18 +714,12 @@ void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- size_t i;
- struct drm_gem_vram_object *gbo;
+ struct drm_framebuffer *fb = old_state->fb;
- if (!old_state->fb)
+ if (!fb)
return;
- for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
- if (!old_state->fb->obj[i])
- continue;
- gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
- drm_gem_vram_unpin(gbo);
- }
+ __drm_gem_vram_plane_helper_cleanup_fb(plane, old_state, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
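As a small illustration (the struct name is hypothetical), a VRAM-backed driver typically plugs the two helpers into its primary plane's helper vtable:

static const struct drm_plane_helper_funcs example_primary_plane_helper_funcs = {
	/* .atomic_check and .atomic_update remain driver specific. */
	.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
	.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};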
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 51fcf1298023..8faad23dc1d8 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -83,7 +83,7 @@
*
* 1. Directly call VERSION to get the version and to match against the driver
* name returned by that ioctl. Note that SET_VERSION is not called, which
- * means the the unique name for the master node just opening is _not_ filled
+ * means the unique name for the master node just opening is _not_ filled
* out. This despite that with current drm device nodes are always bound to
* one device, and can't be runtime assigned like with drm 1.0.
* 2. Match driver name. If it mismatches, proceed to the next device node.
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 8be20080cd8d..0bf0fc1abf54 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
+#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include "drm_crtc_helper_internal.h"
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 9314f2ead79f..2f61f53d472f 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -5,6 +5,7 @@
* Copyright 2016 Noralf Trønnes
*/
+#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -18,6 +19,7 @@
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
@@ -1199,6 +1201,13 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
size_t chunk;
int ret;
+ /*
+ * __spi_validate() rejects partial transfers (xfer->len % w_size must
+ * be zero), so align max_chunk down to a multiple of 2 (16 bits) to
+ * prevent transfers from being rejected.
+ */
+ max_chunk = ALIGN_DOWN(max_chunk, 2);
+
spi_message_init_with_transfers(&m, &tr, 1);
while (len) {
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 37b4b9f0e468..59b34f07cfce 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 14b746f7ba97..304004fb80aa 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/export.h>
+#include <linux/fb.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -41,6 +42,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
@@ -1328,6 +1330,10 @@ void drm_mode_prune_invalid(struct drm_device *dev,
list_for_each_entry_safe(mode, t, mode_list, head) {
if (mode->status != MODE_OK) {
list_del(&mode->head);
+ if (mode->type & DRM_MODE_TYPE_USERDEF) {
+ drm_warn(dev, "User-defined mode not supported: "
+ DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+ }
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG_KMS("Not using %s mode: %s\n",
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index da483125e063..0f08319453b2 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -23,6 +23,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 9a2cfab3a177..7bbcb999bb75 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -2,6 +2,8 @@
#include <linux/component.h>
#include <linux/export.h>
#include <linux/list.h>
+#include <linux/media-bus-format.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>
@@ -430,3 +432,64 @@ int drm_of_lvds_get_data_mapping(const struct device_node *port)
return -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_of_lvds_get_data_mapping);
+
+/**
+ * drm_of_get_data_lanes_count - Get DSI/(e)DP data lane count
+ * @endpoint: DT endpoint node of the DSI/(e)DP source or sink
+ * @min: minimum supported number of data lanes
+ * @max: maximum supported number of data lanes
+ *
+ * Count DT "data-lanes" property elements and check for validity.
+ *
+ * Return:
+ * * min..max - positive integer count of "data-lanes" elements
+ * * -ve - the "data-lanes" property is missing or invalid
+ * * -EINVAL - the "data-lanes" property is unsupported
+ */
+int drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max)
+{
+ int ret;
+
+ ret = of_property_count_u32_elems(endpoint, "data-lanes");
+ if (ret < 0)
+ return ret;
+
+ if (ret < min || ret > max)
+ return -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count);
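A minimal sketch of a caller; example_dsi_parse_lanes() is hypothetical and assumes a sink endpoint under port 1 with 1 to 4 supported data lanes:

static int example_dsi_parse_lanes(struct device_node *np)
{
	struct device_node *endpoint;
	int num_lanes;

	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
	if (!endpoint)
		return -ENODEV;

	num_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
	of_node_put(endpoint);

	return num_lanes;
}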
+
+/**
+ * drm_of_get_data_lanes_count_ep - Get DSI/(e)DP data lane count by endpoint
+ * @port: DT port node of the DSI/(e)DP source or sink
+ * @port_reg: identifier (value of reg property) of the parent port node
+ * @reg: identifier (value of reg property) of the endpoint node
+ * @min: minimum supported number of data lanes
+ * @max: maximum supported number of data lanes
+ *
+ * Count DT "data-lanes" property elements and check for validity.
+ * This variant uses endpoint specifier.
+ *
+ * Return:
+ * * min..max - positive integer count of "data-lanes" elements
+ * * -EINVAL - the "data-mapping" property is unsupported
+ * * -ENODEV - the "data-mapping" property is missing
+ */
+int drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max)
+{
+ struct device_node *endpoint;
+ int ret;
+
+ endpoint = of_graph_get_endpoint_by_regs(port, port_reg, reg);
+ ret = drm_of_get_data_lanes_count(endpoint, min, max);
+ of_node_put(endpoint);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 4e853acfd1e8..7a9eeed239f3 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -152,6 +152,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO NEXT */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -187,7 +193,7 @@ static const struct dmi_system_id orientation_data[] = {
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
- * GPD Pocket, note that the the DMI data is less generic then
+ * GPD Pocket, note that the DMI data is less generic than
* it seems, devices with a board-vendor of "AMI Corporation"
* are quite rare, as are devices which have both board- *and*
* product-id set to "Default String"
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index e3f09f18110c..a3f180653b8b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -839,7 +839,7 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
* @sgt: sg_table describing the buffer to check
*
* This helper calculates the contiguous size in the DMA address space
- * of the the buffer described by the provided sg_table.
+ * of the buffer described by the provided sg_table.
*
* This is useful for implementing
* &drm_gem_object_funcs.gem_prime_import_sg_table.
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 682359512996..bb427c5a4f1f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -354,6 +354,79 @@ drm_helper_probe_detect(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_helper_probe_detect);
+static int drm_helper_probe_get_modes(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ int count;
+
+ count = connector_funcs->get_modes(connector);
+
+ /*
+ * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+ * override/firmware EDID.
+ */
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_override_edid_modes(connector);
+
+ return count;
+}
+
+static int __drm_helper_update_and_validate(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ int mode_flags = 0;
+ int ret;
+
+ drm_connector_list_update(connector);
+
+ if (connector->interlace_allowed)
+ mode_flags |= DRM_MODE_FLAG_INTERLACE;
+ if (connector->doublescan_allowed)
+ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (connector->stereo_allowed)
+ mode_flags |= DRM_MODE_FLAG_3D_MASK;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->status != MODE_OK)
+ continue;
+
+ mode->status = drm_mode_validate_driver(dev, mode);
+ if (mode->status != MODE_OK)
+ continue;
+
+ mode->status = drm_mode_validate_size(mode, maxX, maxY);
+ if (mode->status != MODE_OK)
+ continue;
+
+ mode->status = drm_mode_validate_flag(mode, mode_flags);
+ if (mode->status != MODE_OK)
+ continue;
+
+ ret = drm_mode_validate_pipeline(mode, connector, ctx,
+ &mode->status);
+ if (ret) {
+ drm_dbg_kms(dev,
+ "drm_mode_validate_pipeline failed: %d\n",
+ ret);
+
+ if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK))
+ mode->status = MODE_ERROR;
+ else
+ return -EDEADLK;
+ }
+
+ if (mode->status != MODE_OK)
+ continue;
+ mode->status = drm_mode_validate_ycbcr420(mode, connector);
+ }
+
+ return 0;
+}
+
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
@@ -418,11 +491,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- const struct drm_connector_helper_funcs *connector_funcs =
- connector->helper_private;
int count = 0, ret;
- int mode_flags = 0;
- bool verbose_prune = true;
enum drm_connector_status old_status;
struct drm_modeset_acquire_ctx ctx;
@@ -502,74 +571,54 @@ retry:
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, connector->name);
drm_connector_update_edid_property(connector, NULL);
- verbose_prune = false;
- goto prune;
+ drm_mode_prune_invalid(dev, &connector->modes, false);
+ goto exit;
}
- count = (*connector_funcs->get_modes)(connector);
-
- /*
- * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
- * override/firmware EDID.
- */
- if (count == 0 && connector->status == connector_status_connected)
- count = drm_add_override_edid_modes(connector);
+ count = drm_helper_probe_get_modes(connector);
if (count == 0 && (connector->status == connector_status_connected ||
- connector->status == connector_status_unknown))
+ connector->status == connector_status_unknown)) {
count = drm_add_modes_noedid(connector, 1024, 768);
- count += drm_helper_probe_add_cmdline_mode(connector);
- if (count == 0)
- goto prune;
- drm_connector_list_update(connector);
-
- if (connector->interlace_allowed)
- mode_flags |= DRM_MODE_FLAG_INTERLACE;
- if (connector->doublescan_allowed)
- mode_flags |= DRM_MODE_FLAG_DBLSCAN;
- if (connector->stereo_allowed)
- mode_flags |= DRM_MODE_FLAG_3D_MASK;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (mode->status != MODE_OK)
- continue;
-
- mode->status = drm_mode_validate_driver(dev, mode);
- if (mode->status != MODE_OK)
- continue;
-
- mode->status = drm_mode_validate_size(mode, maxX, maxY);
- if (mode->status != MODE_OK)
- continue;
-
- mode->status = drm_mode_validate_flag(mode, mode_flags);
- if (mode->status != MODE_OK)
- continue;
+ /*
+ * Section 4.2.2.6 (EDID Corruption Detection) of the DP 1.4a
+ * Link CTS specifies that 640x480 (the official "failsafe"
+ * mode) needs to be the default if there's no EDID.
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+ drm_set_preferred_mode(connector, 640, 480);
+ }
+ count += drm_helper_probe_add_cmdline_mode(connector);
+ if (count != 0) {
+ ret = __drm_helper_update_and_validate(connector, maxX, maxY, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ }
- ret = drm_mode_validate_pipeline(mode, connector, &ctx,
- &mode->status);
- if (ret) {
- drm_dbg_kms(dev,
- "drm_mode_validate_pipeline failed: %d\n",
- ret);
+ drm_mode_prune_invalid(dev, &connector->modes, true);
- if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK)) {
- mode->status = MODE_ERROR;
- } else {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
+ /*
+ * DisplayPort spec section 5.2.1.2 ("Video Timing Format") says that
+ * all detachable sinks shall support 640x480 @60Hz as a fail-safe
+ * mode. If all modes were pruned, perhaps because they need more
+ * lanes or a higher pixel clock than available, at least try to add
+ * in 640x480.
+ */
+ if (list_empty(&connector->modes) &&
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ count = drm_add_modes_noedid(connector, 640, 480);
+ ret = __drm_helper_update_and_validate(connector, maxX, maxY, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
}
-
- if (mode->status != MODE_OK)
- continue;
- mode->status = drm_mode_validate_ycbcr420(mode, connector);
+ drm_mode_prune_invalid(dev, &connector->modes, true);
}
-prune:
- drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
-
+exit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -964,3 +1013,73 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+/**
+ * drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID
+ * property from the connector's DDC channel
+ * @connector: The connector
+ *
+ * Returns:
+ * The number of detected display modes.
+ *
+ * Uses a connector's DDC channel to retrieve EDID data and update the
+ * connector's EDID property and display modes. Drivers can use this
+ * function to implement struct &drm_connector_helper_funcs.get_modes
+ * for connectors with a DDC channel.
+ */
+int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector)
+{
+ struct edid *edid;
+ int count = 0;
+
+ if (!connector->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, connector->ddc);
+
+ /* Clears the property if the EDID is NULL */
+ drm_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ count = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc);
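A one-line illustration of the intended wiring; the example_connector_helper_funcs name is hypothetical:

static const struct drm_connector_helper_funcs example_connector_helper_funcs = {
	.get_modes = drm_connector_helper_get_modes_from_ddc,
};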
+
+/**
+ * drm_connector_helper_get_modes - Read EDID and update connector.
+ * @connector: The connector
+ *
+ * Read the EDID using drm_edid_read() (which requires that connector->ddc is
+ * set), and update the connector using the EDID.
+ *
+ * This can be used as the "default" connector helper .get_modes() hook if the
+ * driver does not need any special processing. It also sets the example of what
+ * custom .get_modes() hooks should do regarding EDID read and connector update.
+ *
+ * Returns: Number of modes.
+ */
+int drm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ const struct drm_edid *drm_edid;
+ int count;
+
+ drm_edid = drm_edid_read(connector);
+
+ /*
+ * Unconditionally update the connector. If the EDID was read
+ * successfully, fill in the connector information derived from the
+ * EDID. Otherwise, if the EDID is NULL, clear the connector
+ * information.
+ */
+ count = drm_edid_connector_update(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ return count;
+}
+EXPORT_SYMBOL(drm_connector_helper_get_modes);
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 72989ed1baba..36633590ebf3 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -100,14 +100,12 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
- crtc);
- bool has_primary = crtc_state->plane_mask &
- drm_plane_mask(crtc->primary);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int ret;
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != crtc_state->enable)
- return -EINVAL;
+ ret = drm_atomic_helper_check_crtc_state(crtc_state, false);
+ if (ret)
+ return ret;
return drm_atomic_add_affected_planes(state, crtc);
}
@@ -227,7 +225,7 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ false, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 7e48dcd1bee4..0c2be8360525 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -184,6 +184,7 @@
*/
#include <linux/anon_inodes.h>
+#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
@@ -853,57 +854,12 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
&args->handle);
}
-
-/*
- * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be
- * added as timeline fence to a chain again.
- */
-static int drm_syncobj_flatten_chain(struct dma_fence **f)
-{
- struct dma_fence_chain *chain = to_dma_fence_chain(*f);
- struct dma_fence *tmp, **fences;
- struct dma_fence_array *array;
- unsigned int count;
-
- if (!chain)
- return 0;
-
- count = 0;
- dma_fence_chain_for_each(tmp, &chain->base)
- ++count;
-
- fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
- if (!fences)
- return -ENOMEM;
-
- count = 0;
- dma_fence_chain_for_each(tmp, &chain->base)
- fences[count++] = dma_fence_get(tmp);
-
- array = dma_fence_array_create(count, fences,
- dma_fence_context_alloc(1),
- 1, false);
- if (!array)
- goto free_fences;
-
- dma_fence_put(*f);
- *f = &array->base;
- return 0;
-
-free_fences:
- while (count--)
- dma_fence_put(fences[count]);
-
- kfree(fences);
- return -ENOMEM;
-}
-
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
struct drm_syncobj_transfer *args)
{
struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence *fence, *tmp;
struct dma_fence_chain *chain;
- struct dma_fence *fence;
int ret;
timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
@@ -912,13 +868,16 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
}
ret = drm_syncobj_find_fence(file_private, args->src_handle,
args->src_point, args->flags,
- &fence);
+ &tmp);
if (ret)
goto err_put_timeline;
- ret = drm_syncobj_flatten_chain(&fence);
- if (ret)
- goto err_free_fence;
+ fence = dma_fence_unwrap_merge(tmp);
+ dma_fence_put(tmp);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err_put_timeline;
+ }
chain = dma_fence_chain_alloc();
if (!chain) {
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index e957d4851dc0..f024dc93939e 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -69,7 +69,7 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+ defined(__mips__) || defined(__loongarch__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
else
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index 99fd15d1b366..a031c335bdb9 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b5001db7a95c..8155d7e650f1 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -17,7 +17,9 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index c04264f70ad1..7080cf7952ec 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -20,6 +20,7 @@
#include <video/of_videomode.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -800,31 +801,40 @@ static int exynos7_decon_resume(struct device *dev)
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
ret);
- return ret;
+ goto err_pclk_enable;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
ret);
- return ret;
+ goto err_aclk_enable;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
ret);
- return ret;
+ goto err_eclk_enable;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
ret);
- return ret;
+ goto err_vclk_enable;
}
return 0;
+
+err_vclk_enable:
+ clk_disable_unprepare(ctx->eclk);
+err_eclk_enable:
+ clk_disable_unprepare(ctx->aclk);
+err_aclk_enable:
+ clk_disable_unprepare(ctx->pclk);
+err_pclk_enable:
+ return ret;
}
#endif
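As a side note, a hypothetical sketch rather than what the exynos7 driver actually does: the same enable-or-unwind behaviour is available from the clk_bulk helpers when the clocks are kept in an array. The clock ids and function name below are placeholders, not the driver's real clock-names.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

/* Placeholder clock ids; the driver's real clock-names may differ. */
static struct clk_bulk_data decon_clks[] = {
        { .id = "pclk" }, { .id = "aclk" }, { .id = "eclk" }, { .id = "vclk" },
};

static int decon_clks_resume_sketch(struct device *dev)
{
        int ret;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(decon_clks), decon_clks);
        if (ret)
                return ret;

        /* On partial failure the helper disables what it already enabled. */
        return clk_bulk_prepare_enable(ARRAY_SIZE(decon_clks), decon_clks);
}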
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 27664f663c5a..4e3d3d5f6866 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 741323a2e6c3..378e5381978f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -7,6 +7,7 @@
* Contacts: Andrzej Hajda <a.hajda@samsung.com>
*/
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 424ea23eec32..16c539657f73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -177,15 +177,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
DRM_COMPONENT_DRIVER
}, {
- DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
- DRM_COMPONENT_DRIVER
- }, {
DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
DRM_COMPONENT_DRIVER
}, {
+ DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
+ DRM_COMPONENT_DRIVER
+ }, {
DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
DRM_COMPONENT_DRIVER
}, {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 79fa3649185c..97f2dee2db29 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 02c97b9ca926..767afd2bfa82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index d5720fab510c..ae6636e6658e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -21,7 +21,9 @@
#include <video/of_videomode.h>
#include <video/samsung_fimd.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 9ae868935357..ea9f66037600 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
+#include <drm/drm_blend.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9e06f8e2a863..09ce28ee08d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -26,6 +26,7 @@
#include <drm/drm_print.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
@@ -100,9 +101,7 @@ struct exynos_mic {
bool i80_mode;
struct videomode vm;
- struct drm_encoder *encoder;
struct drm_bridge bridge;
- struct drm_bridge *next_bridge;
bool enabled;
};
@@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
writel(reg, mic->reg + MIC_OP);
}
-static void mic_disable(struct drm_bridge *bridge) { }
-
static void mic_post_disable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
@@ -297,34 +294,30 @@ unlock:
mutex_unlock(&mic_mutex);
}
-static void mic_enable(struct drm_bridge *bridge) { }
-
-static int mic_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct exynos_mic *mic = bridge->driver_private;
-
- return drm_bridge_attach(bridge->encoder, mic->next_bridge,
- &mic->bridge, flags);
-}
-
static const struct drm_bridge_funcs mic_bridge_funcs = {
- .disable = mic_disable,
.post_disable = mic_post_disable,
.mode_set = mic_mode_set,
.pre_enable = mic_pre_enable,
- .enable = mic_enable,
- .attach = mic_attach,
};
static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ struct drm_encoder *e, *encoder = NULL;
+
+ drm_for_each_encoder(e, drm_dev)
+ if (e->possible_crtcs == drm_crtc_mask(&crtc->base))
+ encoder = e;
+ if (!encoder)
+ return -ENODEV;
mic->bridge.driver_private = mic;
- return 0;
+ return drm_bridge_attach(encoder, &mic->bridge, NULL, 0);
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_mic *mic;
- struct device_node *remote;
struct resource res;
int ret, i;
@@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
}
}
- remote = of_graph_get_remote_node(dev->of_node, 1, 0);
- mic->next_bridge = of_drm_find_bridge(remote);
- if (IS_ERR(mic->next_bridge)) {
- DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n");
- ret = PTR_ERR(mic->next_bridge);
- goto err;
- }
-
- of_node_put(remote);
-
platform_set_drvdata(pdev, mic);
mic->bridge.funcs = &mic_bridge_funcs;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index df76bdee7dca..66e5f1e34044 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -7,6 +7,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 3a7851b7dc66..3c049fb658a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e5662bdcbbde..4d56c8c799c5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e5204be86093..65260a658684 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -25,7 +25,10 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 8fe953d6e0a9..0cd527f0c146 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -12,6 +12,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 6bcd18c63c31..7ff1e5141150 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -192,18 +192,16 @@ static enum drm_connector_status cdv_intel_crt_detect(
static void cdv_intel_crt_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct gma_i2c_chan *ddc_bus = to_gma_i2c_chan(connector->ddc);
- psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+ gma_i2c_destroy(ddc_bus);
drm_connector_cleanup(connector);
kfree(gma_connector);
}
static int cdv_intel_crt_get_modes(struct drm_connector *connector)
{
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- return psb_intel_ddc_get_modes(connector,
- &gma_encoder->ddc_bus->adapter);
+ return psb_intel_ddc_get_modes(connector, connector->ddc);
}
static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -245,8 +243,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
struct gma_connector *gma_connector;
struct gma_encoder *gma_encoder;
+ struct gma_i2c_chan *ddc_bus;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ int ret;
gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
if (!gma_encoder)
@@ -254,25 +254,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
if (!gma_connector)
- goto failed_connector;
+ goto err_free_encoder;
+
+ /* Set up the DDC bus. */
+ ddc_bus = gma_i2c_create(dev, GPIOA, "CRTDDC_A");
+ if (!ddc_bus) {
+ dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
+ goto err_free_connector;
+ }
connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- drm_connector_init(dev, connector,
- &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &cdv_intel_crt_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_bus->base);
+ if (ret)
+ goto err_ddc_destroy;
encoder = &gma_encoder->base;
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ if (ret)
+ goto err_connector_cleanup;
gma_connector_attach_encoder(gma_connector, gma_encoder);
- /* Set up the DDC bus. */
- gma_encoder->ddc_bus = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A");
- if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
- goto failed_ddc;
- }
-
gma_encoder->type = INTEL_OUTPUT_ANALOG;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
@@ -282,11 +288,14 @@ void cdv_intel_crt_init(struct drm_device *dev,
&cdv_intel_crt_connector_helper_funcs);
return;
-failed_ddc:
- drm_encoder_cleanup(&gma_encoder->base);
+
+err_connector_cleanup:
drm_connector_cleanup(&gma_connector->base);
+err_ddc_destroy:
+ gma_i2c_destroy(ddc_bus);
+err_free_connector:
kfree(gma_connector);
-failed_connector:
+err_free_encoder:
kfree(gma_encoder);
return;
}
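The CRT, HDMI and LVDS conversions in the gma500 changes all follow the same shape, condensed below into a single hedged sketch (the function name is invented, driver-local headers are abbreviated, and the connector funcs are the CRT ones shown above): create the DDC bus first, register it with the connector through drm_connector_init_with_ddc(), then let .get_modes and .destroy reach it via connector->ddc.

#include <drm/drm_connector.h>
#include "psb_intel_drv.h"      /* gma_i2c_chan helpers; other driver headers omitted */

/* Illustrative sketch of the DDC-bus pattern, not a drop-in function. */
static void gma_ddc_pattern_sketch(struct drm_device *dev,
                                   struct drm_connector *connector)
{
        struct gma_i2c_chan *ddc_bus;

        ddc_bus = gma_i2c_create(dev, GPIOA, "CRTDDC_A");
        if (!ddc_bus)
                return;

        /* Records &ddc_bus->base in connector->ddc for later use. */
        if (drm_connector_init_with_ddc(dev, connector,
                                        &cdv_intel_crt_connector_funcs,
                                        DRM_MODE_CONNECTOR_VGA,
                                        &ddc_bus->base)) {
                gma_i2c_destroy(ddc_bus);
                return;
        }

        /* In .get_modes: probe EDID over the connector's own DDC adapter. */
        psb_intel_ddc_get_modes(connector, connector->ddc);

        /* In .destroy: recover the wrapper from connector->ddc and free it. */
        gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
}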
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9ee99a7d4fbe..bb2e9d64018a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -32,6 +32,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 8987e555e113..29ef45f14169 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -53,7 +53,6 @@ struct mid_intel_hdmi_priv {
bool has_hdmi_audio;
/* Should set this when detect hotplug */
bool hdmi_device_connected;
- struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
struct drm_device *dev;
};
@@ -130,7 +129,7 @@ static enum drm_connector_status cdv_hdmi_detect(
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
+ edid = drm_get_edid(connector, connector->ddc);
hdmi_priv->has_hdmi_sink = false;
hdmi_priv->has_hdmi_audio = false;
@@ -208,11 +207,10 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
*/
static int cdv_hdmi_get_modes(struct drm_connector *connector)
{
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct edid *edid = NULL;
int ret = 0;
- edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
+ edid = drm_get_edid(connector, connector->ddc);
if (edid) {
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -243,9 +241,9 @@ static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
static void cdv_hdmi_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct gma_i2c_chan *ddc_bus = to_gma_i2c_chan(connector->ddc);
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ gma_i2c_destroy(ddc_bus);
drm_connector_cleanup(connector);
kfree(gma_connector);
}
@@ -278,37 +276,60 @@ void cdv_hdmi_init(struct drm_device *dev,
struct gma_encoder *gma_encoder;
struct gma_connector *gma_connector;
struct drm_connector *connector;
- struct drm_encoder *encoder;
struct mid_intel_hdmi_priv *hdmi_priv;
- int ddc_bus;
+ struct gma_i2c_chan *ddc_bus;
+ int ddc_reg;
+ int ret;
gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
-
if (!gma_encoder)
return;
- gma_connector = kzalloc(sizeof(struct gma_connector),
- GFP_KERNEL);
-
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
if (!gma_connector)
- goto err_connector;
+ goto err_free_encoder;
hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
-
if (!hdmi_priv)
- goto err_priv;
+ goto err_free_connector;
connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
gma_connector->save = cdv_hdmi_save;
gma_connector->restore = cdv_hdmi_restore;
- encoder = &gma_encoder->base;
- drm_connector_init(dev, connector,
- &cdv_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_DVID);
+ switch (reg) {
+ case SDVOB:
+ ddc_reg = GPIOE;
+ gma_encoder->ddi_select = DDI0_SELECT;
+ break;
+ case SDVOC:
+ ddc_reg = GPIOD;
+ gma_encoder->ddi_select = DDI1_SELECT;
+ break;
+ default:
+ DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+ goto err_free_hdmi_priv;
+ }
+
+ ddc_bus = gma_i2c_create(dev, ddc_reg,
+ (reg == SDVOB) ? "HDMIB" : "HDMIC");
+ if (!ddc_bus) {
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ goto err_free_hdmi_priv;
+ }
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &cdv_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID,
+ &ddc_bus->base);
+ if (ret)
+ goto err_ddc_destroy;
+
+ ret = drm_simple_encoder_init(dev, &gma_encoder->base,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto err_connector_cleanup;
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_HDMI;
@@ -316,7 +337,7 @@ void cdv_hdmi_init(struct drm_device *dev,
hdmi_priv->has_hdmi_sink = false;
gma_encoder->dev_priv = hdmi_priv;
- drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+ drm_encoder_helper_add(&gma_encoder->base, &cdv_hdmi_helper_funcs);
drm_connector_helper_add(connector,
&cdv_hdmi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -327,38 +348,17 @@ void cdv_hdmi_init(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- switch (reg) {
- case SDVOB:
- ddc_bus = GPIOE;
- gma_encoder->ddi_select = DDI0_SELECT;
- break;
- case SDVOC:
- ddc_bus = GPIOD;
- gma_encoder->ddi_select = DDI1_SELECT;
- break;
- default:
- DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
- goto failed_ddc;
- break;
- }
-
- gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
- ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
-
- if (!gma_encoder->i2c_bus) {
- dev_err(dev->dev, "No ddc adapter available!\n");
- goto failed_ddc;
- }
-
- hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
return;
-failed_ddc:
- drm_encoder_cleanup(encoder);
+err_connector_cleanup:
drm_connector_cleanup(connector);
-err_priv:
+err_ddc_destroy:
+ gma_i2c_destroy(ddc_bus);
+err_free_hdmi_priv:
+ kfree(hdmi_priv);
+err_free_connector:
kfree(gma_connector);
-err_connector:
+err_free_encoder:
kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 98d9f5483a7c..be6efcaaa3b3 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -298,11 +298,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
- ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
+ ret = psb_intel_ddc_get_modes(connector, connector->ddc);
if (ret)
return ret;
@@ -317,19 +316,13 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
-/**
- * cdv_intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
+ gma_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_cleanup(connector);
kfree(gma_connector);
}
@@ -487,8 +480,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_display_mode *scan;
struct drm_crtc *crtc;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct gma_i2c_chan *ddc_bus;
u32 lvds;
int pipe;
+ int ret;
u8 pin;
if (!dev_priv->lvds_enabled_in_vbt)
@@ -508,11 +503,11 @@ void cdv_intel_lvds_init(struct drm_device *dev,
gma_connector = kzalloc(sizeof(struct gma_connector),
GFP_KERNEL);
if (!gma_connector)
- goto failed_connector;
+ goto err_free_encoder;
lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
if (!lvds_priv)
- goto failed_lvds_priv;
+ goto err_free_connector;
gma_encoder->dev_priv = lvds_priv;
@@ -521,12 +516,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
gma_connector->restore = cdv_intel_lvds_restore;
encoder = &gma_encoder->base;
+ /* Set up the DDC bus. */
+ ddc_bus = gma_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!ddc_bus) {
+ dev_printk(KERN_ERR, dev->dev,
+ "DDC bus registration " "failed.\n");
+ goto err_free_lvds_priv;
+ }
- drm_connector_init(dev, connector,
- &cdv_intel_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &cdv_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS,
+ &ddc_bus->base);
+ if (ret)
+ goto err_destroy_ddc;
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
+ if (ret)
+ goto err_connector_cleanup;
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
@@ -550,13 +557,11 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* Set up I2C bus
* FIXME: distroy i2c_bus when exit
*/
- gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
- GPIOB,
- "LVDSBLC_B");
+ gma_encoder->i2c_bus = gma_i2c_create(dev, GPIOB, "LVDSBLC_B");
if (!gma_encoder->i2c_bus) {
dev_printk(KERN_ERR,
dev->dev, "I2C bus registration failed.\n");
- goto failed_blc_i2c;
+ goto err_encoder_cleanup;
}
gma_encoder->i2c_bus->slave_addr = 0x2C;
dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
@@ -571,23 +576,13 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* if closed, act like it's not there for now
*/
- /* Set up the DDC bus. */
- gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
- GPIOC,
- "LVDSDDC_C");
- if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, dev->dev,
- "DDC bus registration " "failed.\n");
- goto failed_ddc;
- }
-
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
mutex_lock(&dev->mode_config.mutex);
- psb_intel_ddc_get_modes(connector,
- &gma_encoder->ddc_bus->adapter);
+ psb_intel_ddc_get_modes(connector, &ddc_bus->base);
+
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
@@ -629,7 +624,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
if (!mode_dev->panel_fixed_mode) {
DRM_DEBUG
("Found no modes on the lvds, ignoring the LVDS\n");
- goto failed_find;
+ goto err_unlock;
}
/* setup PWM */
@@ -649,20 +644,19 @@ out:
mutex_unlock(&dev->mode_config.mutex);
return;
-failed_find:
+err_unlock:
mutex_unlock(&dev->mode_config.mutex);
- pr_err("Failed find\n");
- psb_intel_i2c_destroy(gma_encoder->ddc_bus);
-failed_ddc:
- pr_err("Failed DDC\n");
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
-failed_blc_i2c:
- pr_err("Failed BLC\n");
+ gma_i2c_destroy(gma_encoder->i2c_bus);
+err_encoder_cleanup:
drm_encoder_cleanup(encoder);
+err_connector_cleanup:
drm_connector_cleanup(connector);
+err_destroy_ddc:
+ gma_i2c_destroy(ddc_bus);
+err_free_lvds_priv:
kfree(lvds_priv);
-failed_lvds_priv:
+err_free_connector:
kfree(gma_connector);
-failed_connector:
+err_free_encoder:
kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 0ac6ea5fd3a1..aa3ecf771fd3 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -21,6 +21,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 34ec3fca09ba..bd40c040a2c9 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -12,6 +12,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index 5e1b4d70c317..9d02a7b6d9a3 100644
--- a/drivers/gpu/drm/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -22,7 +22,7 @@
static int get_clock(void *data)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
struct drm_device *dev = chan->drm_dev;
u32 val;
@@ -32,7 +32,7 @@ static int get_clock(void *data)
static int get_data(void *data)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
struct drm_device *dev = chan->drm_dev;
u32 val;
@@ -42,7 +42,7 @@ static int get_data(void *data)
static void set_clock(void *data, int state_high)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
struct drm_device *dev = chan->drm_dev;
u32 reserved = 0, clock_bits;
@@ -62,7 +62,7 @@ static void set_clock(void *data, int state_high)
static void set_data(void *data, int state_high)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
struct drm_device *dev = chan->drm_dev;
u32 reserved = 0, data_bits;
@@ -83,7 +83,7 @@ static void set_data(void *data, int state_high)
}
/**
- * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * gma_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
* @reg: GPIO reg to use
* @name: name for this bus
@@ -102,21 +102,21 @@ static void set_data(void *data, int state_high)
* %GPIOH
* see PRM for details on how these different busses are used.
*/
-struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
- const u32 reg, const char *name)
+struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name)
{
- struct psb_intel_i2c_chan *chan;
+ struct gma_i2c_chan *chan;
- chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct gma_i2c_chan), GFP_KERNEL);
if (!chan)
goto out_free;
chan->drm_dev = dev;
chan->reg = reg;
- snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = dev->dev;
+ snprintf(chan->base.name, I2C_NAME_SIZE, "intel drm %s", name);
+ chan->base.owner = THIS_MODULE;
+ chan->base.algo_data = &chan->algo;
+ chan->base.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
@@ -125,9 +125,9 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
chan->algo.timeout = usecs_to_jiffies(2200);
chan->algo.data = chan;
- i2c_set_adapdata(&chan->adapter, chan);
+ i2c_set_adapdata(&chan->base, chan);
- if (i2c_bit_add_bus(&chan->adapter))
+ if (i2c_bit_add_bus(&chan->base))
goto out_free;
/* JJJ: raise SCL and SDA? */
@@ -143,16 +143,16 @@ out_free:
}
/**
- * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * gma_i2c_destroy - unregister and free i2c bus resources
* @chan: channel to free
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
-void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+void gma_i2c_destroy(struct gma_i2c_chan *chan)
{
if (!chan)
return;
- i2c_del_adapter(&chan->adapter);
+ i2c_del_adapter(&chan->base);
kfree(chan);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 22398d34853a..6004390d647a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -8,6 +8,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "framebuffer.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index b5946a1cdcd5..95b7cb099e63 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index aed5de8f8245..4d98df189e10 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -13,6 +13,7 @@
#include <asm/intel-mid.h>
+#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
@@ -293,12 +294,14 @@ void oaktrail_lvds_init(struct drm_device *dev,
{
struct gma_encoder *gma_encoder;
struct gma_connector *gma_connector;
+ struct gma_i2c_chan *ddc_bus;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct edid *edid;
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ int ret;
gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
if (!gma_encoder)
@@ -306,16 +309,20 @@ void oaktrail_lvds_init(struct drm_device *dev,
gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
if (!gma_connector)
- goto failed_connector;
+ goto err_free_encoder;
connector = &gma_connector->base;
encoder = &gma_encoder->base;
dev_priv->is_lvds_on = true;
- drm_connector_init(dev, connector,
- &psb_intel_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
+ ret = drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret)
+ goto err_free_connector;
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
+ if (ret)
+ goto err_connector_cleanup;
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
@@ -353,16 +360,26 @@ void oaktrail_lvds_init(struct drm_device *dev,
edid = NULL;
mutex_lock(&dev->mode_config.mutex);
+
i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
if (i2c_adap)
edid = drm_get_edid(connector, i2c_adap);
+
if (edid == NULL && dev_priv->lpc_gpio_base) {
- oaktrail_lvds_i2c_init(encoder);
- if (gma_encoder->ddc_bus != NULL) {
- i2c_adap = &gma_encoder->ddc_bus->adapter;
+ ddc_bus = oaktrail_lvds_i2c_init(dev);
+ if (!IS_ERR(ddc_bus)) {
+ i2c_adap = &ddc_bus->base;
edid = drm_get_edid(connector, i2c_adap);
}
}
+
+ /*
+ * Due to the logic in probing for i2c buses above we do not know the
+ * i2c_adap until now. Hence we cannot use drm_connector_init_with_ddc()
+ * but must instead set connector->ddc manually here.
+ */
+ connector->ddc = i2c_adap;
+
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
@@ -395,7 +412,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
/* If we still don't have a mode after all that, give up. */
if (!mode_dev->panel_fixed_mode) {
dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
- goto failed_find;
+ goto err_unlock;
}
out:
@@ -403,21 +420,15 @@ out:
return;
-failed_find:
+err_unlock:
mutex_unlock(&dev->mode_config.mutex);
-
- dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
- if (gma_encoder->ddc_bus) {
- psb_intel_i2c_destroy(gma_encoder->ddc_bus);
- gma_encoder->ddc_bus = NULL;
- }
-
-/* failed_ddc: */
-
+ gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
drm_encoder_cleanup(encoder);
+err_connector_cleanup:
drm_connector_cleanup(connector);
+err_free_connector:
kfree(gma_connector);
-failed_connector:
+err_free_encoder:
kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index d1ae91fcd224..06b5b2d70d48 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -65,7 +65,7 @@
static int get_clock(void *data)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
u32 val;
val = LPC_READ_REG(chan, RGIO);
@@ -79,7 +79,7 @@ static int get_clock(void *data)
static int get_data(void *data)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
u32 val;
val = LPC_READ_REG(chan, RGIO);
@@ -93,7 +93,7 @@ static int get_data(void *data)
static void set_clock(void *data, int state_high)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
u32 val;
if (state_high) {
@@ -112,7 +112,7 @@ static void set_clock(void *data, int state_high)
static void set_data(void *data, int state_high)
{
- struct psb_intel_i2c_chan *chan = data;
+ struct gma_i2c_chan *chan = data;
u32 val;
if (state_high) {
@@ -129,23 +129,22 @@ static void set_data(void *data, int state_high)
}
}
-void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
+struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev)
{
- struct drm_device *dev = encoder->dev;
- struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct psb_intel_i2c_chan *chan;
+ struct gma_i2c_chan *chan;
+ int ret;
- chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct gma_i2c_chan), GFP_KERNEL);
if (!chan)
- return;
+ return ERR_PTR(-ENOMEM);
chan->drm_dev = dev;
chan->reg = dev_priv->lpc_gpio_base;
- strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = dev->dev;
+ strncpy(chan->base.name, "gma500 LPC", I2C_NAME_SIZE - 1);
+ chan->base.owner = THIS_MODULE;
+ chan->base.algo_data = &chan->algo;
+ chan->base.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
@@ -154,16 +153,17 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
chan->algo.timeout = usecs_to_jiffies(2200);
chan->algo.data = chan;
- i2c_set_adapdata(&chan->adapter, chan);
+ i2c_set_adapdata(&chan->base, chan);
set_data(chan, 1);
set_clock(chan, 1);
udelay(50);
- if (i2c_bit_add_bus(&chan->adapter)) {
+ ret = i2c_bit_add_bus(&chan->base);
+ if (ret < 0) {
kfree(chan);
- return;
+ return ERR_PTR(ret);
}
- gma_encoder->ddc_bus = chan;
+ return chan;
}
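For reference, a small sketch of consuming the new ERR_PTR-style return value (the check mirrors what oaktrail_lvds_init() does above; the wrapper name is invented): IS_ERR() distinguishes a valid pointer from an encoded errno, and PTR_ERR() recovers the error code.

#include <linux/err.h>
#include "psb_intel_drv.h"      /* oaktrail_lvds_i2c_init(), struct gma_i2c_chan */

/* Illustrative wrapper: return the adapter, or NULL when the bus failed. */
static struct i2c_adapter *lvds_ddc_adapter_sketch(struct drm_device *dev)
{
        struct gma_i2c_chan *chan = oaktrail_lvds_i2c_init(dev);

        if (IS_ERR(chan))
                return NULL;    /* PTR_ERR(chan) would give -ENOMEM or the i2c error */

        return &chan->base;
}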
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0ddfec1a0851..0ea3d23575f3 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -469,7 +469,7 @@ struct drm_psb_private {
struct drm_display_mode *sdvo_lvds_vbt_mode;
struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
- struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+ struct gma_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index db3e757328fe..8ccba116821b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -78,13 +78,14 @@ struct psb_intel_mode_device {
uint32_t saveBLC_PWM_CTL;
};
-struct psb_intel_i2c_chan {
- /* for getting at dev. private (mmio etc.) */
- struct drm_device *drm_dev;
- u32 reg; /* GPIO reg */
- struct i2c_adapter adapter;
+struct gma_i2c_chan {
+ struct i2c_adapter base;
struct i2c_algo_bit_data algo;
u8 slave_addr;
+
+ /* for getting at dev. private (mmio etc.) */
+ struct drm_device *drm_dev;
+ u32 reg; /* GPIO reg */
};
struct gma_encoder {
@@ -103,8 +104,7 @@ struct gma_encoder {
/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
own set of output privates */
- struct psb_intel_i2c_chan *i2c_bus;
- struct psb_intel_i2c_chan *ddc_bus;
+ struct gma_i2c_chan *i2c_bus;
};
struct gma_connector {
@@ -175,10 +175,12 @@ struct gma_crtc {
container_of(x, struct gma_encoder, base)
#define to_psb_intel_framebuffer(x) \
container_of(x, struct psb_intel_framebuffer, base)
+#define to_gma_i2c_chan(x) \
+ container_of(x, struct gma_i2c_chan, base)
-struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
- const u32 reg, const char *name);
-void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name);
+void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
@@ -197,7 +199,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
extern void oaktrail_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
-extern void oaktrail_lvds_i2c_init(struct drm_encoder *encoder);
+struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev);
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
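A brief illustration of why to_gma_i2c_chan() can start from the bare i2c_adapter pointer stored in connector->ddc: the adapter is embedded in the wrapper as its base member, so container_of() only subtracts that member's offset. The struct below is a minimal stand-in, not the driver's real definition.

#include <linux/container_of.h>
#include <linux/i2c.h>

/* Minimal stand-in mirroring only the layout that container_of() needs. */
struct gma_i2c_chan_sketch {
        struct i2c_adapter base;        /* what gets handed out as connector->ddc */
        /* ... driver-private members would follow here ... */
};

static struct gma_i2c_chan_sketch *chan_from_adapter(struct i2c_adapter *adap)
{
        /* Subtracts offsetof(..., base), which is 0 since base comes first. */
        return container_of(adap, struct gma_i2c_chan_sketch, base);
}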
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index cad00380b386..7ee6c8ce103b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -49,8 +49,7 @@ struct psb_intel_lvds_priv {
uint32_t savePFIT_PGM_RATIOS;
uint32_t saveBLC_PWM_CTL;
- struct psb_intel_i2c_chan *i2c_bus;
- struct psb_intel_i2c_chan *ddc_bus;
+ struct gma_i2c_chan *i2c_bus;
};
@@ -90,7 +89,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ struct gma_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
u8 out_buf[2];
unsigned int blc_i2c_brightness;
@@ -113,7 +112,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
out_buf[1] = (u8)blc_i2c_brightness;
- if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+ if (i2c_transfer(&lvds_i2c_bus->base, msgs, 1) == 1) {
dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
dev_priv->lvds_bl->brightnesscmd,
blc_i2c_brightness);
@@ -492,12 +491,10 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
int ret = 0;
if (!IS_MRST(dev))
- ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
+ ret = psb_intel_ddc_get_modes(connector, connector->ddc);
if (ret)
return ret;
@@ -512,20 +509,12 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
-/**
- * psb_intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
void psb_intel_lvds_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
- struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
+ struct gma_i2c_chan *ddc_bus = to_gma_i2c_chan(connector->ddc);
- psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ gma_i2c_destroy(ddc_bus);
drm_connector_cleanup(connector);
kfree(gma_connector);
}
@@ -639,25 +628,28 @@ void psb_intel_lvds_init(struct drm_device *dev,
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct gma_i2c_chan *ddc_bus;
u32 lvds;
int pipe;
+ int ret;
gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
if (!gma_encoder) {
dev_err(dev->dev, "gma_encoder allocation error\n");
return;
}
+ encoder = &gma_encoder->base;
gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
if (!gma_connector) {
dev_err(dev->dev, "gma_connector allocation error\n");
- goto failed_encoder;
+ goto err_free_encoder;
}
lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
if (!lvds_priv) {
dev_err(dev->dev, "LVDS private allocation error\n");
- goto failed_connector;
+ goto err_free_connector;
}
gma_encoder->dev_priv = lvds_priv;
@@ -666,12 +658,24 @@ void psb_intel_lvds_init(struct drm_device *dev,
gma_connector->save = psb_intel_lvds_save;
gma_connector->restore = psb_intel_lvds_restore;
- encoder = &gma_encoder->base;
- drm_connector_init(dev, connector,
- &psb_intel_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
+ /* Set up the DDC bus. */
+ ddc_bus = gma_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!ddc_bus) {
+ dev_printk(KERN_ERR, dev->dev,
+ "DDC bus registration " "failed.\n");
+ goto err_free_lvds_priv;
+ }
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS,
+ &ddc_bus->base);
+ if (ret)
+ goto err_ddc_destroy;
+
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
+ if (ret)
+ goto err_connector_cleanup;
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
@@ -695,11 +699,11 @@ void psb_intel_lvds_init(struct drm_device *dev,
* Set up I2C bus
* FIXME: distroy i2c_bus when exit
*/
- lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+ lvds_priv->i2c_bus = gma_i2c_create(dev, GPIOB, "LVDSBLC_B");
if (!lvds_priv->i2c_bus) {
dev_printk(KERN_ERR,
dev->dev, "I2C bus registration failed.\n");
- goto failed_blc_i2c;
+ goto err_encoder_cleanup;
}
lvds_priv->i2c_bus->slave_addr = 0x2C;
dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
@@ -714,20 +718,13 @@ void psb_intel_lvds_init(struct drm_device *dev,
* if closed, act like it's not there for now
*/
- /* Set up the DDC bus. */
- lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
- if (!lvds_priv->ddc_bus) {
- dev_printk(KERN_ERR, dev->dev,
- "DDC bus registration " "failed.\n");
- goto failed_ddc;
- }
-
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
mutex_lock(&dev->mode_config.mutex);
- psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+ psb_intel_ddc_get_modes(connector, &ddc_bus->base);
+
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
@@ -773,7 +770,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
/* If we still don't have a mode after all that, give up. */
if (!mode_dev->panel_fixed_mode) {
dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
- goto failed_find;
+ goto err_unlock;
}
/*
@@ -784,17 +781,20 @@ out:
mutex_unlock(&dev->mode_config.mutex);
return;
-failed_find:
+err_unlock:
mutex_unlock(&dev->mode_config.mutex);
- psb_intel_i2c_destroy(lvds_priv->ddc_bus);
-failed_ddc:
- psb_intel_i2c_destroy(lvds_priv->i2c_bus);
-failed_blc_i2c:
+ gma_i2c_destroy(lvds_priv->i2c_bus);
+err_encoder_cleanup:
drm_encoder_cleanup(encoder);
+err_connector_cleanup:
drm_connector_cleanup(connector);
-failed_connector:
+err_ddc_destroy:
+ gma_i2c_destroy(ddc_bus);
+err_free_lvds_priv:
+ kfree(lvds_priv);
+err_free_connector:
kfree(gma_connector);
-failed_encoder:
+err_free_encoder:
kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 60306780e16c..8be0ec340de5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -7,6 +7,8 @@
#include <linux/i2c.h>
+#include <drm/drm_edid.h>
+
#include "psb_intel_drv.h"
/**
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index ae051133e050..d0addd478815 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 3f9d4b9a1e3d..8d1630b8edac 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 1d556482bb46..a0d5aa727d58 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 1ab94620776f..61c29c2834e6 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -26,6 +26,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 4a8941fa0815..6d11e7938c83 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -69,56 +69,7 @@ static struct pci_driver hyperv_pci_driver = {
.remove = hyperv_pci_remove,
};
-static int hyperv_setup_gen1(struct hyperv_drm_device *hv)
-{
- struct drm_device *dev = &hv->dev;
- struct pci_dev *pdev;
- int ret;
-
- pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
- PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
- if (!pdev) {
- drm_err(dev, "Unable to find PCI Hyper-V video\n");
- return -ENODEV;
- }
-
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hyperv_driver);
- if (ret) {
- drm_err(dev, "Not able to remove boot fb\n");
- return ret;
- }
-
- if (pci_request_region(pdev, 0, DRIVER_NAME) != 0)
- drm_warn(dev, "Cannot request framebuffer, boot fb still active?\n");
-
- if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) {
- drm_err(dev, "Resource at bar 0 is not IORESOURCE_MEM\n");
- ret = -ENODEV;
- goto error;
- }
-
- hv->fb_base = pci_resource_start(pdev, 0);
- hv->fb_size = pci_resource_len(pdev, 0);
- if (!hv->fb_base) {
- drm_err(dev, "Resource not available\n");
- ret = -ENODEV;
- goto error;
- }
-
- hv->fb_size = min(hv->fb_size,
- (unsigned long)(hv->mmio_megabytes * 1024 * 1024));
- hv->vram = devm_ioremap(&pdev->dev, hv->fb_base, hv->fb_size);
- if (!hv->vram) {
- drm_err(dev, "Failed to map vram\n");
- ret = -ENOMEM;
- }
-
-error:
- pci_dev_put(pdev);
- return ret;
-}
-
-static int hyperv_setup_gen2(struct hyperv_drm_device *hv,
+static int hyperv_setup_vram(struct hyperv_drm_device *hv,
struct hv_device *hdev)
{
struct drm_device *dev = &hv->dev;
@@ -181,10 +132,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
goto err_hv_set_drv_data;
}
- if (efi_enabled(EFI_BOOT))
- ret = hyperv_setup_gen2(hv, hdev);
- else
- ret = hyperv_setup_gen1(hv);
+ ret = hyperv_setup_vram(hv, hdev);
if (ret)
goto err_vmbus_close;
@@ -225,29 +173,13 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
- struct pci_dev *pdev;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
- /*
- * Free allocated MMIO memory only on Gen2 VMs.
- * On Gen1 VMs, release the PCI device
- */
- if (efi_enabled(EFI_BOOT)) {
- vmbus_free_mmio(hv->mem->start, hv->fb_size);
- } else {
- pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
- PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
- if (!pdev) {
- drm_err(dev, "Unable to find PCI Hyper-V video\n");
- return -ENODEV;
- }
- pci_release_region(pdev, 0);
- pci_dev_put(pdev);
- }
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
return 0;
}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index e82b815f83a6..b8e64dd8d3a6 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -7,9 +7,11 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -123,8 +125,11 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
if (fb->format->format != DRM_FORMAT_XRGB8888)
return -EINVAL;
- if (fb->pitches[0] * fb->height > hv->fb_size)
+ if (fb->pitches[0] * fb->height > hv->fb_size) {
+ drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
+ current->comm, fb->width, fb->height, fb->pitches[0], hv->fb_size);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index c0155c6271bf..76a182a9a765 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -18,16 +18,16 @@
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
+
+/* Support for VERSION_WIN7 is removed. #define is retained for reference. */
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
-#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
-#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
+#define SYNTHVID_WIDTH_WIN8 1600
+#define SYNTHVID_HEIGHT_WIN8 1200
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
-#define SYNTHVID_WIDTH_MAX_WIN7 1600
-#define SYNTHVID_HEIGHT_MAX_WIN7 1200
enum pipe_msg_type {
PIPE_MSG_INVALID,
@@ -496,12 +496,6 @@ int hyperv_connect_vsp(struct hv_device *hdev)
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
- if (!ret)
- break;
- fallthrough;
- case VERSION_WS2008:
- case VERSION_WIN7:
- ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7);
break;
default:
ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
@@ -513,18 +507,15 @@ int hyperv_connect_vsp(struct hv_device *hdev)
goto error;
}
- if (hv->synthvid_version == SYNTHVID_VERSION_WIN7)
- hv->screen_depth = SYNTHVID_DEPTH_WIN7;
- else
- hv->screen_depth = SYNTHVID_DEPTH_WIN8;
+ hv->screen_depth = SYNTHVID_DEPTH_WIN8;
if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
ret = hyperv_get_supported_resolution(hdev);
if (ret)
drm_err(dev, "Failed to get supported resolution from host, use default\n");
} else {
- hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7;
- hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7;
+ hv->screen_width_max = SYNTHVID_WIDTH_WIN8;
+ hv->screen_height_max = SYNTHVID_HEIGHT_WIN8;
}
hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b7ec6c374fbd..7c4455541dbb 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -7,6 +7,7 @@
#include <linux/component.h>
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_data/tda9950.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d2b18f03a33c..522ef9b4aff3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -103,6 +103,7 @@ gt-y += \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
gt/intel_gt_irq.o \
+ gt/intel_gt_mcr.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_debugfs.o \
gt/intel_gt_pm_irq.o \
@@ -129,7 +130,7 @@ gt-y += \
gt/shmem_utils.o \
gt/sysfs_engines.o
# x86 intel-gtt module support
-gt-$(CONFIG_X86) += gt/intel_gt_gmch.o
+gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -220,6 +221,7 @@ i915-y += \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
+ display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
@@ -242,6 +244,8 @@ i915-y += \
display/intel_hdcp.o \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
+ display/intel_modeset_verify.o \
+ display/intel_modeset_setup.o \
display/intel_overlay.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
diff --git a/drivers/gpu/drm/i915/TODO.txt b/drivers/gpu/drm/i915/TODO.txt
index 81a82c9c203f..879b08ca32b3 100644
--- a/drivers/gpu/drm/i915/TODO.txt
+++ b/drivers/gpu/drm/i915/TODO.txt
@@ -37,5 +37,5 @@ Smaller things:
https://lore.kernel.org/linux-mm/20210301083320.943079-1-hch@lst.de/
-- tasklet helpers in i915_gem.h also look a bit misplaced and should
+- tasklet helpers in i915_tasklet.h also look a bit misplaced and should
probably be moved to tasklet headers.
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 5a957acebfd6..82ad8fe7440c 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -395,26 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
- /*
- * This is a big fat ugly hack.
- *
- * Some machines in UEFI boot mode provide us a VBT that has 18
- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
- * unknown we fail to light up. Yet the same BIOS boots up with
- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
- * max, not what it tells us to use.
- *
- * Note: This will still be broken if the eDP panel is not lit
- * up by the BIOS, and thus we can't get the mode at module
- * load.
- */
- drm_dbg_kms(&dev_priv->drm,
- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
- }
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
}
static void
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 38014e0cc9ad..861dcd2eb890 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -28,7 +28,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
- snb_pcode_write(i915, DISPLAY_IPS_CONTROL,
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
IPS_ENABLE | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
@@ -62,7 +62,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
- snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 7fe1a4e57654..592e5adfed8b 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 19bf717fd4cb..5dcfa7feffa9 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1862,7 +1862,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
@@ -2049,6 +2050,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
+ intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+
mutex_lock(&dev->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
@@ -2062,13 +2065,13 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
intel_backlight_setup(intel_connector, INVALID_PIPE);
- if (dev_priv->vbt.dsi.config->dual_link)
+ if (intel_connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
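The icl_dsi.c hunks show the shape of the per-panel VBT conversion used throughout this diff: intel_bios_init_panel() is called once the connector is attached to the encoder, and every former i915->vbt.dsi.* lookup goes through the connector's panel.vbt copy instead. Condensed from the hunks above (no new identifiers):

	intel_connector_attach_encoder(intel_connector, encoder);

	/* Parse the panel-indexed VBT blocks for this connector; DSI has
	 * no child device data or EDID to pass, hence NULL, NULL. */
	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);

	/* ...and from here on read panel data off the connector: */
	if (intel_connector->panel.vbt.dsi.config->dual_link)
		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
	else
		intel_dsi->ports = BIT(port);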
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index f0f0dfce27ce..6c9ee905f132 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_audio_regs.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
new file mode 100644
index 000000000000..d1e5844e3484
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_REGS_H__
+#define __INTEL_AUDIO_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR_MASK (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
+
+#define _IBX_HDMIW_HDMIEDID_A 0xE2050
+#define _IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
+ _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A 0xE20B4
+#define _IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
+ _IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
+#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
+#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
+#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
+
+#define _CPT_HDMIW_HDMIEDID_A 0xE5050
+#define _CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A 0xE50B4
+#define _CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
+
+#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
+
+#define _IBX_AUD_CONFIG_A 0xe2000
+#define _IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A 0xe5000
+#define _CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
+
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
+#define _HSW_AUD_CONFIG_A 0x65000
+#define _HSW_AUD_CONFIG_B 0x65100
+#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A 0x65010
+#define _HSW_AUD_MISC_CTRL_B 0x65110
+#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define _HSW_AUD_DIG_CNVT_1 0x65080
+#define _HSW_AUD_DIG_CNVT_2 0x65180
+#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define _HSW_AUD_EDID_DATA_A 0x65050
+#define _HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
+#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
+#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
+#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
+#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
+#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
+
+#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
+#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
+#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
+#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
+
+#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
+#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
+#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
+#define AUD_TS_CDCLK_M_EN REG_BIT(31)
+#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
+
+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE _MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8 0
+#define HBLANK_START_COUNT_16 1
+#define HBLANK_START_COUNT_32 2
+#define HBLANK_START_COUNT_64 3
+#define HBLANK_START_COUNT_96 4
+#define HBLANK_START_COUNT_128 5
+
+#endif /* __INTEL_AUDIO_REGS_H__ */
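The new header is a straight move of the display audio register definitions out of i915_reg.h; no definitions change. As a quick illustration of how the AUD_CONFIG_N() packing above works, here is a small self-contained user-space check of the same arithmetic (register offsets are of course only meaningful inside the driver):

#include <stdio.h>
#include <stdint.h>

/* Same packing as AUD_CONFIG_N(): bits 27:20 hold N[19:12],
 * bits 15:4 hold N[11:0]. */
static uint32_t aud_config_n(uint32_t n)
{
	return (((n >> 12) & 0xff) << 20) | ((n & 0xfff) << 4);
}

int main(void)
{
	uint32_t n = 6144;	/* typical HDMI audio N value for 48 kHz */

	printf("AUD_CONFIG_N(%u) = 0x%08x\n", n, aud_config_n(n));
	return 0;
}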
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index c8e1fc53a881..110fc98ec280 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/backlight.h>
#include <linux/kernel.h>
#include <linux/pwm.h>
#include <linux/string_helpers.h>
@@ -1159,9 +1160,10 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
-static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
+static u16 get_vbt_pwm_freq(struct intel_connector *connector)
{
- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
if (pwm_freq_hz) {
drm_dbg_kms(&dev_priv->drm,
@@ -1181,7 +1183,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
+ u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
u32 pwm;
if (!panel->backlight.pwm_funcs->hz_to_pwm) {
@@ -1218,11 +1220,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
* against this by letting the minimum be at most (arbitrarily chosen)
* 25% of the max.
*/
- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
- if (min != dev_priv->vbt.backlight.min_brightness) {
+ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
+ if (min != connector->panel.vbt.backlight.min_brightness) {
drm_dbg_kms(&dev_priv->drm,
"clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
+ connector->panel.vbt.backlight.min_brightness, min);
}
/* vbt value is a coefficient in range [0..255] */
@@ -1411,7 +1413,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
- panel->backlight.controller = dev_priv->vbt.backlight.controller;
+ panel->backlight.controller = connector->panel.vbt.backlight.controller;
pwm_ctl = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -1484,7 +1486,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
desc = "PMIC";
} else {
@@ -1513,11 +1515,11 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
- get_vbt_pwm_freq(dev_priv), level);
+ get_vbt_pwm_freq(connector), level);
} else {
/* Set period from VBT frequency, leave other settings at 0. */
panel->backlight.pwm_state.period =
- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
+ NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
@@ -1602,7 +1604,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
struct intel_panel *panel = &connector->panel;
int ret;
- if (!dev_priv->vbt.backlight.present) {
+ if (!connector->panel.vbt.backlight.present) {
if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
drm_dbg_kms(&dev_priv->drm,
"no backlight present per VBT, but present per quirk\n");
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 0c5638f5b72b..51dde5bfd956 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,6 +25,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
@@ -123,7 +124,7 @@ find_raw_section(const void *_bdb, enum bdb_block_id section_id)
* Offset from the start of BDB to the start of the
* block data (just past the block header).
*/
-static u32 block_offset(const void *bdb, enum bdb_block_id section_id)
+static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id)
{
const void *block;
@@ -135,7 +136,7 @@ static u32 block_offset(const void *bdb, enum bdb_block_id section_id)
}
/* size of the block excluding the header */
-static u32 block_size(const void *bdb, enum bdb_block_id section_id)
+static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id)
{
const void *block;
@@ -232,7 +233,7 @@ static bool validate_lfp_data_ptrs(const void *bdb,
int data_block_size, lfp_data_size;
int i;
- data_block_size = block_size(bdb, BDB_LVDS_LFP_DATA);
+ data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA);
if (data_block_size == 0)
return false;
@@ -309,7 +310,7 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
u32 offset;
int i;
- offset = block_offset(bdb, BDB_LVDS_LFP_DATA);
+ offset = raw_block_offset(bdb, BDB_LVDS_LFP_DATA);
for (i = 0; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.offset < offset ||
@@ -585,6 +586,14 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}
+static const struct lvds_pnp_id *
+get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset;
+}
+
static const struct bdb_lvds_lfp_data_tail *
get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs)
@@ -595,12 +604,16 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
-static int opregion_get_panel_type(struct drm_i915_private *i915)
+static int opregion_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
return intel_opregion_get_panel_type(i915);
}
-static int vbt_get_panel_type(struct drm_i915_private *i915)
+static int vbt_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
const struct bdb_lvds_options *lvds_options;
@@ -608,16 +621,71 @@ static int vbt_get_panel_type(struct drm_i915_private *i915)
if (!lvds_options)
return -1;
- if (lvds_options->panel_type > 0xf) {
+ if (lvds_options->panel_type > 0xf &&
+ lvds_options->panel_type != 0xff) {
drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
lvds_options->panel_type);
return -1;
}
+ if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2)
+ return lvds_options->panel_type2;
+
+ drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
+
return lvds_options->panel_type;
}
-static int fallback_get_panel_type(struct drm_i915_private *i915)
+static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
+{
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *edid_id;
+ struct lvds_pnp_id edid_id_nodate;
+ int i, best = -1;
+
+ if (!edid)
+ return -1;
+
+ edid_id = (const void *)&edid->mfg_id[0];
+
+ edid_id_nodate = *edid_id;
+ edid_id_nodate.mfg_week = 0;
+ edid_id_nodate.mfg_year = 0;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return -1;
+
+ data = find_section(i915, BDB_LVDS_LFP_DATA);
+ if (!data)
+ return -1;
+
+ for (i = 0; i < 16; i++) {
+ const struct lvds_pnp_id *vbt_id =
+ get_lvds_pnp_id(data, ptrs, i);
+
+ /* full match? */
+ if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id)))
+ return i;
+
+ /*
+ * Accept a match w/o date if no full match is found,
+ * and the VBT entry does not specify a date.
+ */
+ if (best < 0 &&
+ !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id)))
+ best = i;
+ }
+
+ return best;
+}
+
+static int fallback_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
return 0;
}
@@ -625,14 +693,19 @@ static int fallback_get_panel_type(struct drm_i915_private *i915)
enum panel_type {
PANEL_TYPE_OPREGION,
PANEL_TYPE_VBT,
+ PANEL_TYPE_PNPID,
PANEL_TYPE_FALLBACK,
};
-static int get_panel_type(struct drm_i915_private *i915)
+static int get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
struct {
const char *name;
- int (*get_panel_type)(struct drm_i915_private *i915);
+ int (*get_panel_type)(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid);
int panel_type;
} panel_types[] = {
[PANEL_TYPE_OPREGION] = {
@@ -643,6 +716,10 @@ static int get_panel_type(struct drm_i915_private *i915)
.name = "VBT",
.get_panel_type = vbt_get_panel_type,
},
+ [PANEL_TYPE_PNPID] = {
+ .name = "PNPID",
+ .get_panel_type = pnpid_get_panel_type,
+ },
[PANEL_TYPE_FALLBACK] = {
.name = "fallback",
.get_panel_type = fallback_get_panel_type,
@@ -651,9 +728,10 @@ static int get_panel_type(struct drm_i915_private *i915)
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
- panel_types[i].panel_type = panel_types[i].get_panel_type(i915);
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
- drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf);
+ drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ panel_types[i].panel_type != 0xff);
if (panel_types[i].panel_type >= 0)
drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
@@ -662,7 +740,11 @@ static int get_panel_type(struct drm_i915_private *i915)
if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0)
i = PANEL_TYPE_OPREGION;
- else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0)
+ else if (panel_types[PANEL_TYPE_VBT].panel_type == 0xff &&
+ panel_types[PANEL_TYPE_PNPID].panel_type >= 0)
+ i = PANEL_TYPE_PNPID;
+ else if (panel_types[PANEL_TYPE_VBT].panel_type != 0xff &&
+ panel_types[PANEL_TYPE_VBT].panel_type >= 0)
i = PANEL_TYPE_VBT;
else
i = PANEL_TYPE_FALLBACK;
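With the PNPID source added, get_panel_type() ends up with a clear precedence: OpRegion if it reports a type, then an EDID PNP-ID lookup when the VBT block says 0xff, then an in-range VBT panel type, then panel type 0 as the fallback. A standalone restatement of that ordering (the 0xff encoding comes from the vbt_get_panel_type() change above):

/* Not driver code; just the selection order from the hunk above. */
static int pick_panel_type(int opregion, int vbt, int pnpid)
{
	if (opregion >= 0)
		return opregion;
	if (vbt == 0xff && pnpid >= 0)
		return pnpid;	/* VBT defers to the EDID PNP ID match */
	if (vbt != 0xff && vbt >= 0)
		return vbt;
	return 0;		/* fallback */
}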
@@ -673,26 +755,41 @@ static int get_panel_type(struct drm_i915_private *i915)
return panel_types[i].panel_type;
}
+static unsigned int panel_bits(unsigned int value, int panel_type, int num_bits)
+{
+ return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1);
+}
+
+static bool panel_bool(unsigned int value, int panel_type)
+{
+ return panel_bits(value, panel_type, 1);
+}
+
/* Parse general panel options */
static void
-parse_panel_options(struct drm_i915_private *i915)
+parse_panel_options(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lvds_options *lvds_options;
- int panel_type;
+ int panel_type = panel->vbt.panel_type;
int drrs_mode;
lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
- i915->vbt.lvds_dither = lvds_options->pixel_dither;
+ panel->vbt.lvds_dither = lvds_options->pixel_dither;
- panel_type = get_panel_type(i915);
-
- i915->vbt.panel_type = panel_type;
+ /*
+ * Empirical evidence indicates the block size can be
+ * either 4,14,16,24+ bytes. For older VBTs no clear
+ * relationship between the block size vs. BDB version.
+ */
+ if (get_blocksize(lvds_options) < 16)
+ return;
- drrs_mode = (lvds_options->dps_panel_type_bits
- >> (panel_type * 2)) & MODE_MASK;
+ drrs_mode = panel_bits(lvds_options->dps_panel_type_bits,
+ panel_type, 2);
/*
* VBT has static DRRS = 0 and seamless DRRS = 2.
* The below piece of code is required to adjust vbt.drrs_type
@@ -700,16 +797,16 @@ parse_panel_options(struct drm_i915_private *i915)
*/
switch (drrs_mode) {
case 0:
- i915->vbt.drrs_type = DRRS_TYPE_STATIC;
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
break;
case 2:
- i915->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
+ panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
drm_dbg_kms(&i915->drm,
"DRRS supported mode is seamless\n");
break;
default:
- i915->vbt.drrs_type = DRRS_TYPE_NONE;
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
drm_dbg_kms(&i915->drm,
"DRRS not supported (VBT input)\n");
break;
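The new panel_bits()/panel_bool() helpers replace the open-coded ">> (panel_type * n) & mask" pattern used by all of the panel-type-indexed parsers below. A self-contained demonstration with the 2-bit DRRS field and a 1-bit PSR field (input values are hypothetical):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* same definitions as the helpers added above */
static unsigned int panel_bits(unsigned int value, int panel_type, int num_bits)
{
	return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1);
}

static unsigned int panel_bool(unsigned int value, int panel_type)
{
	return panel_bits(value, panel_type, 1);
}

int main(void)
{
	unsigned int dps_panel_type_bits = 0x2 << 2;	/* panel 0: static, panel 1: seamless */
	unsigned int psr = BIT(1);			/* PSR enabled only for panel 1 */

	printf("panel 0 drrs mode = %u\n", panel_bits(dps_panel_type_bits, 0, 2));
	printf("panel 1 drrs mode = %u\n", panel_bits(dps_panel_type_bits, 1, 2));
	printf("panel 1 psr       = %u\n", panel_bool(psr, 1));
	return 0;
}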
@@ -718,13 +815,14 @@ parse_panel_options(struct drm_i915_private *i915)
static void
parse_lfp_panel_dtd(struct drm_i915_private *i915,
+ struct intel_panel *panel,
const struct bdb_lvds_lfp_data *lvds_lfp_data,
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
{
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
@@ -736,7 +834,7 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
drm_dbg_kms(&i915->drm,
"Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
@@ -749,20 +847,21 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
- i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
drm_dbg_kms(&i915->drm,
"VBT initial LVDS value %x\n",
- i915->vbt.bios_lvds_val);
+ panel->vbt.bios_lvds_val);
}
}
static void
-parse_lfp_data(struct drm_i915_private *i915)
+parse_lfp_data(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
@@ -772,24 +871,25 @@ parse_lfp_data(struct drm_i915_private *i915)
if (!data)
return;
- if (!i915->vbt.lfp_lvds_vbt_mode)
- parse_lfp_panel_dtd(i915, data, ptrs);
+ if (!panel->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(i915, panel, data, ptrs);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
if (i915->vbt.version >= 188) {
- i915->vbt.seamless_drrs_min_refresh_rate =
+ panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
drm_dbg_kms(&i915->drm,
"Seamless DRRS min refresh rate: %d Hz\n",
- i915->vbt.seamless_drrs_min_refresh_rate);
+ panel->vbt.seamless_drrs_min_refresh_rate);
}
}
static void
-parse_generic_dtd(struct drm_i915_private *i915)
+parse_generic_dtd(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_generic_dtd *generic_dtd;
const struct generic_dtd_entry *dtd;
@@ -824,14 +924,14 @@ parse_generic_dtd(struct drm_i915_private *i915)
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
- if (i915->vbt.panel_type >= num_dtd) {
+ if (panel->vbt.panel_type >= num_dtd) {
drm_err(&i915->drm,
"Panel type %d not found in table of %d DTD's\n",
- i915->vbt.panel_type, num_dtd);
+ panel->vbt.panel_type, num_dtd);
return;
}
- dtd = &generic_dtd->dtd[i915->vbt.panel_type];
+ dtd = &generic_dtd->dtd[panel->vbt.panel_type];
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
@@ -874,15 +974,16 @@ parse_generic_dtd(struct drm_i915_private *i915)
"Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
}
static void
-parse_lfp_backlight(struct drm_i915_private *i915)
+parse_lfp_backlight(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct lfp_backlight_data_entry *entry;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
u16 level;
backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
@@ -898,15 +999,15 @@ parse_lfp_backlight(struct drm_i915_private *i915)
entry = &backlight_data->data[panel_type];
- i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
- if (!i915->vbt.backlight.present) {
+ panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+ if (!panel->vbt.backlight.present) {
drm_dbg_kms(&i915->drm,
"PWM backlight not present in VBT (type %u)\n",
entry->type);
return;
}
- i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (i915->vbt.version >= 191) {
size_t exp_size;
@@ -921,13 +1022,13 @@ parse_lfp_backlight(struct drm_i915_private *i915)
const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
- i915->vbt.backlight.type = method->type;
- i915->vbt.backlight.controller = method->controller;
+ panel->vbt.backlight.type = method->type;
+ panel->vbt.backlight.controller = method->controller;
}
}
- i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
- i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
if (i915->vbt.version >= 234) {
u16 min_level;
@@ -948,28 +1049,29 @@ parse_lfp_backlight(struct drm_i915_private *i915)
drm_warn(&i915->drm, "Brightness min level > 255\n");
level = 255;
}
- i915->vbt.backlight.min_brightness = min_level;
+ panel->vbt.backlight.min_brightness = min_level;
- i915->vbt.backlight.brightness_precision_bits =
+ panel->vbt.backlight.brightness_precision_bits =
backlight_data->brightness_precision_bits[panel_type];
} else {
level = backlight_data->level[panel_type];
- i915->vbt.backlight.min_brightness = entry->min_brightness;
+ panel->vbt.backlight.min_brightness = entry->min_brightness;
}
drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
- i915->vbt.backlight.pwm_freq_hz,
- i915->vbt.backlight.active_low_pwm ? "low" : "high",
- i915->vbt.backlight.min_brightness,
+ panel->vbt.backlight.pwm_freq_hz,
+ panel->vbt.backlight.active_low_pwm ? "low" : "high",
+ panel->vbt.backlight.min_brightness,
level,
- i915->vbt.backlight.controller);
+ panel->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
static void
-parse_sdvo_panel_data(struct drm_i915_private *i915)
+parse_sdvo_panel_data(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_sdvo_panel_dtds *dtds;
struct drm_display_mode *panel_fixed_mode;
@@ -1002,7 +1104,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915)
fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
- i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+ panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
drm_dbg_kms(&i915->drm,
"Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
@@ -1181,6 +1283,17 @@ parse_driver_features(struct drm_i915_private *i915)
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
i915->vbt.int_lvds_support = 0;
}
+}
+
+static void
+parse_panel_driver_features(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_driver_features *driver;
+
+ driver = find_section(i915, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
if (i915->vbt.version < 228) {
drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
@@ -1191,18 +1304,29 @@ parse_driver_features(struct drm_i915_private *i915)
* static DRRS is 0 and DRRS not supported is represented by
* driver->drrs_enabled=false
*/
- if (!driver->drrs_enabled)
- i915->vbt.drrs_type = DRRS_TYPE_NONE;
+ if (!driver->drrs_enabled && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
+ /*
+ * FIXME Should DMRRS perhaps be treated as seamless
+ * but without the automatic downclocking?
+ */
+ if (driver->dmrrs_enabled)
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ else
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ }
- i915->vbt.psr.enable = driver->psr_enabled;
+ panel->vbt.psr.enable = driver->psr_enabled;
}
}
static void
-parse_power_conservation_features(struct drm_i915_private *i915)
+parse_power_conservation_features(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lfp_power *power;
- u8 panel_type = i915->vbt.panel_type;
+ u8 panel_type = panel->vbt.panel_type;
+
+ panel->vbt.vrr = true; /* matches Windows behaviour */
if (i915->vbt.version < 228)
return;
@@ -1211,7 +1335,7 @@ parse_power_conservation_features(struct drm_i915_private *i915)
if (!power)
return;
- i915->vbt.psr.enable = power->psr & BIT(panel_type);
+ panel->vbt.psr.enable = panel_bool(power->psr, panel_type);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
@@ -1219,34 +1343,47 @@ parse_power_conservation_features(struct drm_i915_private *i915)
* static DRRS is 0 and DRRS not supported is represented by
* power->drrs & BIT(panel_type)=false
*/
- if (!(power->drrs & BIT(panel_type)))
- i915->vbt.drrs_type = DRRS_TYPE_NONE;
+ if (!panel_bool(power->drrs, panel_type) && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
+ /*
+ * FIXME Should DMRRS perhaps be treated as seamless
+ * but without the automatic downclocking?
+ */
+ if (panel_bool(power->dmrrs, panel_type))
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ else
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ }
if (i915->vbt.version >= 232)
- i915->vbt.edp.hobl = power->hobl & BIT(panel_type);
+ panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
+
+ if (i915->vbt.version >= 233)
+ panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
+ panel_type);
}
static void
-parse_edp(struct drm_i915_private *i915)
+parse_edp(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
edp = find_section(i915, BDB_EDP);
if (!edp)
return;
- switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ switch (panel_bits(edp->color_depth, panel_type, 2)) {
case EDP_18BPP:
- i915->vbt.edp.bpp = 18;
+ panel->vbt.edp.bpp = 18;
break;
case EDP_24BPP:
- i915->vbt.edp.bpp = 24;
+ panel->vbt.edp.bpp = 24;
break;
case EDP_30BPP:
- i915->vbt.edp.bpp = 30;
+ panel->vbt.edp.bpp = 30;
break;
}
@@ -1254,31 +1391,39 @@ parse_edp(struct drm_i915_private *i915)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
- i915->vbt.edp.pps = *edp_pps;
+ panel->vbt.edp.pps = *edp_pps;
- switch (edp_link_params->rate) {
- case EDP_RATE_1_62:
- i915->vbt.edp.rate = DP_LINK_BW_1_62;
- break;
- case EDP_RATE_2_7:
- i915->vbt.edp.rate = DP_LINK_BW_2_7;
- break;
- default:
- drm_dbg_kms(&i915->drm,
- "VBT has unknown eDP link rate value %u\n",
- edp_link_params->rate);
- break;
+ if (i915->vbt.version >= 224) {
+ panel->vbt.edp.rate =
+ edp->edp_fast_link_training_rate[panel_type] * 20;
+ } else {
+ switch (edp_link_params->rate) {
+ case EDP_RATE_1_62:
+ panel->vbt.edp.rate = 162000;
+ break;
+ case EDP_RATE_2_7:
+ panel->vbt.edp.rate = 270000;
+ break;
+ case EDP_RATE_5_4:
+ panel->vbt.edp.rate = 540000;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
+ break;
+ }
}
switch (edp_link_params->lanes) {
case EDP_LANE_1:
- i915->vbt.edp.lanes = 1;
+ panel->vbt.edp.lanes = 1;
break;
case EDP_LANE_2:
- i915->vbt.edp.lanes = 2;
+ panel->vbt.edp.lanes = 2;
break;
case EDP_LANE_4:
- i915->vbt.edp.lanes = 4;
+ panel->vbt.edp.lanes = 4;
break;
default:
drm_dbg_kms(&i915->drm,
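From VBT version 224 the fast link rate is read straight from edp_fast_link_training_rate[] (and from version 244 the max rate from edp_max_port_link_rate[], later in this file); both are multiplied by 20 to give the kHz link clock values i915 uses, so the old 1.62/2.7 enum cases map to 162000 and 270000, the switch now also handles 5.4, and newer VBTs can express rates directly. A quick check of that unit conversion (the raw values follow from the arithmetic, they are not read from a real VBT):

#include <stdio.h>

int main(void)
{
	/* Implied by the *20 conversion above: the VBT field is in 20 kHz units. */
	unsigned int raw[] = { 8100, 13500, 27000, 40500 };

	for (unsigned int i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("VBT value %5u -> %6u kHz link clock\n",
		       raw[i], raw[i] * 20);
	return 0;
}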
@@ -1289,16 +1434,16 @@ parse_edp(struct drm_i915_private *i915)
switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case EDP_PREEMPHASIS_3_5dB:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case EDP_PREEMPHASIS_6dB:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case EDP_PREEMPHASIS_9_5dB:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1309,16 +1454,16 @@ parse_edp(struct drm_i915_private *i915)
switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case EDP_VSWING_0_6V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case EDP_VSWING_0_8V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case EDP_VSWING_1_2V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1332,24 +1477,29 @@ parse_edp(struct drm_i915_private *i915)
/* Don't read from VBT if module parameter has valid value*/
if (i915->params.edp_vswing) {
- i915->vbt.edp.low_vswing =
+ panel->vbt.edp.low_vswing =
i915->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- i915->vbt.edp.low_vswing = vswing == 0;
+ panel->vbt.edp.low_vswing = vswing == 0;
}
}
- i915->vbt.edp.drrs_msa_timing_delay =
- (edp->sdrrs_msa_timing_delay >> (panel_type * 2)) & 3;
+ panel->vbt.edp.drrs_msa_timing_delay =
+ panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
+
+ if (i915->vbt.version >= 244)
+ panel->vbt.edp.max_link_rate =
+ edp->edp_max_port_link_rate[panel_type] * 20;
}
static void
-parse_psr(struct drm_i915_private *i915)
+parse_psr(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
psr = find_section(i915, BDB_PSR);
if (!psr) {
@@ -1359,11 +1509,11 @@ parse_psr(struct drm_i915_private *i915)
psr_table = &psr->psr_table[panel_type];
- i915->vbt.psr.full_link = psr_table->full_link;
- i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+ panel->vbt.psr.full_link = psr_table->full_link;
+ panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
/* Allowed VBT values goes from 0 to 15 */
- i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
/*
@@ -1374,13 +1524,13 @@ parse_psr(struct drm_i915_private *i915)
(DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
- i915->vbt.psr.tp1_wakeup_time_us = 500;
+ panel->vbt.psr.tp1_wakeup_time_us = 500;
break;
case 1:
- i915->vbt.psr.tp1_wakeup_time_us = 100;
+ panel->vbt.psr.tp1_wakeup_time_us = 100;
break;
case 3:
- i915->vbt.psr.tp1_wakeup_time_us = 0;
+ panel->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1388,19 +1538,19 @@ parse_psr(struct drm_i915_private *i915)
psr_table->tp1_wakeup_time);
fallthrough;
case 2:
- i915->vbt.psr.tp1_wakeup_time_us = 2500;
+ panel->vbt.psr.tp1_wakeup_time_us = 2500;
break;
}
switch (psr_table->tp2_tp3_wakeup_time) {
case 0:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 500;
break;
case 1:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 100;
break;
case 3:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1408,18 +1558,18 @@ parse_psr(struct drm_i915_private *i915)
psr_table->tp2_tp3_wakeup_time);
fallthrough;
case 2:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
break;
}
} else {
- i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
- i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
if (i915->vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
- wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+ wakeup_time = panel_bits(wakeup_time, panel_type, 2);
switch (wakeup_time) {
case 0:
wakeup_time = 500;
@@ -1435,62 +1585,64 @@ parse_psr(struct drm_i915_private *i915)
wakeup_time = 2500;
break;
}
- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
} else {
/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us;
+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us;
}
}
static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
- u16 version, enum port port)
+ struct intel_panel *panel,
+ enum port port)
{
- if (!i915->vbt.dsi.config->dual_link || version < 197) {
- i915->vbt.dsi.bl_ports = BIT(port);
- if (i915->vbt.dsi.config->cabc_supported)
- i915->vbt.dsi.cabc_ports = BIT(port);
+ if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
+ panel->vbt.dsi.bl_ports = BIT(port);
+ if (panel->vbt.dsi.config->cabc_supported)
+ panel->vbt.dsi.cabc_ports = BIT(port);
return;
}
- switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) {
+ switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) {
case DL_DCS_PORT_A:
- i915->vbt.dsi.bl_ports = BIT(PORT_A);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- i915->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
- i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
- if (!i915->vbt.dsi.config->cabc_supported)
+ if (!panel->vbt.dsi.config->cabc_supported)
return;
- switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) {
+ switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) {
case DL_DCS_PORT_A:
- i915->vbt.dsi.cabc_ports = BIT(PORT_A);
+ panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- i915->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
- i915->vbt.dsi.cabc_ports =
+ panel->vbt.dsi.cabc_ports =
BIT(PORT_A) | BIT(PORT_C);
break;
}
}
static void
-parse_mipi_config(struct drm_i915_private *i915)
+parse_mipi_config(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
@@ -1498,7 +1650,7 @@ parse_mipi_config(struct drm_i915_private *i915)
return;
/* Initialize this to undefined indicating no generic MIPI support */
- i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
/* Block #40 is already parsed and panel_fixed_mode is
* stored in i915->lfp_lvds_vbt_mode
@@ -1525,17 +1677,17 @@ parse_mipi_config(struct drm_i915_private *i915)
pps = &start->pps[panel_type];
/* store as of now full data. Trim when we realise all is not needed */
- i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
- if (!i915->vbt.dsi.config)
+ panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+ if (!panel->vbt.dsi.config)
return;
- i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
- if (!i915->vbt.dsi.pps) {
- kfree(i915->vbt.dsi.config);
+ panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+ if (!panel->vbt.dsi.pps) {
+ kfree(panel->vbt.dsi.config);
return;
}
- parse_dsi_backlight_ports(i915, i915->vbt.version, port);
+ parse_dsi_backlight_ports(i915, panel, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
@@ -1544,25 +1696,25 @@ parse_mipi_config(struct drm_i915_private *i915)
* Most (all?) VBTs claim 0 degrees despite having
* an upside down panel, thus we do not trust this.
*/
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
break;
case ENABLE_ROTATION_90:
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
break;
case ENABLE_ROTATION_180:
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
break;
case ENABLE_ROTATION_270:
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
break;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */
- i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+ panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
/* Find the sequence block and size for the given panel. */
@@ -1725,13 +1877,14 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
* skip all delay + gpio operands and stop at the first DSI packet op.
*/
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
- const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
if (drm_WARN_ON(&i915->drm,
- !data || i915->vbt.dsi.seq_version != 1))
+ !data || panel->vbt.dsi.seq_version != 1))
return 0;
/* index = 1 to skip sequence byte */
@@ -1759,7 +1912,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915)
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
@@ -1769,18 +1923,18 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
return;
/* Limit this to v1 vid-mode sequences */
- if (i915->vbt.dsi.config->is_cmd_mode ||
- i915->vbt.dsi.seq_version != 1)
+ if (panel->vbt.dsi.config->is_cmd_mode ||
+ panel->vbt.dsi.seq_version != 1)
return;
/* Only do this if there are otp and assert seqs and no deassert seq */
- if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
- !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
return;
/* The deassert-sequence ends at the first DSI packet */
- len = get_init_otp_deassert_fragment_len(i915);
+ len = get_init_otp_deassert_fragment_len(i915, panel);
if (!len)
return;
@@ -1788,25 +1942,26 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
"Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
- init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
- i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
- if (!i915->vbt.dsi.deassert_seq)
+ init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!panel->vbt.dsi.deassert_seq)
return;
- i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
- i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
/* Use the copy for deassert */
- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
- i915->vbt.dsi.deassert_seq;
+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ panel->vbt.dsi.deassert_seq;
/* Replace the last byte of the fragment with init OTP seq byte */
init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
/* And make MIPI_MIPI_SEQ_INIT_OTP point to it */
- i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
static void
-parse_mipi_sequence(struct drm_i915_private *i915)
+parse_mipi_sequence(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
@@ -1814,7 +1969,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
int index = 0;
/* Only our generic panel driver uses the sequence block. */
- if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
sequence = find_section(i915, BDB_MIPI_SEQUENCE);
@@ -1860,7 +2015,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm,
"Unsupported sequence %u\n", seq_id);
- i915->vbt.dsi.sequence[seq_id] = data + index;
+ panel->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
@@ -1873,18 +2028,18 @@ parse_mipi_sequence(struct drm_i915_private *i915)
}
}
- i915->vbt.dsi.data = data;
- i915->vbt.dsi.size = seq_size;
- i915->vbt.dsi.seq_version = sequence->version;
+ panel->vbt.dsi.data = data;
+ panel->vbt.dsi.size = seq_size;
+ panel->vbt.dsi.seq_version = sequence->version;
- fixup_mipi_sequences(i915);
+ fixup_mipi_sequences(i915, panel);
drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
return;
err:
kfree(data);
- memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence));
+ memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence));
}
static void
@@ -2343,10 +2498,10 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
if (port != PORT_A || DISPLAY_VER(i915) >= 12)
return;
- if (!(devdata->child.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING))
+ if (!intel_bios_encoder_supports_dvi(devdata))
return;
- is_hdmi = !(devdata->child.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT);
+ is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
@@ -2432,33 +2587,13 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port)
return true;
}
-static void parse_ddi_port(struct drm_i915_private *i915,
- struct intel_bios_encoder_data *devdata)
+static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ enum port port)
{
+ struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
- enum port port;
-
- port = dvo_port_to_port(i915, child->dvo_port);
- if (port == PORT_NONE)
- return;
-
- if (!is_port_valid(i915, port)) {
- drm_dbg_kms(&i915->drm,
- "VBT reports port %c as supported, but that can't be true: skipping\n",
- port_name(port));
- return;
- }
-
- if (i915->vbt.ports[port]) {
- drm_dbg_kms(&i915->drm,
- "More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
- return;
- }
-
- sanitize_device_type(devdata, port);
is_dvi = intel_bios_encoder_supports_dvi(devdata);
is_dp = intel_bios_encoder_supports_dp(devdata);
@@ -2476,12 +2611,6 @@ static void parse_ddi_port(struct drm_i915_private *i915,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- if (is_dvi)
- sanitize_ddc_pin(devdata, port);
-
- if (is_dp)
- sanitize_aux_ch(devdata, port);
-
hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
@@ -2513,6 +2642,39 @@ static void parse_ddi_port(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
port_name(port), dp_max_link_rate);
+}
+
+static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ const struct child_device_config *child = &devdata->child;
+ enum port port;
+
+ port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE)
+ return;
+
+ if (!is_port_valid(i915, port)) {
+ drm_dbg_kms(&i915->drm,
+ "VBT reports port %c as supported, but that can't be true: skipping\n",
+ port_name(port));
+ return;
+ }
+
+ if (i915->vbt.ports[port]) {
+ drm_dbg_kms(&i915->drm,
+ "More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
+ return;
+ }
+
+ sanitize_device_type(devdata, port);
+
+ if (intel_bios_encoder_supports_dvi(devdata))
+ sanitize_ddc_pin(devdata, port);
+
+ if (intel_bios_encoder_supports_dp(devdata))
+ sanitize_aux_ch(devdata, port);
i915->vbt.ports[port] = devdata;
}
@@ -2525,12 +2687,18 @@ static bool has_ddi_port_info(struct drm_i915_private *i915)
static void parse_ddi_ports(struct drm_i915_private *i915)
{
struct intel_bios_encoder_data *devdata;
+ enum port port;
if (!has_ddi_port_info(i915))
return;
list_for_each_entry(devdata, &i915->vbt.display_devices, node)
- parse_ddi_port(i915, devdata);
+ parse_ddi_port(devdata);
+
+ for_each_port(port) {
+ if (i915->vbt.ports[port])
+ print_ddi_port(i915->vbt.ports[port], port);
+ }
}
static void
@@ -2638,15 +2806,6 @@ init_vbt_defaults(struct drm_i915_private *i915)
{
i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
- /* Default to having backlight */
- i915->vbt.backlight.present = true;
-
- /* LFP panel data */
- i915->vbt.lvds_dither = 1;
-
- /* SDVO panel data */
- i915->vbt.sdvo_lvds_vbt_mode = NULL;
-
/* general features */
i915->vbt.int_tv_support = 1;
i915->vbt.int_crt_support = 1;
@@ -2666,6 +2825,17 @@ init_vbt_defaults(struct drm_i915_private *i915)
i915->vbt.lvds_ssc_freq);
}
+/* Common defaults which may be overridden by VBT. */
+static void
+init_vbt_panel_defaults(struct intel_panel *panel)
+{
+ /* Default to having backlight */
+ panel->vbt.backlight.present = true;
+
+ /* LFP panel data */
+ panel->vbt.lvds_dither = true;
+}
+
/* Defaults to initialize only if there is no VBT. */
static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
@@ -2952,17 +3122,7 @@ void intel_bios_init(struct drm_i915_private *i915)
/* Grab useful general definitions */
parse_general_features(i915);
parse_general_definitions(i915);
- parse_panel_options(i915);
- parse_generic_dtd(i915);
- parse_lfp_data(i915);
- parse_lfp_backlight(i915);
- parse_sdvo_panel_data(i915);
parse_driver_features(i915);
- parse_power_conservation_features(i915);
- parse_edp(i915);
- parse_psr(i915);
- parse_mipi_config(i915);
- parse_mipi_sequence(i915);
/* Depends on child device list */
parse_compression_parameters(i915);
@@ -2981,6 +3141,28 @@ out:
kfree(oprom_vbt);
}
+void intel_bios_init_panel(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
+{
+ init_vbt_panel_defaults(panel);
+
+ panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
+
+ parse_panel_options(i915, panel);
+ parse_generic_dtd(i915, panel);
+ parse_lfp_data(i915, panel);
+ parse_lfp_backlight(i915, panel);
+ parse_sdvo_panel_data(i915, panel);
+ parse_panel_driver_features(i915, panel);
+ parse_power_conservation_features(i915, panel);
+ parse_edp(i915, panel);
+ parse_psr(i915, panel);
+ parse_mipi_config(i915, panel);
+ parse_mipi_sequence(i915, panel);
+}
+
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @i915: i915 device instance
@@ -3000,19 +3182,22 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
list_del(&entry->node);
kfree(entry);
}
+}
- kfree(i915->vbt.sdvo_lvds_vbt_mode);
- i915->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(i915->vbt.lfp_lvds_vbt_mode);
- i915->vbt.lfp_lvds_vbt_mode = NULL;
- kfree(i915->vbt.dsi.data);
- i915->vbt.dsi.data = NULL;
- kfree(i915->vbt.dsi.pps);
- i915->vbt.dsi.pps = NULL;
- kfree(i915->vbt.dsi.config);
- i915->vbt.dsi.config = NULL;
- kfree(i915->vbt.dsi.deassert_seq);
- i915->vbt.dsi.deassert_seq = NULL;
+void intel_bios_fini_panel(struct intel_panel *panel)
+{
+ kfree(panel->vbt.sdvo_lvds_vbt_mode);
+ panel->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(panel->vbt.lfp_lvds_vbt_mode);
+ panel->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(panel->vbt.dsi.data);
+ panel->vbt.dsi.data = NULL;
+ kfree(panel->vbt.dsi.pps);
+ panel->vbt.dsi.pps = NULL;
+ kfree(panel->vbt.dsi.config);
+ panel->vbt.dsi.config = NULL;
+ kfree(panel->vbt.dsi.deassert_seq);
+ panel->vbt.dsi.deassert_seq = NULL;
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4709c4d29805..e47582b0de0a 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -33,9 +33,11 @@
#include <linux/types.h>
struct drm_i915_private;
+struct edid;
struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
+struct intel_panel;
enum port;
enum intel_backlight_type {
@@ -230,6 +232,11 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_init_panel(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid);
+void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
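The header now declares the per-panel entry points: intel_bios_init_panel() is called per connector once the encoder association (and, where available, the child device data and EDID) is known, and intel_bios_fini_panel() takes over the per-panel frees that intel_bios_driver_remove() used to do. A hedged usage sketch; the surrounding connector init/destroy functions are made up, only the two intel_bios_*_panel() calls are the API added here:

static void foo_connector_init(struct drm_i915_private *i915,
			       struct intel_connector *connector,
			       const struct intel_bios_encoder_data *devdata,
			       const struct edid *edid)
{
	/* Picks the panel type (OpRegion/VBT/PNPID/fallback) and parses the
	 * panel-indexed VBT blocks into connector->panel.vbt. */
	intel_bios_init_panel(i915, &connector->panel, devdata, edid);
}

static void foo_connector_destroy(struct intel_connector *connector)
{
	/* Frees the per-panel VBT allocations (fixed modes, DSI config,
	 * PPS and sequences). */
	intel_bios_fini_panel(&connector->panel);
}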
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 37bd7b17f3d0..79269d2c476b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -78,7 +78,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
u16 dclk;
int ret;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
@@ -104,7 +104,7 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
int ret;
int i;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -123,7 +123,7 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
int ret;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b2017d8161b4..6e80162632dd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -800,7 +800,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(&dev_priv->drm,
"failed to inform pcode about cdclk change\n");
@@ -828,7 +828,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level);
intel_de_write(dev_priv, CDCLK_FREQ,
@@ -1086,7 +1086,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
drm_WARN_ON_ONCE(&dev_priv->drm,
IS_SKYLAKE(dev_priv) && vco == 8640000);
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1132,7 +1132,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
intel_update_cdclk(dev_priv);
@@ -1702,7 +1702,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* Inform power controller of upcoming frequency change. */
if (DISPLAY_VER(dev_priv) >= 11)
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1711,7 +1711,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(dev_priv,
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000, 150, 2);
if (ret) {
@@ -1774,7 +1774,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
if (DISPLAY_VER(dev_priv) >= 11) {
- ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
} else {
/*
@@ -1783,7 +1783,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(dev_priv,
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level,
150, 2);
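
The intel_bw.c and intel_cdclk.c hunks above are a mechanical conversion: the
snb_pcode_read()/snb_pcode_write()/snb_pcode_write_timeout() and
skl_pcode_request() helpers now take a struct intel_uncore * instead of the
whole struct drm_i915_private *, so display code passes &dev_priv->uncore.
The change in calling convention, sketched on the skl_set_cdclk() write shown
above (illustration only):

	/* Before: pcode helpers took the full device pointer. */
	snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
			cdclk_config->voltage_level);

	/* After: they operate on the uncore, which display code reaches
	 * through dev_priv->uncore.
	 */
	snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
			cdclk_config->voltage_level);
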
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 34128c9c635c..9583d17e858d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -505,30 +505,19 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- u32 val;
-
- val = intel_de_read(dev_priv, PIPECONF(pipe));
- val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
+ /* update PIPECONF GAMMA_MODE */
+ i9xx_set_pipeconf(crtc_state);
}
static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- u32 val;
- val = intel_de_read(dev_priv, PIPECONF(pipe));
- val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
+ /* update PIPECONF GAMMA_MODE */
+ ilk_set_pipeconf(crtc_state);
- intel_de_write_fw(dev_priv, PIPE_CSC_MODE(pipe),
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
@@ -852,7 +841,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
/*
@@ -894,7 +883,7 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -1346,10 +1335,10 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
return -EINVAL;
}
- degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
- degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
- gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
+ degamma_length = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ gamma_length = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ degamma_tests = INTEL_INFO(dev_priv)->display.color.degamma_lut_tests;
+ gamma_tests = INTEL_INFO(dev_priv)->display.color.gamma_lut_tests;
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
@@ -1638,7 +1627,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
/*
* Enable 10bit gamma for D13
* ToDo: Extend to Logarithmic Gamma once the new UAPI
- * is acccepted and implemented by a userspace consumer
+ * is accepted and implemented by a userspace consumer
*/
else if (DISPLAY_VER(i915) >= 13)
gamma_mode |= GAMMA_MODE_MODE_10BIT;
@@ -1885,7 +1874,7 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -1928,7 +1917,7 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -1989,7 +1978,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -2040,7 +2029,7 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i, hw_lut_size = ivb_lut_10_size(prec_index);
- int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -2093,7 +2082,7 @@ static struct drm_property_blob *
icl_read_lut_multi_segment(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -2230,7 +2219,7 @@ static const struct intel_color_funcs ilk_color_funcs = {
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
+ bool has_ctm = INTEL_INFO(dev_priv)->display.color.degamma_lut_size != 0;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
@@ -2261,7 +2250,7 @@ void intel_color_init(struct intel_crtc *crtc)
}
drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ INTEL_INFO(dev_priv)->display.color.degamma_lut_size,
has_ctm,
- INTEL_INFO(dev_priv)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->display.color.gamma_lut_size);
}
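
The intel_color.c changes are another rename-only conversion: the color LUT
capabilities (gamma/degamma sizes and test counts) now live under the display
sub-struct of the device info. Sketch of the lookup before and after, using
the same field as the hunks above:

	/* Before */
	lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;

	/* After */
	lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
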
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
new file mode 100644
index 000000000000..4ca6e9493ff2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_display_types.h"
+#include "intel_hdmi.h"
+#include "intel_vrr.h"
+
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode)
+{
+ drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
+}
+
+static void
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+ const char *id, unsigned int lane_count,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+ drm_dbg_kms(&i915->drm,
+ "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->data_m, m_n->data_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
+static void
+intel_dump_infoframe(struct drm_i915_private *i915,
+ const union hdmi_infoframe *frame)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+}
+
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
+}
+
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+ OUTPUT_TYPE(UNUSED),
+ OUTPUT_TYPE(ANALOG),
+ OUTPUT_TYPE(DVO),
+ OUTPUT_TYPE(SDVO),
+ OUTPUT_TYPE(LVDS),
+ OUTPUT_TYPE(TVOUT),
+ OUTPUT_TYPE(HDMI),
+ OUTPUT_TYPE(DP),
+ OUTPUT_TYPE(EDP),
+ OUTPUT_TYPE(DSI),
+ OUTPUT_TYPE(DDI),
+ OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+ unsigned int output_types)
+{
+ char *str = buf;
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+ int r;
+
+ if ((output_types & BIT(i)) == 0)
+ continue;
+
+ r = snprintf(str, len, "%s%s",
+ str != buf ? "," : "", output_type_str[i]);
+ if (r >= len)
+ break;
+ str += r;
+ len -= r;
+
+ output_types &= ~BIT(i);
+ }
+
+ WARN_ON_ONCE(output_types != 0);
+}
+
+static const char * const output_format_str[] = {
+ [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+ [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+ [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+ if (format >= ARRAY_SIZE(output_format_str))
+ return "invalid";
+ return output_format_str[format];
+}
+
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (!fb) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ str_yes_no(plane_state->uapi.visible));
+ return;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height, &fb->format->format,
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id);
+ if (plane_state->uapi.visible)
+ drm_dbg_kms(&i915->drm,
+ "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
+}
+
+void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
+ struct intel_atomic_state *state,
+ const char *context)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ char buf[64];
+ int i;
+
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
+ crtc->base.base.id, crtc->base.name,
+ str_yes_no(pipe_config->hw.enable), context);
+
+ if (!pipe_config->hw.enable)
+ goto dump_planes;
+
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ drm_dbg_kms(&i915->drm,
+ "active: %s, output_types: %s (0x%x), output format: %s\n",
+ str_yes_no(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ output_formats(pipe_config->output_format));
+
+ drm_dbg_kms(&i915->drm,
+ "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
+
+ drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
+ drm_dbg_kms(&i915->drm,
+ "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
+ drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
+
+ drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
+ str_enabled_disabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count,
+ &pipe_config->dp_m_n);
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
+
+ drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+
+ drm_dbg_kms(&i915->drm,
+ "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
+
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+ drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
+ pipe_config->infoframes.gcp);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_VSC))
+ intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+
+ drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ str_yes_no(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
+ drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
+ intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
+ drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
+ intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
+ drm_dbg_kms(&i915->drm,
+ "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
+ pipe_config->pixel_rate);
+
+ drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
+
+ if (DISPLAY_VER(i915) >= 9)
+ drm_dbg_kms(&i915->drm,
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH(i915))
+ drm_dbg_kms(&i915->drm,
+ "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ drm_dbg_kms(&i915->drm,
+ "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
+
+ drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
+
+ intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
+
+ if (IS_CHERRYVIEW(i915))
+ drm_dbg_kms(&i915->drm,
+ "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ drm_dbg_kms(&i915->drm,
+ "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ drm_dbg_kms(&i915->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
+ pipe_config->hw.degamma_lut ?
+ drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
+ pipe_config->hw.gamma_lut ?
+ drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
+
+dump_planes:
+ if (!state)
+ return;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe)
+ intel_dump_plane_state(plane_state);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
new file mode 100644
index 000000000000..9399c35b7e5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_CRTC_STATE_DUMP_H__
+#define __INTEL_CRTC_STATE_DUMP_H__
+
+struct intel_crtc_state;
+struct intel_atomic_state;
+
+void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state,
+ struct intel_atomic_state *state,
+ const char *context);
+
+#endif /* __INTEL_CRTC_STATE_DUMP_H__ */
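
The new header exposes a single entry point that replaces the static
intel_dump_pipe_config() removed from intel_display.c further down. A sketch
of a call site; the surrounding condition and the context strings are
hypothetical, only the function signature comes from the header above:

	/* Dump the computed state for a CRTC that is about to change. */
	if (intel_crtc_needs_modeset(crtc_state))
		intel_crtc_state_dump(crtc_state, state, "modeset");
	else
		intel_crtc_state_dump(crtc_state, state, "fastset");
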
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 8c80de877605..c2797ad2d313 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9e6fa59eabba..2330604b0bcc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
+#include "intel_audio_regs.h"
#include "intel_backlight.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
@@ -322,14 +323,10 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
int dotclock;
- /* CRT dotclock is determined via other means */
- if (pipe_config->has_pch_encoder)
- return;
-
if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
@@ -345,7 +342,17 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
+ return dotclock;
+}
+
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+ /* CRT dotclock is determined via other means */
+ if (pipe_config->has_pch_encoder)
+ return;
+
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ intel_crtc_dotclock(pipe_config);
}
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -455,6 +462,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_SELECT_PORT(port);
switch (crtc_state->pipe_bpp) {
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -467,8 +477,6 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
case 36:
temp |= TRANS_DDI_BPC_12;
break;
- default:
- BUG();
}
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
@@ -478,6 +486,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
case PIPE_A:
/* On Haswell, can only use the always-on power well for
* eDP when not using the panel fitter, and when not
@@ -494,9 +505,6 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
case PIPE_C:
temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
- default:
- BUG();
- break;
}
}
@@ -3433,26 +3441,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
- /*
- * This is a big fat ugly hack.
- *
- * Some machines in UEFI boot mode provide us a VBT that has 18
- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
- * unknown we fail to light up. Yet the same BIOS boots up with
- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
- * max, not what it tells us to use.
- *
- * Note: This will still be broken if the eDP panel is not lit
- * up by the BIOS, and thus we can't get the mode at module
- * load.
- */
- drm_dbg_kms(&dev_priv->drm,
- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
- }
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
@@ -4189,7 +4179,7 @@ static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_MCC(dev_priv))
+ if (HAS_PCH_TGP(dev_priv))
return icl_hpd_pin(dev_priv, port);
return HPD_PORT_A + port - PORT_A;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 85f58dd3df72..006a2e979000 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -878,26 +878,6 @@ static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = {
.num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_hdmi[] = {
- /* NT mV Trans mV db */
- { .icl = { 0x6, 0x60, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
- { .icl = { 0x6, 0x68, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
- { .icl = { 0xA, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 ALS */
- { .icl = { 0xA, 0x78, 0x3F, 0x00, 0x00 } }, /* 800 800 0.0 */
- { .icl = { 0xB, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1000 1000 0.0 Re-timer */
- { .icl = { 0xB, 0x7F, 0x3B, 0x00, 0x04 } }, /* Full Red -1.5 */
- { .icl = { 0xB, 0x7F, 0x39, 0x00, 0x06 } }, /* Full Red -1.8 */
- { .icl = { 0xB, 0x7F, 0x37, 0x00, 0x08 } }, /* Full Red -2.0 CRLS */
- { .icl = { 0xB, 0x7F, 0x35, 0x00, 0x0A } }, /* Full Red -2.5 */
- { .icl = { 0xB, 0x7F, 0x33, 0x00, 0x0C } }, /* Full Red -3.0 */
-};
-
-static const struct intel_ddi_buf_trans adlp_combo_phy_trans_hdmi = {
- .entries = _adlp_combo_phy_trans_hdmi,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi) - 1,
-};
-
static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
@@ -953,9 +933,9 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_edp_h
{ .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
{ .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
{ .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
- { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
- { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
- { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
@@ -1062,17 +1042,18 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
static bool use_edp_hobl(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
- return i915->vbt.edp.hobl && !intel_dp->hobl_failed;
+ return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed;
}
static bool use_edp_low_vswing(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
- return i915->vbt.edp.low_vswing;
+ return connector->panel.vbt.edp.low_vswing;
}
static const struct intel_ddi_buf_trans *
@@ -1556,7 +1537,7 @@ adlp_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&adlp_combo_phy_trans_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 806d50b302ab..a0f84cbe974f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -87,6 +87,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -99,6 +100,8 @@
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
+#include "intel_modeset_verify.h"
+#include "intel_modeset_setup.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
@@ -123,13 +126,9 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
-static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void intel_modeset_setup_hw_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx);
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
@@ -164,7 +163,7 @@ static void intel_modeset_setup_hw_state(struct drm_device *dev,
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-static void intel_update_watermarks(struct drm_i915_private *dev_priv)
+void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
if (dev_priv->wm_disp->update_wm)
dev_priv->wm_disp->update_wm(dev_priv);
@@ -500,6 +499,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
i915_reg_t dpll_reg;
switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@@ -513,8 +515,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
port_mask = DPLL_PORTD_READY_MASK;
dpll_reg = DPIO_PHY_STATUS;
break;
- default:
- BUG();
}
if (intel_de_wait_for_register(dev_priv, dpll_reg,
@@ -730,10 +730,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0);
}
-static void
-intel_set_plane_visible(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state,
- bool visible)
+void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -745,7 +744,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
-static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
+void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
@@ -780,7 +779,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
- fixup_plane_bitmasks(crtc_state);
+ intel_plane_fixup_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
@@ -829,7 +828,7 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
}
static int
-__intel_display_resume(struct drm_device *dev,
+__intel_display_resume(struct drm_i915_private *i915,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -837,8 +836,8 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- intel_modeset_setup_hw_state(dev, ctx);
- intel_vga_redisable(to_i915(dev));
+ intel_modeset_setup_hw_state(i915, ctx);
+ intel_vga_redisable(i915);
if (!state)
return 0;
@@ -858,12 +857,13 @@ __intel_display_resume(struct drm_device *dev,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH(to_i915(dev)))
+ if (!HAS_GMCH(i915))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- drm_WARN_ON(dev, ret == -EDEADLK);
+ drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+
return ret;
}
@@ -936,56 +936,55 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
state->acquire_ctx = ctx;
}
-void intel_display_finish_reset(struct drm_i915_private *dev_priv)
+void intel_display_finish_reset(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
+ if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
return;
- state = fetch_and_zero(&dev_priv->modeset_restore_state);
+ state = fetch_and_zero(&i915->modeset_restore_state);
if (!state)
goto unlock;
/* reset doesn't touch the display */
- if (!gpu_reset_clobbers_display(dev_priv)) {
+ if (!gpu_reset_clobbers_display(i915)) {
/* for testing only restore the display */
- ret = __intel_display_resume(dev, state, ctx);
+ ret = __intel_display_resume(i915, state, ctx);
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_pps_unlock_regs_wa(dev_priv);
- intel_modeset_init_hw(dev_priv);
- intel_init_clock_gating(dev_priv);
- intel_hpd_init(dev_priv);
+ intel_pps_unlock_regs_wa(i915);
+ intel_modeset_init_hw(i915);
+ intel_init_clock_gating(i915);
+ intel_hpd_init(i915);
- ret = __intel_display_resume(dev, state, ctx);
+ ret = __intel_display_resume(i915, state, ctx);
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(i915);
}
drm_atomic_state_put(state);
unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&i915->drm.mode_config.mutex);
- clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
@@ -2206,9 +2205,8 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
-static void
-modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
- struct intel_power_domain_mask *old_domains)
+void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2232,8 +2230,8 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
domain);
}
-static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
- struct intel_power_domain_mask *domains)
+void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ struct intel_power_domain_mask *domains)
{
intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
&crtc->enabled_power_domains,
@@ -2413,89 +2411,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
i830_enable_pipe(dev_priv, pipe);
}
-static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_encoder *encoder;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_bw_state *bw_state =
- to_intel_bw_state(dev_priv->bw_obj.state);
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(dev_priv->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane;
- struct drm_atomic_state *state;
- struct intel_crtc_state *temp_crtc_state;
- enum pipe pipe = crtc->pipe;
- int ret;
-
- if (!crtc_state->hw.active)
- return;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
- }
-
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (!state) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.base.id, crtc->base.name);
- return;
- }
-
- state->acquire_ctx = ctx;
-
- /* Everything's already locked, -EDEADLK can't happen. */
- temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
- ret = drm_atomic_add_affected_connectors(state, &crtc->base);
-
- drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
-
- dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
-
- drm_atomic_state_put(state);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.base.id, crtc->base.name);
-
- crtc->active = false;
- crtc->base.enabled = false;
-
- drm_WARN_ON(&dev_priv->drm,
- drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
- crtc_state->uapi.active = false;
- crtc_state->uapi.connector_mask = 0;
- crtc_state->uapi.encoder_mask = 0;
- intel_crtc_free_hw_state(crtc_state);
- memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
-
- for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
- encoder->base.crtc = NULL;
-
- intel_fbc_disable(crtc);
- intel_update_watermarks(dev_priv);
- intel_disable_shared_dpll(crtc_state);
-
- intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
-
- cdclk_state->min_cdclk[pipe] = 0;
- cdclk_state->min_voltage_level[pipe] = 0;
- cdclk_state->active_pipes &= ~BIT(pipe);
-
- dbuf_state->active_pipes &= ~BIT(pipe);
-
- bw_state->data_rate[pipe] = 0;
- bw_state->num_active_planes[pipe] = 0;
-}
/*
* turn all crtc's off, but do not adjust state
@@ -2528,45 +2443,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
-/* Cross check the actual hw state with our own modeset state tracking (and it's
- * internal consistency). */
-static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.base.id, connector->base.name);
-
- if (connector->get_hw_state(connector)) {
- struct intel_encoder *encoder = intel_attached_encoder(connector);
-
- I915_STATE_WARN(!crtc_state,
- "connector enabled without attached crtc\n");
-
- if (!crtc_state)
- return;
-
- I915_STATE_WARN(!crtc_state->hw.active,
- "connector is active, but attached crtc isn't\n");
-
- if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
- return;
-
- I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
- "atomic encoder doesn't match attached encoder\n");
-
- I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
- "attached encoder crtc differs from connector crtc\n");
- } else {
- I915_STATE_WARN(crtc_state && crtc_state->hw.active,
- "attached crtc is active, but connector isn't\n");
- I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
- "best encoder set without crtc!\n");
- }
-}
-
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2708,8 +2584,8 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state
intel_crtc_compute_pixel_rate(crtc_state);
}
-static void intel_encoder_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+void intel_encoder_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
encoder->get_config(encoder, crtc_state);
@@ -2811,9 +2687,11 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_crtc_compute_config(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int intel_crtc_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
int ret;
ret = intel_crtc_compute_pipe_src(crtc_state);
@@ -3135,14 +3013,18 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
intel_bigjoiner_adjust_pipe_src(pipe_config);
}
-static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
+void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pipeconf = 0;
- /* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv))
+ /*
+ * - We keep both pipes enabled on 830
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
pipeconf |= PIPECONF_ENABLE;
if (crtc_state->double_wide)
@@ -3157,6 +3039,10 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
PIPECONF_DITHER_TYPE_SP;
switch (crtc_state->pipe_bpp) {
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
case 18:
pipeconf |= PIPECONF_BPC_6;
break;
@@ -3166,9 +3052,6 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
case 30:
pipeconf |= PIPECONF_BPC_10;
break;
- default:
- /* Case prevented by intel_choose_pipe_bpp_dither. */
- BUG();
}
}
@@ -3454,16 +3337,25 @@ out:
return ret;
}
-static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
+void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 val;
+ u32 val = 0;
- val = 0;
+ /*
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (!intel_crtc_needs_modeset(crtc_state))
+ val |= PIPECONF_ENABLE;
switch (crtc_state->pipe_bpp) {
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
case 18:
val |= PIPECONF_BPC_6;
break;
@@ -3476,9 +3368,6 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
case 36:
val |= PIPECONF_BPC_12;
break;
- default:
- /* Case prevented by intel_choose_pipe_bpp_dither. */
- BUG();
}
if (crtc_state->dither)
@@ -3519,6 +3408,13 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
+ /*
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (!intel_crtc_needs_modeset(crtc_state))
+ val |= PIPECONF_ENABLE;
+
if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
@@ -4246,7 +4142,7 @@ out:
return active;
}
-static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
+bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -4980,45 +4876,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return 0;
}
-static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
-{
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state = connector->base.state;
- struct intel_encoder *encoder =
- to_intel_encoder(connector->base.encoder);
-
- if (conn_state->crtc)
- drm_connector_put(&connector->base);
-
- if (encoder) {
- struct intel_crtc *crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- conn_state->best_encoder = &encoder->base;
- conn_state->crtc = &crtc->base;
- conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
-
- drm_connector_get(&connector->base);
- } else {
- conn_state->best_encoder = NULL;
- conn_state->crtc = NULL;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-}
-
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -5040,27 +4903,28 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
return -EINVAL;
}
- if (bpp < pipe_config->pipe_bpp) {
+ if (bpp < crtc_state->pipe_bpp) {
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
- "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ "[CONNECTOR:%d:%s] Limiting display bpp to %d "
+ "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
bpp, 3 * info->bpc,
3 * conn_state->max_requested_bpc,
- pipe_config->pipe_bpp);
+ crtc_state->pipe_bpp);
- pipe_config->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = bpp;
}
return 0;
}
static int
-compute_baseline_pipe_bpp(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+compute_baseline_pipe_bpp(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -5073,16 +4937,16 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
else
bpp = 8*3;
- pipe_config->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = bpp;
/* Clamp display bpp to connector max bpp */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
int ret;
if (connector_state->crtc != &crtc->base)
continue;
- ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+ ret = compute_sink_pipe_bpp(connector_state, crtc_state);
if (ret)
return ret;
}
@@ -5090,310 +4954,6 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return 0;
}
-static void intel_dump_crtc_timings(struct drm_i915_private *i915,
- const struct drm_display_mode *mode)
-{
- drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
-}
-
-static void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
- const char *id, unsigned int lane_count,
- const struct intel_link_m_n *m_n)
-{
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-
- drm_dbg_kms(&i915->drm,
- "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->data_m, m_n->data_n,
- m_n->link_m, m_n->link_n, m_n->tu);
-}
-
-static void
-intel_dump_infoframe(struct drm_i915_private *dev_priv,
- const union hdmi_infoframe *frame)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
-}
-
-static void
-intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
- const struct drm_dp_vsc_sdp *vsc)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
-}
-
-#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
-
-static const char * const output_type_str[] = {
- OUTPUT_TYPE(UNUSED),
- OUTPUT_TYPE(ANALOG),
- OUTPUT_TYPE(DVO),
- OUTPUT_TYPE(SDVO),
- OUTPUT_TYPE(LVDS),
- OUTPUT_TYPE(TVOUT),
- OUTPUT_TYPE(HDMI),
- OUTPUT_TYPE(DP),
- OUTPUT_TYPE(EDP),
- OUTPUT_TYPE(DSI),
- OUTPUT_TYPE(DDI),
- OUTPUT_TYPE(DP_MST),
-};
-
-#undef OUTPUT_TYPE
-
-static void snprintf_output_types(char *buf, size_t len,
- unsigned int output_types)
-{
- char *str = buf;
- int i;
-
- str[0] = '\0';
-
- for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
- int r;
-
- if ((output_types & BIT(i)) == 0)
- continue;
-
- r = snprintf(str, len, "%s%s",
- str != buf ? "," : "", output_type_str[i]);
- if (r >= len)
- break;
- str += r;
- len -= r;
-
- output_types &= ~BIT(i);
- }
-
- WARN_ON_ONCE(output_types != 0);
-}
-
-static const char * const output_format_str[] = {
- [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
- [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
- [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
-};
-
-static const char *output_formats(enum intel_output_format format)
-{
- if (format >= ARRAY_SIZE(output_format_str))
- return "invalid";
- return output_format_str[format];
-}
-
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- if (!fb) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- str_yes_no(plane_state->uapi.visible));
- return;
- }
-
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height, &fb->format->format,
- fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
- if (plane_state->uapi.visible)
- drm_dbg_kms(&i915->drm,
- "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
-}
-
-static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state,
- const char *context)
-{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- char buf[64];
- int i;
-
- drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
- crtc->base.base.id, crtc->base.name,
- str_yes_no(pipe_config->hw.enable), context);
-
- if (!pipe_config->hw.enable)
- goto dump_planes;
-
- snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- drm_dbg_kms(&dev_priv->drm,
- "active: %s, output_types: %s (0x%x), output format: %s\n",
- str_yes_no(pipe_config->hw.active),
- buf, pipe_config->output_types,
- output_formats(pipe_config->output_format));
-
- drm_dbg_kms(&dev_priv->drm,
- "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
-
- drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
-
- drm_dbg_kms(&dev_priv->drm,
- "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
- transcoder_name(pipe_config->master_transcoder),
- pipe_config->sync_mode_slaves_mask);
-
- drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n",
- intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
- intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
- pipe_config->bigjoiner_pipes);
-
- drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
- str_enabled_disabled(pipe_config->splitter.enable),
- pipe_config->splitter.link_count,
- pipe_config->splitter.pixel_overlap);
-
- if (pipe_config->has_pch_encoder)
- intel_dump_m_n_config(pipe_config, "fdi",
- pipe_config->fdi_lanes,
- &pipe_config->fdi_m_n);
-
- if (intel_crtc_has_dp_encoder(pipe_config)) {
- intel_dump_m_n_config(pipe_config, "dp m_n",
- pipe_config->lane_count,
- &pipe_config->dp_m_n);
- intel_dump_m_n_config(pipe_config, "dp m2_n2",
- pipe_config->lane_count,
- &pipe_config->dp_m2_n2);
- }
-
- drm_dbg_kms(&dev_priv->drm, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
-
- drm_dbg_kms(&dev_priv->drm,
- "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
-
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
- pipe_config->infoframes.gcp);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(DP_SDP_VSC))
- intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
-
- drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
- str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
-
- drm_dbg_kms(&dev_priv->drm, "requested mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.mode));
- drm_dbg_kms(&dev_priv->drm, "adjusted mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
- intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
- drm_dbg_kms(&dev_priv->drm, "pipe mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
- intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
- drm_dbg_kms(&dev_priv->drm,
- "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
- pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
-
- drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
- pipe_config->linetime, pipe_config->ips_linetime);
-
- if (DISPLAY_VER(dev_priv) >= 9)
- drm_dbg_kms(&dev_priv->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
-
- if (HAS_GMCH(dev_priv))
- drm_dbg_kms(&dev_priv->drm,
- "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- else
- drm_dbg_kms(&dev_priv->drm,
- "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
- DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
- str_enabled_disabled(pipe_config->pch_pfit.enabled),
- str_yes_no(pipe_config->pch_pfit.force_thru));
-
- drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i, drrs: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide,
- pipe_config->has_drrs);
-
- intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
-
- if (IS_CHERRYVIEW(dev_priv))
- drm_dbg_kms(&dev_priv->drm,
- "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
- else
- drm_dbg_kms(&dev_priv->drm,
- "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
-
- drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
- pipe_config->hw.degamma_lut ?
- drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
- pipe_config->hw.gamma_lut ?
- drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
-
-dump_planes:
- if (!state)
- return;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe == crtc->pipe)
- intel_dump_plane_state(plane_state);
- }
-}
-
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -5500,27 +5060,6 @@ intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}
-static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
-{
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- return;
-
- crtc_state->uapi.enable = crtc_state->hw.enable;
- crtc_state->uapi.active = crtc_state->hw.active;
- drm_WARN_ON(crtc_state->uapi.crtc->dev,
- drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
-
- crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
- crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
-
- drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
- crtc_state->hw.degamma_lut);
- drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
- crtc_state->hw.gamma_lut);
- drm_property_replace_blob(&crtc_state->uapi.ctm,
- crtc_state->hw.ctm);
-}
-
static void
copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
struct intel_crtc *slave_crtc)
@@ -5636,40 +5175,39 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->uapi.crtc;
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int pipe_src_w, pipe_src_h;
int base_bpp, ret, i;
bool retry = true;
- pipe_config->cpu_transcoder =
- (enum transcoder) to_intel_crtc(crtc)->pipe;
+ crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->framestart_delay = 1;
+ crtc_state->framestart_delay = 1;
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->hw.adjusted_mode.flags &
+ if (!(crtc_state->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->hw.adjusted_mode.flags &
+ if (!(crtc_state->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
- ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
- pipe_config);
+ ret = compute_baseline_pipe_bpp(state, crtc);
if (ret)
return ret;
- base_bpp = pipe_config->pipe_bpp;
+ base_bpp = crtc_state->pipe_bpp;
/*
* Determine the real pipe dimensions. Note that stereo modes can
@@ -5679,21 +5217,22 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_get_hv_timing(&pipe_config->hw.mode,
+ drm_mode_get_hv_timing(&crtc_state->hw.mode,
&pipe_src_w, &pipe_src_h);
- drm_rect_init(&pipe_config->pipe_src, 0, 0,
+ drm_rect_init(&crtc_state->pipe_src, 0, 0,
pipe_src_w, pipe_src_h);
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(connector_state->best_encoder);
- if (connector_state->crtc != crtc)
+ if (connector_state->crtc != &crtc->base)
continue;
- if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+ if (!check_single_encoder_cloning(state, crtc, encoder)) {
drm_dbg_kms(&i915->drm,
- "rejecting invalid cloning configuration\n");
+ "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
+ encoder->base.base.id, encoder->base.name);
return -EINVAL;
}
@@ -5702,20 +5241,20 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
* hooks so that the hooks can use this information safely.
*/
if (encoder->compute_output_type)
- pipe_config->output_types |=
- BIT(encoder->compute_output_type(encoder, pipe_config,
+ crtc_state->output_types |=
+ BIT(encoder->compute_output_type(encoder, crtc_state,
connector_state));
else
- pipe_config->output_types |= BIT(encoder->type);
+ crtc_state->output_types |= BIT(encoder->type);
}
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
- pipe_config->port_clock = 0;
- pipe_config->pixel_multiplier = 1;
+ crtc_state->port_clock = 0;
+ crtc_state->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
+ drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
@@ -5726,39 +5265,43 @@ encoder_retry:
struct intel_encoder *encoder =
to_intel_encoder(connector_state->best_encoder);
- if (connector_state->crtc != crtc)
+ if (connector_state->crtc != &crtc->base)
continue;
- ret = encoder->compute_config(encoder, pipe_config,
+ ret = encoder->compute_config(encoder, crtc_state,
connector_state);
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
+ encoder->base.base.id, encoder->base.name, ret);
return ret;
}
}
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
- if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
- * pipe_config->pixel_multiplier;
+ if (!crtc_state->port_clock)
+ crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
+ * crtc_state->pixel_multiplier;
- ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ ret = intel_crtc_compute_config(state, crtc);
if (ret == -EDEADLK)
return ret;
if (ret == -EAGAIN) {
if (drm_WARN(&i915->drm, !retry,
- "loop in pipe configuration computation\n"))
+ "[CRTC:%d:%s] loop in pipe configuration computation\n",
+ crtc->base.base.id, crtc->base.name))
return -EINVAL;
- drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
+ crtc->base.base.id, crtc->base.name);
retry = false;
goto encoder_retry;
}
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
+ crtc->base.base.id, crtc->base.name, ret);
return ret;
}
@@ -5766,21 +5309,22 @@ encoder_retry:
* only enable it on 6bpc panels and when it's not a compliance
* test requesting 6bpc video pattern.
*/
- pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
- !pipe_config->dither_force_disable;
+ crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
+ !crtc_state->dither_force_disable;
drm_dbg_kms(&i915->drm,
- "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
- base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+ crtc->base.base.id, crtc->base.name,
+ base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
return 0;
}
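A quick sketch of the dither arithmetic above (hypothetical helper, names invented purely for illustration): pipe_bpp is the total bits per pixel, i.e. three times the per-component depth, so 6*3 == 18 identifies a 6 bpc panel.

	static bool wants_6bpc_dither(int pipe_bpp, bool dither_force_disable)
	{
		/* 6 bpc panel: 3 components x 6 bits == 18 bits per pixel */
		return pipe_bpp == 6 * 3 && !dither_force_disable;
	}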
static int
-intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
+intel_modeset_pipe_config_late(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector_state *conn_state;
struct drm_connector *connector;
int i;
@@ -5971,7 +5515,7 @@ static bool fastboot_enabled(struct drm_i915_private *dev_priv)
return false;
}
-static bool
+bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
@@ -6077,6 +5621,28 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_TIMINGS(name) do { \
+ PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
+ PIPE_CONF_CHECK_I(name.crtc_htotal); \
+ PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
+ PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
+ PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
+ PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
+ PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
+ PIPE_CONF_CHECK_I(name.crtc_vtotal); \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
+ PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
+ PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
+} while (0)
+
+#define PIPE_CONF_CHECK_RECT(name) do { \
+ PIPE_CONF_CHECK_I(name.x1); \
+ PIPE_CONF_CHECK_I(name.x2); \
+ PIPE_CONF_CHECK_I(name.y1); \
+ PIPE_CONF_CHECK_I(name.y2); \
+} while (0)
+
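For reference, the two helpers added above are thin wrappers over the existing PIPE_CONF_CHECK_I field check, so a single call such as PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode) in the hunk below expands to the same twelve per-field comparisons that were previously open-coded (illustrative expansion, not additional patch content):

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
	/* ... hblank/hsync and the corresponding vertical fields ... */
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

Likewise, PIPE_CONF_CHECK_RECT(pipe_src) covers pipe_src.x1, .x2, .y1 and .y2.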
/* This is required for BDW+ where there is only one set of registers for
* switching between high and low RR.
* This macro can be used whenever a comparison has to be made between one
@@ -6173,7 +5739,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
+ PIPE_CONF_CHECK_I(hw.enable);
+ PIPE_CONF_CHECK_I(hw.active);
+
PIPE_CONF_CHECK_I(cpu_transcoder);
+ PIPE_CONF_CHECK_I(mst_master_transcoder);
PIPE_CONF_CHECK_BOOL(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
@@ -6194,33 +5764,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(framestart_delay);
PIPE_CONF_CHECK_I(msa_timing_delay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
+ PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
PIPE_CONF_CHECK_I(pixel_multiplier);
@@ -6264,18 +5809,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
if (!fastset) {
- PIPE_CONF_CHECK_I(pipe_src.x1);
- PIPE_CONF_CHECK_I(pipe_src.y1);
- PIPE_CONF_CHECK_I(pipe_src.x2);
- PIPE_CONF_CHECK_I(pipe_src.y2);
+ PIPE_CONF_CHECK_RECT(pipe_src);
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
- if (current_config->pch_pfit.enabled) {
- PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
- PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
- PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
- PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
- }
+ PIPE_CONF_CHECK_RECT(pch_pfit.dst);
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
@@ -6379,8 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(splitter.link_count);
PIPE_CONF_CHECK_I(splitter.pixel_overlap);
- PIPE_CONF_CHECK_I(mst_master_transcoder);
-
PIPE_CONF_CHECK_BOOL(vrr.enable);
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
@@ -6396,295 +5931,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
+#undef PIPE_CONF_CHECK_TIMINGS
+#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK
return ret;
}
-static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (pipe_config->has_pch_encoder) {
- int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
- &pipe_config->fdi_m_n);
- int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
-
- /*
- * FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
- */
- drm_WARN(&dev_priv->drm,
- !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- fdi_dotclock, dotclock);
- }
-}
-
-static void verify_wm_state(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct intel_plane *plane;
- u8 hw_enabled_slices;
-
- if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->dbuf.enabled_slices)
- drm_err(&dev_priv->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
-}
-
-static void
-verify_connector_state(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
- int i;
-
- for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
- struct drm_encoder *encoder = connector->encoder;
- struct intel_crtc_state *crtc_state = NULL;
-
- if (new_conn_state->crtc != &crtc->base)
- continue;
-
- if (crtc)
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
-
- intel_connector_verify_state(crtc_state, new_conn_state);
-
- I915_STATE_WARN(new_conn_state->best_encoder != encoder,
- "connector's atomic encoder doesn't match legacy encoder\n");
- }
-}
-
-static void
-verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
-{
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *old_conn_state, *new_conn_state;
- int i;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- bool enabled = false, found = false;
- enum pipe pipe;
-
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
- encoder->base.base.id,
- encoder->base.name);
-
- for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
- new_conn_state, i) {
- if (old_conn_state->best_encoder == &encoder->base)
- found = true;
-
- if (new_conn_state->best_encoder != &encoder->base)
- continue;
- found = enabled = true;
-
- I915_STATE_WARN(new_conn_state->crtc !=
- encoder->base.crtc,
- "connector's crtc doesn't match encoder crtc\n");
- }
-
- if (!found)
- continue;
-
- I915_STATE_WARN(!!encoder->base.crtc != enabled,
- "encoder's enabled state mismatch "
- "(expected %i, found %i)\n",
- !!encoder->base.crtc, enabled);
-
- if (!encoder->base.crtc) {
- bool active;
-
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active,
- "encoder detached but still enabled on pipe %c.\n",
- pipe_name(pipe));
- }
- }
-}
-
-static void
-verify_crtc_state(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
- struct intel_crtc_state *pipe_config = old_crtc_state;
- struct drm_atomic_state *state = old_crtc_state->uapi.state;
- struct intel_crtc *master_crtc;
-
- __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
- intel_crtc_free_hw_state(old_crtc_state);
- intel_crtc_state_reset(old_crtc_state, crtc);
- old_crtc_state->uapi.state = state;
-
- drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
- crtc->base.name);
-
- pipe_config->hw.enable = new_crtc_state->hw.enable;
-
- intel_crtc_get_pipe_config(pipe_config);
-
- /* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv) && pipe_config->hw.active)
- pipe_config->hw.active = new_crtc_state->hw.active;
-
- I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n",
- new_crtc_state->hw.active, pipe_config->hw.active);
-
- I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n",
- new_crtc_state->hw.active, crtc->active);
-
- master_crtc = intel_master_crtc(new_crtc_state);
-
- for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
- enum pipe pipe;
- bool active;
-
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->hw.active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active,
- new_crtc_state->hw.active);
-
- I915_STATE_WARN(active && master_crtc->pipe != pipe,
- "Encoder connected to wrong pipe %c\n",
- pipe_name(pipe));
-
- if (active)
- intel_encoder_get_config(encoder, pipe_config);
- }
-
- if (!new_crtc_state->hw.active)
- return;
-
- intel_pipe_config_sanity_check(dev_priv, pipe_config);
-
- if (!intel_pipe_config_compare(new_crtc_state,
- pipe_config, false)) {
- I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
- intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
- }
-}
-
static void
intel_verify_planes(struct intel_atomic_state *state)
{
@@ -6698,167 +5951,6 @@ intel_verify_planes(struct intel_atomic_state *state)
plane_state->uapi.visible);
}
-static void
-verify_single_dpll_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_dpll_hw_state dpll_hw_state;
- u8 pipe_mask;
- bool active;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
-
- drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
-
- active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
-
- if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
- I915_STATE_WARN(!pll->on && pll->active_mask,
- "pll in active use but not on in sw tracking\n");
- I915_STATE_WARN(pll->on && !pll->active_mask,
- "pll is on but not used by any active pipe\n");
- I915_STATE_WARN(pll->on != active,
- "pll on state mismatch (expected %i, found %i)\n",
- pll->on, active);
- }
-
- if (!crtc) {
- I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
- "more active pll users than references: 0x%x vs 0x%x\n",
- pll->active_mask, pll->state.pipe_mask);
-
- return;
- }
-
- pipe_mask = BIT(crtc->pipe);
-
- if (new_crtc_state->hw.active)
- I915_STATE_WARN(!(pll->active_mask & pipe_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
- else
- I915_STATE_WARN(pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
-
- I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
- pipe_mask, pll->state.pipe_mask);
-
- I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
- &dpll_hw_state,
- sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
-}
-
-static void
-verify_shared_dpll_state(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (new_crtc_state->shared_dpll)
- verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
-
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
- u8 pipe_mask = BIT(crtc->pipe);
- struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
-
- I915_STATE_WARN(pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
- "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->state.pipe_mask);
- }
-}
-
-static void
-verify_mpllb_state(struct intel_atomic_state *state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_mpllb_state mpllb_hw_state = { 0 };
- struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct intel_encoder *encoder;
-
- if (!IS_DG2(i915))
- return;
-
- if (!new_crtc_state->hw.active)
- return;
-
- encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
- intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
-
-#define MPLLB_CHECK(name) do { \
- if (mpllb_sw_state->name != mpllb_hw_state.name) { \
- pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
- "(expected 0x%08x, found 0x%08x)", \
- mpllb_sw_state->name, \
- mpllb_hw_state.name); \
- } \
-} while (0)
-
- MPLLB_CHECK(mpllb_cp);
- MPLLB_CHECK(mpllb_div);
- MPLLB_CHECK(mpllb_div2);
- MPLLB_CHECK(mpllb_fracn1);
- MPLLB_CHECK(mpllb_fracn2);
- MPLLB_CHECK(mpllb_sscen);
- MPLLB_CHECK(mpllb_sscstep);
-
- /*
- * ref_control is handled by the hardware/firmware and never
- * programmed by the software, but the proper values are supplied
- * in the bspec for verification purposes.
- */
- MPLLB_CHECK(ref_control);
-
-#undef MPLLB_CHECK
-}
-
-static void
-intel_modeset_verify_crtc(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
- return;
-
- verify_wm_state(crtc, new_crtc_state);
- verify_connector_state(state, crtc);
- verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
- verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
- verify_mpllb_state(state, new_crtc_state);
-}
-
-static void
-verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
-{
- int i;
-
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv,
- &dev_priv->dpll.shared_dplls[i],
- NULL, NULL);
-}
-
-static void
-intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state)
-{
- verify_encoder_state(dev_priv, state);
- verify_connector_state(state, NULL);
- verify_disabled_dpll_state(dev_priv);
-}
-
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -6897,8 +5989,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state)
return 0;
}
-static void
-intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -7733,7 +6824,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (!new_crtc_state->hw.enable)
continue;
- ret = intel_modeset_pipe_config(state, new_crtc_state);
+ ret = intel_modeset_pipe_config(state, crtc);
if (ret)
goto fail;
@@ -7747,7 +6838,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- ret = intel_modeset_pipe_config_late(new_crtc_state);
+ ret = intel_modeset_pipe_config_late(state, crtc);
if (ret)
goto fail;
@@ -7871,9 +6962,9 @@ static int intel_atomic_check(struct drm_device *dev,
!new_crtc_state->update_pipe)
continue;
- intel_dump_pipe_config(new_crtc_state, state,
- intel_crtc_needs_modeset(new_crtc_state) ?
- "[modeset]" : "[fastset]");
+ intel_crtc_state_dump(new_crtc_state, state,
+ intel_crtc_needs_modeset(new_crtc_state) ?
+ "modeset" : "fastset");
}
return 0;
@@ -7888,7 +6979,7 @@ static int intel_atomic_check(struct drm_device *dev,
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i)
- intel_dump_pipe_config(new_crtc_state, state, "[failed]");
+ intel_crtc_state_dump(new_crtc_state, state, "failed");
return ret;
}
@@ -8452,7 +7543,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
- modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
+ intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
}
}
@@ -8552,7 +7643,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
+ intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -9689,7 +8780,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
@@ -9842,580 +8933,17 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(dev_priv, DPLL(pipe));
}
-static void
-intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- if (DISPLAY_VER(dev_priv) >= 4)
- return;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_crtc *plane_crtc;
- enum pipe pipe;
-
- if (!plane->get_hw_state(plane, &pipe))
- continue;
-
- if (pipe == crtc->pipe)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
- plane->base.base.id, plane->base.name);
-
- plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
- intel_plane_disable_noatomic(plane_crtc, plane);
- }
-}
-
-static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- return true;
-
- return false;
-}
-
-static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
-{
- struct drm_device *dev = encoder->base.dev;
- struct intel_connector *connector;
-
- for_each_connector_on_encoder(dev, &encoder->base, connector)
- return connector;
-
- return NULL;
-}
-
-static void intel_sanitize_crtc(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
-
- if (crtc_state->hw.active) {
- struct intel_plane *plane;
-
- /* Disable everything but the primary plane */
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->uapi.visible &&
- plane->base.type != DRM_PLANE_TYPE_PRIMARY)
- intel_plane_disable_noatomic(crtc, plane);
- }
-
- /* Disable any background color/etc. set by the BIOS */
- intel_color_commit_noarm(crtc_state);
- intel_color_commit_arm(crtc_state);
- }
-
- /* Adjust the state of the output pipe according to whether we
- * have active connectors/encoders. */
- if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
- !intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_crtc_disable_noatomic(crtc, ctx);
-
- if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
- /*
- * We start out with underrun reporting disabled to avoid races.
- * For correct bookkeeping mark this on active crtcs.
- *
- * Also on gmch platforms we don't have any hardware bits to
- * disable the underrun reporting. Which means we need to start
- * out with underrun reporting disabled also on inactive pipes,
- * since otherwise we'll complain about the garbage we read when
- * e.g. coming up after runtime pm.
- *
- * No protection against concurrent access is required - at
- * worst a fifo underrun happens which also sets this to false.
- */
- crtc->cpu_fifo_underrun_disabled = true;
- /*
- * We track the PCH transcoder underrun reporting state
- * within the crtc. With crtc for pipe A housing the underrun
- * reporting state for PCH transcoder A, crtc for pipe B housing
- * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
- * and marking underrun reporting as disabled for the non-existing
- * PCH transcoders B and C would prevent enabling the south
- * error interrupt (see cpt_can_enable_serr_int()).
- */
- if (intel_has_pch_trancoder(dev_priv, crtc->pipe))
- crtc->pch_fifo_underrun_disabled = true;
- }
-}
-
-static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- /*
- * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
- * the hardware when a high-res display is plugged in. DPLL P
- * divider is zero, and the pipe timings are bonkers. We'll
- * try to disable everything in that case.
- *
- * FIXME would be nice to be able to sanitize this state
- * without several WARNs, but for now let's take the easy
- * road.
- */
- return IS_SANDYBRIDGE(dev_priv) &&
- crtc_state->hw.active &&
- crtc_state->shared_dpll &&
- crtc_state->port_clock == 0;
-}
-
-static void intel_sanitize_encoder(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_connector *connector;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_crtc_state *crtc_state = crtc ?
- to_intel_crtc_state(crtc->base.state) : NULL;
-
- /* We need to check both for a crtc link (meaning that the
- * encoder is active and trying to read from a pipe) and the
- * pipe itself being active. */
- bool has_active_crtc = crtc_state &&
- crtc_state->hw.active;
-
- if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
- "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
- pipe_name(crtc->pipe));
- has_active_crtc = false;
- }
-
- connector = intel_encoder_find_connector(encoder);
- if (connector && !has_active_crtc) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
- encoder->base.base.id,
- encoder->base.name);
-
- /* Connector is active, but has no active pipe. This is
- * fallout from our resume register restoring. Disable
- * the encoder manually again. */
- if (crtc_state) {
- struct drm_encoder *best_encoder;
-
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] manually disabled\n",
- encoder->base.base.id,
- encoder->base.name);
-
- /* avoid oopsing in case the hooks consult best_encoder */
- best_encoder = connector->base.state->best_encoder;
- connector->base.state->best_encoder = &encoder->base;
-
- /* FIXME NULL atomic state passed! */
- if (encoder->disable)
- encoder->disable(NULL, encoder, crtc_state,
- connector->base.state);
- if (encoder->post_disable)
- encoder->post_disable(NULL, encoder, crtc_state,
- connector->base.state);
-
- connector->base.state->best_encoder = best_encoder;
- }
- encoder->base.crtc = NULL;
-
- /* Inconsistent output/port/pipe state happens presumably due to
- * a bug in one of the get_hw_state functions. Or someplace else
- * in our code, like the register restore mess on resume. Clamp
- * things to off as a safer default. */
-
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
-
- /* notify opregion of the sanitized encoder state */
- intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
-
- if (HAS_DDI(dev_priv))
- intel_ddi_sanitize_encoder_pll_mapping(encoder);
-}
-
-/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state;
- enum pipe pipe = PIPE_A;
- bool visible;
-
- visible = plane->get_hw_state(plane, &pipe);
-
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- intel_set_plane_visible(crtc_state, plane_state, visible);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
- plane->base.base.id, plane->base.name,
- str_enabled_disabled(visible), pipe_name(pipe));
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- fixup_plane_bitmasks(crtc_state);
- }
-}
-
-static void intel_modeset_readout_hw_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(dev_priv->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- enum pipe pipe;
- struct intel_crtc *crtc;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
- u8 active_pipes = 0;
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
- intel_crtc_free_hw_state(crtc_state);
- intel_crtc_state_reset(crtc_state, crtc);
-
- intel_crtc_get_pipe_config(crtc_state);
-
- crtc_state->hw.enable = crtc_state->hw.active;
-
- crtc->base.enabled = crtc_state->hw.enable;
- crtc->active = crtc_state->hw.active;
-
- if (crtc_state->hw.active)
- active_pipes |= BIT(crtc->pipe);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] hw state readout: %s\n",
- crtc->base.base.id, crtc->base.name,
- str_enabled_disabled(crtc_state->hw.active));
- }
-
- cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
-
- readout_plane_state(dev_priv);
-
- for_each_intel_encoder(dev, encoder) {
- struct intel_crtc_state *crtc_state = NULL;
-
- pipe = 0;
-
- if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- encoder->base.crtc = &crtc->base;
- intel_encoder_get_config(encoder, crtc_state);
-
- /* read out to slave crtc as well for bigjoiner */
- if (crtc_state->bigjoiner_pipes) {
- struct intel_crtc *slave_crtc;
-
- /* encoder should be linked to bigjoiner master */
- WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
- struct intel_crtc_state *slave_crtc_state;
-
- slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
- intel_encoder_get_config(encoder, slave_crtc_state);
- }
- }
- } else {
- encoder->base.crtc = NULL;
- }
-
- if (encoder->sync_state)
- encoder->sync_state(encoder, crtc_state);
-
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- str_enabled_disabled(encoder->base.crtc),
- pipe_name(pipe));
- }
-
- intel_dpll_readout_hw_state(dev_priv);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->get_hw_state(connector)) {
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- connector->base.dpms = DRM_MODE_DPMS_ON;
-
- encoder = intel_attached_encoder(connector);
- connector->base.encoder = &encoder->base;
-
- crtc = to_intel_crtc(encoder->base.crtc);
- crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
-
- if (crtc_state && crtc_state->hw.active) {
- /*
- * This has to be done during hardware readout
- * because anything calling .crtc_disable may
- * rely on the connector_mask being accurate.
- */
- crtc_state->uapi.connector_mask |=
- drm_connector_mask(&connector->base);
- crtc_state->uapi.encoder_mask |=
- drm_encoder_mask(&encoder->base);
- }
- } else {
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
- drm_dbg_kms(&dev_priv->drm,
- "[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id, connector->base.name,
- str_enabled_disabled(connector->base.encoder));
- }
- drm_connector_list_iter_end(&conn_iter);
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_bw_state *bw_state =
- to_intel_bw_state(dev_priv->bw_obj.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane;
- int min_cdclk = 0;
-
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
-
- intel_crtc_update_active_timings(crtc_state);
-
- intel_crtc_copy_hw_to_uapi_state(crtc_state);
- }
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- /*
- * FIXME don't have the fb yet, so can't
- * use intel_plane_data_rate() :(
- */
- if (plane_state->uapi.visible)
- crtc_state->data_rate[plane->id] =
- 4 * crtc_state->pixel_rate;
- /*
- * FIXME don't have the fb yet, so can't
- * use plane->min_cdclk() :(
- */
- if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
- crtc_state->min_cdclk[plane->id] =
- DIV_ROUND_UP(crtc_state->pixel_rate, 2);
- else
- crtc_state->min_cdclk[plane->id] =
- crtc_state->pixel_rate;
- }
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_cdclk %d kHz\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
- }
-
- if (crtc_state->hw.active) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (drm_WARN_ON(dev, min_cdclk < 0))
- min_cdclk = 0;
- }
-
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
- cdclk_state->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
- intel_bw_crtc_update(bw_state, crtc_state);
-
- intel_pipe_config_sanity_check(dev_priv, crtc_state);
- }
-}
-
-static void
-get_encoder_power_domains(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_crtc_state *crtc_state;
-
- if (!encoder->get_power_domains)
- continue;
-
- /*
- * MST-primary and inactive encoders don't have a crtc state
- * and neither of these require any power domain references.
- */
- if (!encoder->base.crtc)
- continue;
-
- crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
- encoder->get_power_domains(encoder, crtc_state);
- }
-}
-
-static void intel_early_display_was(struct drm_i915_private *dev_priv)
-{
- /*
- * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
- * Also known as Wa_14010480278.
- */
- if (IS_DISPLAY_VER(dev_priv, 10, 12))
- intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
- intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
-
- if (IS_HASWELL(dev_priv)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- intel_de_write(dev_priv, CHICKEN_PAR1_1,
- intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
-
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
- /* Display WA #1142:kbl,cfl,cml */
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
- intel_de_rmw(dev_priv, CHICKEN_MISC_2,
- KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
- KBL_ARB_FILL_SPARE_14);
- }
-}
-
-
-/* Scan out the current hw modeset state,
- * and sanitize it to the current state
- */
-static void
-intel_modeset_setup_hw_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
-
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
- intel_early_display_was(dev_priv);
- intel_modeset_readout_hw_state(dev);
-
- /* HW state is read out, now we need to sanitize this mess. */
- get_encoder_power_domains(dev_priv);
-
- intel_pch_sanitize(dev_priv);
-
- /*
- * intel_sanitize_plane_mapping() may need to do vblank
- * waits, so we need vblank interrupts restored beforehand.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- drm_crtc_vblank_reset(&crtc->base);
-
- if (crtc_state->hw.active)
- intel_crtc_vblank_on(crtc_state);
- }
-
- intel_fbc_sanitize(dev_priv);
-
- intel_sanitize_plane_mapping(dev_priv);
-
- for_each_intel_encoder(dev, encoder)
- intel_sanitize_encoder(encoder);
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
- }
-
- intel_modeset_update_connector_atomic_state(dev);
-
- intel_dpll_sanitize_state(dev_priv);
-
- if (IS_G4X(dev_priv)) {
- g4x_wm_get_hw_state(dev_priv);
- g4x_wm_sanitize(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_wm_get_hw_state(dev_priv);
- vlv_wm_sanitize(dev_priv);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
- skl_wm_get_hw_state(dev_priv);
- skl_wm_sanitize(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_wm_get_hw_state(dev_priv);
- }
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_power_domain_mask put_domains;
-
- modeset_get_crtc_power_domains(crtc_state, &put_domains);
- if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
- modeset_put_crtc_power_domains(crtc, &put_domains);
- }
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- intel_power_domains_sanitize_state(dev_priv);
-}
-
void intel_display_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_atomic_state *state = i915->modeset_restore_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
- dev_priv->modeset_restore_state = NULL;
+ i915->modeset_restore_state = NULL;
if (state)
state->acquire_ctx = &ctx;
@@ -10430,14 +8958,14 @@ void intel_display_resume(struct drm_device *dev)
}
if (!ret)
- ret = __intel_display_resume(dev, state, &ctx);
+ ret = __intel_display_resume(i915, state, &ctx);
- intel_enable_ipc(dev_priv);
+ intel_enable_ipc(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 187910d94ec6..fa5371036239 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -56,6 +56,7 @@ struct intel_initial_plane_config;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
+struct intel_power_domain_mask;
struct intel_remapped_info;
struct intel_rotation_info;
struct pci_dev;
@@ -192,7 +193,7 @@ enum plane_id {
#define for_each_dbuf_slice(__dev_priv, __slice) \
for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
- for_each_if(INTEL_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
+ for_each_if(INTEL_INFO(__dev_priv)->display.dbuf.slice_mask & BIT(__slice))
#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
for_each_dbuf_slice((__dev_priv), (__slice)) \
@@ -559,8 +560,15 @@ bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
+bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ const struct intel_crtc_state *pipe_config,
+ bool fastset);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
+void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -583,6 +591,8 @@ int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+void intel_encoder_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
@@ -635,6 +645,7 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
@@ -652,10 +663,16 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
+void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible);
+void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);
+void intel_update_watermarks(struct drm_i915_private *i915);
+
/* modesetting */
bool intel_modeset_probe_defer(struct pci_dev *pdev);
void intel_modeset_init_hw(struct drm_i915_private *i915);
@@ -667,6 +684,10 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
int intel_modeset_all_pipes(struct intel_atomic_state *state);
+void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains);
+void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ struct intel_power_domain_mask *domains);
/* modesetting asserts */
void assert_transcoder(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 452d773fd4e3..6c3954479047 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -590,6 +590,8 @@ static void intel_connector_info(struct seq_file *m,
seq_puts(m, "\tHDCP version: ");
intel_hdcp_info(m, intel_connector);
+ seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
+
intel_panel_info(m, intel_connector);
seq_printf(m, "\tmodes:\n");
@@ -2202,6 +2204,29 @@ static const struct file_operations i915_dsc_bpp_fops = {
.write = i915_dsc_bpp_write
};
+/*
+ * Returns the current CRTC's bpc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
+ */
+static int i915_current_bpc_show(struct seq_file *m, void *data)
+{
+ struct intel_crtc *crtc = to_intel_crtc(m->private);
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ seq_printf(m, "Current: %u\n", crtc_state->pipe_bpp / 3);
+
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
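Once the file is registered (see the debugfs_create_file() call added further down), every CRTC debugfs directory gains a read-only i915_current_bpc entry. Assuming, for illustration, an active pipe running at 8 bpc (pipe_bpp == 24), reading it would look roughly like:

	$ cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
	Current: 8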
+
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -2272,4 +2297,7 @@ void intel_crtc_debugfs_add(struct drm_crtc *crtc)
crtc_updates_add(crtc);
intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
+
+ debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
+ &i915_current_bpc_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 949edc983a16..589af257edeb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -907,7 +907,9 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
if (!HAS_DISPLAY(dev_priv))
return 0;
- if (IS_DG1(dev_priv))
+ if (IS_DG2(dev_priv))
+ max_dc = 0;
+ else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
max_dc = 4;
@@ -1036,7 +1038,7 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
+ u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
enum dbuf_slice slice;
drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
@@ -1194,7 +1196,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
- if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(&dev_priv->drm,
"Failed to write to D_COMP\n");
} else {
@@ -1606,7 +1608,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 5be18eb94042..91cfd5890f46 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -474,7 +474,7 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915)
int ret, tries = 0;
while (1) {
- ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
+ ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
250, 1);
if (ret != -EAGAIN || ++tries == 3)
break;
@@ -1739,7 +1739,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 408152f9f46a..0da9b208d56e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
@@ -279,6 +280,76 @@ struct intel_panel_bl_funcs {
u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
};
+enum drrs_type {
+ DRRS_TYPE_NONE,
+ DRRS_TYPE_STATIC,
+ DRRS_TYPE_SEAMLESS,
+};
+
+struct intel_vbt_panel_data {
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits */
+ unsigned int panel_type:4;
+ unsigned int lvds_dither:1;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+ bool vrr;
+
+ u8 seamless_drrs_min_refresh_rate;
+ enum drrs_type drrs_type;
+
+ struct {
+ int max_link_rate;
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+ int bpp;
+ struct edp_power_seq pps;
+ u8 drrs_msa_timing_delay;
+ bool low_vswing;
+ bool initialized;
+ bool hobl;
+ } edp;
+
+ struct {
+ bool enable;
+ bool full_link;
+ bool require_aux_wakeup;
+ int idle_frames;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
+ int psr2_tp2_tp3_wakeup_time_us;
+ } psr;
+
+ struct {
+ u16 pwm_freq_hz;
+ u16 brightness_precision_bits;
+ bool present;
+ bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
+ u8 controller; /* brightness controller number */
+ enum intel_backlight_type type;
+ } backlight;
+
+ /* MIPI DSI */
+ struct {
+ u16 panel_id;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u16 bl_ports;
+ u16 cabc_ports;
+ u8 seq_version;
+ u32 size;
+ u8 *data;
+ const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+ enum drm_panel_orientation orientation;
+ } dsi;
+};
+
struct intel_panel {
struct list_head fixed_modes;
@@ -318,6 +389,8 @@ struct intel_panel {
const struct intel_panel_bl_funcs *pwm_funcs;
void (*power)(struct intel_connector *, bool enable);
} backlight;
+
+ struct intel_vbt_panel_data vbt;
};
struct intel_digital_port;
@@ -1474,6 +1547,7 @@ struct intel_pps {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ bool initializing;
unsigned long last_power_on;
unsigned long last_backlight_off;
ktime_t panel_power_off_time;
@@ -1496,6 +1570,7 @@ struct intel_pps {
*/
bool pps_reset;
struct edp_power_seq pps_delays;
+ struct edp_power_seq bios_pps_delays;
};
struct intel_psr {
@@ -1727,13 +1802,14 @@ static inline enum dpio_channel
vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
{
switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
case PORT_B:
case PORT_D:
return DPIO_CH0;
case PORT_C:
return DPIO_CH1;
- default:
- BUG();
}
}
@@ -1741,13 +1817,14 @@ static inline enum dpio_phy
vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
{
switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
case PORT_B:
case PORT_C:
return DPIO_PHY0;
case PORT_D:
return DPIO_PHY1;
- default:
- BUG();
}
}
@@ -1755,13 +1832,14 @@ static inline enum dpio_channel
vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
case PIPE_A:
case PIPE_C:
return DPIO_CH0;
case PIPE_B:
return DPIO_CH1;
- default:
- BUG();
}
}
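
A standalone sketch (not part of the patch) of the default-first switch pattern the three vlv_* helpers above were converted to: an unknown port is logged and then falls through to a sane return value instead of hitting BUG(). The log_missing_case() helper and the trimmed-down enums are illustrative stand-ins, not i915 APIs.

	#include <stdio.h>

	enum port { PORT_B, PORT_C, PORT_D, PORT_F };
	enum dpio_channel { DPIO_CH0, DPIO_CH1 };

	/* Illustrative stand-in for MISSING_CASE() */
	static void log_missing_case(int port)
	{
		fprintf(stderr, "missing case in switch: port %d\n", port);
	}

	static enum dpio_channel port_to_channel(enum port port)
	{
		switch (port) {
		default:
			log_missing_case(port);
			/* fall through to the most common mapping */
		case PORT_B:
		case PORT_D:
			return DPIO_CH0;
		case PORT_C:
			return DPIO_CH1;
		}
	}

	int main(void)
	{
		/* PORT_F is not handled: logs once, then returns DPIO_CH0 */
		printf("channel = %d\n", port_to_channel(PORT_F));
		return 0;
	}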
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 257cf662f9f4..fa9ef591b885 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,6 +52,10 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06)
+MODULE_FIRMWARE(DG2_DMC_PATH);
+
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
@@ -244,9 +248,14 @@ struct stepping_info {
char substepping;
};
+static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+{
+ return i915->dmc.dmc_info[dmc_id].payload;
+}
+
bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
- return i915->dmc.dmc_info[DMC_FW_MAIN].payload;
+ return has_dmc_id_fw(i915, DMC_FW_MAIN);
}
static const struct stepping_info *
@@ -268,6 +277,85 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
+static void
+disable_flip_queue_event(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ u32 event_ctl;
+ u32 event_htp;
+
+ event_ctl = intel_de_read(i915, ctl_reg);
+ event_htp = intel_de_read(i915, htp_reg);
+ if (event_ctl != (DMC_EVT_CTL_ENABLE |
+ DMC_EVT_CTL_RECURRING |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
+ !event_htp) {
+ drm_dbg_kms(&i915->drm,
+ "Unexpected DMC event configuration (control %08x htp %08x)\n",
+ event_ctl, event_htp);
+ return;
+ }
+
+ intel_de_write(i915, ctl_reg,
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE));
+ intel_de_write(i915, htp_reg, 0);
+}
+
+static bool
+get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+ i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
+{
+ switch (dmc_id) {
+ case DMC_FW_MAIN:
+ if (DISPLAY_VER(i915) == 12) {
+ *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
+ *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
+
+ return true;
+ }
+ break;
+ case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ if (IS_DG2(i915)) {
+ *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
+ *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
+
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static void
+disable_all_flip_queue_events(struct drm_i915_private *i915)
+{
+ int dmc_id;
+
+ /* TODO: check if the following applies to all D13+ platforms. */
+ if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
+ return;
+
+ for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ i915_reg_t ctl_reg;
+ i915_reg_t htp_reg;
+
+ if (!has_dmc_id_fw(i915, dmc_id))
+ continue;
+
+ if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
+ continue;
+
+ disable_flip_queue_event(i915, ctl_reg, htp_reg);
+ }
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
@@ -308,6 +396,13 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
dev_priv->dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
+
+ /*
+ * Flip queue events need to be disabled before enabling DC5/6.
+ * i915 doesn't use the flip queue feature, so disable it already
+ * here.
+ */
+ disable_all_flip_queue_events(dev_priv);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
@@ -374,6 +469,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
}
}
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+ const u32 *mmioaddr, u32 mmio_count,
+ int header_ver, u8 dmc_id)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ u32 start_range, end_range;
+ int i;
+
+ if (dmc_id >= DMC_FW_MAX) {
+ drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+ return false;
+ }
+
+ if (header_ver == 1) {
+ start_range = DMC_MMIO_START_RANGE;
+ end_range = DMC_MMIO_END_RANGE;
+ } else if (dmc_id == DMC_FW_MAIN) {
+ start_range = TGL_MAIN_MMIO_START;
+ end_range = TGL_MAIN_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 13) {
+ start_range = ADLP_PIPE_MMIO_START;
+ end_range = ADLP_PIPE_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ start_range = TGL_PIPE_MMIO_START(dmc_id);
+ end_range = TGL_PIPE_MMIO_END(dmc_id);
+ } else {
+ drm_warn(&i915->drm, "Unknown MMIO range for sanity check\n");
+ return false;
+ }
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+ return false;
+ }
+
+ return true;
+}
+
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
@@ -443,6 +576,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+ dmc_header->header_ver, dmc_id)) {
+ drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
+ return 0;
+ }
+
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
@@ -688,7 +827,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_dmc_runtime_pm_get(dev_priv);
- if (IS_ALDERLAKE_P(dev_priv)) {
+ if (IS_DG2(dev_priv)) {
+ dmc->fw_path = DG2_DMC_PATH;
+ dmc->required_version = DG2_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(dev_priv)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index d65e698832eb..238620b55966 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -10,13 +10,70 @@
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+
+#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
+#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
+
+#define __PIPEDMC_REG_MMIO_BASE(i915, dmc_id) \
+ ((DISPLAY_VER(i915) >= 13 ? _ADLP_PIPEDMC_REG_MMIO_BASE_A : \
+ _TGL_PIPEDMC_REG_MMIO_BASE_A) + \
+ 0x400 * ((dmc_id) - 1))
+
+#define __DMC_REG_MMIO_BASE 0x8f000
+
+#define _DMC_REG_MMIO_BASE(i915, dmc_id) \
+ ((dmc_id) == DMC_FW_MAIN ? __DMC_REG_MMIO_BASE : \
+ __PIPEDMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_REG(i915, dmc_id, reg) \
+ ((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_EVT_HTP_0 0x8f004
+
+#define DMC_EVT_HTP(i915, dmc_id, handler) \
+ _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_HTP_0) + 4 * (handler))
+
+#define _DMC_EVT_CTL_0 0x8f034
+
+#define DMC_EVT_CTL(i915, dmc_id, handler) \
+ _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_CTL_0) + 4 * (handler))
+
+#define DMC_EVT_CTL_ENABLE REG_BIT(31)
+#define DMC_EVT_CTL_RECURRING REG_BIT(30)
+#define DMC_EVT_CTL_TYPE_MASK REG_GENMASK(17, 16)
+#define DMC_EVT_CTL_TYPE_LEVEL_0 0
+#define DMC_EVT_CTL_TYPE_LEVEL_1 1
+#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
+#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
+
+#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
+#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
+/* An event handler scheduled to run at a 1 kHz frequency. */
+#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
+
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
#define DMC_HTP_SKL _MMIO(0x8F004)
#define DMC_LAST_WRITE _MMIO(0x8F034)
#define DMC_LAST_WRITE_VALUE 0xc003b400
#define DMC_MMIO_START_RANGE 0x80000
-#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+ _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+ _TGL_PIPEB_MMIO_END)
+
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
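
To make the DMC_EVT_CTL()/DMC_EVT_HTP() indirection above easier to follow, here is a standalone sketch of the same offset arithmetic in plain C. It assumes the usual dmc_id encoding (main DMC = 0, pipe A = 1, and so on); the constants are copied from the hunk, but the helper functions are illustrative only.

	#include <stdio.h>
	#include <stdint.h>

	#define DMC_BASE	0x8f000u	/* __DMC_REG_MMIO_BASE */
	#define ADLP_PIPE_BASE	0x5f000u	/* _ADLP_PIPEDMC_REG_MMIO_BASE_A */
	#define TGL_PIPE_BASE	0x92000u	/* _TGL_PIPEDMC_REG_MMIO_BASE_A */
	#define EVT_CTL_0	0x8f034u	/* _DMC_EVT_CTL_0 */

	static uint32_t dmc_reg_base(int display_ver, int dmc_id)
	{
		if (dmc_id == 0)	/* main DMC (assumed DMC_FW_MAIN == 0) */
			return DMC_BASE;
		return (display_ver >= 13 ? ADLP_PIPE_BASE : TGL_PIPE_BASE) +
		       0x400u * (dmc_id - 1);
	}

	static uint32_t dmc_evt_ctl(int display_ver, int dmc_id, int handler)
	{
		/* _DMC_REG(): rebase the main-DMC offset onto the chosen base */
		return EVT_CTL_0 - DMC_BASE + dmc_reg_base(display_ver, dmc_id) +
		       4u * handler;
	}

	int main(void)
	{
		/* main DMC, handler 3 (the TGL flip queue event) -> 0x8f040 */
		printf("0x%x\n", dmc_evt_ctl(12, 0, 3));
		/* pipe A DMC, handler 2 on a display ver 13 part -> 0x5f03c */
		printf("0x%x\n", dmc_evt_ctl(13, 1, 2));
		return 0;
	}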
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e4a79c11fd25..32292c0be2bd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,6 +40,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
@@ -388,13 +389,23 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
+static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
+{
+ u32 voltage;
+
+ voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
+
+ return voltage == VOLTAGE_INFO_0_85V;
+}
+
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+ if (intel_phy_is_combo(dev_priv, phy) &&
+ (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
return 540000;
return 810000;
@@ -402,12 +413,48 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- if (intel_dp_is_edp(intel_dp))
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
return 540000;
return 810000;
}
+static int dg1_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ return 540000;
+
+ return 810000;
+}
+
+static int vbt_max_link_rate(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int max_rate;
+
+ max_rate = intel_bios_dp_max_link_rate(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
+
+ if (max_rate && edp_max_rate)
+ max_rate = min(max_rate, edp_max_rate);
+ else if (edp_max_rate)
+ max_rate = edp_max_rate;
+ }
+
+ return max_rate;
+}
+
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
@@ -429,7 +476,6 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
162000, 270000
};
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const int *source_rates;
int size, max_rate = 0, vbt_max_rate;
@@ -445,7 +491,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = 810000;
+ max_rate = dg1_max_source_rate(intel_dp);
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -465,7 +511,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(g4x_rates);
}
- vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
+ vbt_max_rate = vbt_max_link_rate(intel_dp);
if (max_rate && vbt_max_rate)
max_rate = min(max_rate, vbt_max_rate);
else if (vbt_max_rate)
@@ -658,7 +704,6 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
*/
bits_per_pixel = (link_clock * lane_count * 8) /
intel_dp_mode_to_fec_clock(mode_clock);
- drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
@@ -667,9 +712,6 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
if (bigjoiner)
max_bpp_small_joiner_ram *= 2;
- drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
- max_bpp_small_joiner_ram);
-
/*
* Greatest allowed DSC BPP = MIN (output BPP from available Link BW
* check, output bpp from small joiner RAM check)
@@ -681,7 +723,6 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
i915->max_cdclk_freq * 48 /
intel_dp_mode_to_fec_clock(mode_clock);
- drm_dbg_kms(&i915->drm, "Max big joiner bpp: %u\n", max_bpp_bigjoiner);
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
}
@@ -1220,11 +1261,12 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from VBT only for panels that don't have bpp in EDID */
if (intel_connector->base.display_info.bpc == 0 &&
- dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
+ intel_connector->panel.vbt.edp.bpp &&
+ intel_connector->panel.vbt.edp.bpp < bpp) {
drm_dbg_kms(&dev_priv->drm,
"clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp.bpp);
- bpp = dev_priv->vbt.edp.bpp;
+ intel_connector->panel.vbt.edp.bpp);
+ bpp = intel_connector->panel.vbt.edp.bpp;
}
}
@@ -1880,7 +1922,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
}
if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
- pipe_config->msa_timing_delay = i915->vbt.edp.drrs_msa_timing_delay;
+ pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
pipe_config->has_drrs = true;
@@ -2710,6 +2752,33 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
DRM_MODE_ARG(mode));
}
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_bpp, connector->panel.vbt.edp.bpp);
+ connector->panel.vbt.edp.bpp = pipe_bpp;
+ }
+}
+
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -2824,9 +2893,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
- intel_dp_set_common_rates(intel_dp);
- intel_dp_reset_max_link_params(intel_dp);
-
/* Read the eDP DSC DPCD registers */
if (DISPLAY_VER(dev_priv) >= 10)
intel_dp_get_dsc_sink_cap(intel_dp);
@@ -4524,7 +4590,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
edid = intel_dp_get_edid(intel_dp);
connector->detect_edid = edid;
- vrr_capable = intel_vrr_is_capable(&connector->base);
+ vrr_capable = intel_vrr_is_capable(connector);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
@@ -5129,6 +5195,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
struct edid *edid;
@@ -5185,8 +5252,12 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
+ intel_bios_init_panel(dev_priv, &intel_connector->panel,
+ encoder->devdata, IS_ERR(edid) ? NULL : edid);
+
intel_panel_add_edid_fixed_modes(intel_connector,
- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
+ intel_vrr_is_capable(intel_connector));
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
@@ -5228,6 +5299,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_edp_add_properties(intel_dp);
+ intel_pps_init_late(intel_dp);
+
return true;
out_vdd_off:
@@ -5308,11 +5381,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
type = DRM_MODE_CONNECTOR_DisplayPort;
}
- intel_dp_set_source_rates(intel_dp);
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_default_max_sink_lane_count(intel_dp);
- intel_dp_set_common_rates(intel_dp);
- intel_dp_reset_max_link_params(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
@@ -5340,16 +5410,19 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- /* init MST on ports that can support it */
- intel_dp_mst_encoder_init(dig_port,
- intel_connector->base.base.id);
-
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
- intel_dp_mst_encoder_cleanup(dig_port);
goto fail;
}
+ intel_dp_set_source_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ intel_dp_reset_max_link_params(intel_dp);
+
+ /* init MST on ports that can support it */
+ intel_dp_mst_encoder_init(dig_port,
+ intel_connector->base.base.id);
+
intel_dp_add_properties(intel_dp, connector);
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
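
A standalone sketch (simplified, not i915 code) of the clamping rule the new vbt_max_link_rate() above applies: a value of zero means "no VBT limit", and when both the port-level and the eDP panel-level limits are present the stricter one wins.

	#include <stdio.h>

	/* Both arguments are link rates in kHz; 0 means "no limit known". */
	static int clamp_link_rate(int port_max, int edp_max)
	{
		if (port_max && edp_max)
			return port_max < edp_max ? port_max : edp_max;
		return edp_max ? edp_max : port_max;
	}

	int main(void)
	{
		printf("%d\n", clamp_link_rate(810000, 540000)); /* 540000 */
		printf("%d\n", clamp_link_rate(0, 270000));      /* 270000 */
		printf("%d\n", clamp_link_rate(810000, 0));      /* 810000 */
		return 0;
	}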
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index d457e17bdc57..a54902c713a3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -29,6 +29,7 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits);
@@ -63,6 +64,7 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index fb6cf30ee628..c92d5bb2326a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -370,7 +370,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
int ret;
ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
- i915->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
+ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
&current_level, &current_mode);
if (ret < 0)
return ret;
@@ -454,7 +454,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
- switch (i915->vbt.backlight.type) {
+ switch (panel->vbt.backlight.type) {
case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
try_vesa_interface = true;
break;
@@ -466,7 +466,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
}
break;
case INTEL_DP_AUX_BACKLIGHT_ON:
- if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
try_intel_interface = true;
try_vesa_interface = true;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 061b277e5ce7..14d2a64193b2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -839,6 +839,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
+ drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
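
Illustrative, self-contained sketch of the error-path rule behind the one-line intel_dp_mst.c fix above: a reference taken before connector initialisation has to be dropped again when that initialisation fails, otherwise the port object leaks. The struct and helpers below are stand-ins, not the drm_dp_mst API.

	#include <stdio.h>

	struct port { int refcount; };

	static void port_get(struct port *p) { p->refcount++; }
	static void port_put(struct port *p) { p->refcount--; }

	static int connector_init(int should_fail) { return should_fail ? -1 : 0; }

	static int add_connector(struct port *p, int should_fail)
	{
		port_get(p);			/* like drm_dp_mst_get_port_malloc() */

		if (connector_init(should_fail)) {
			port_put(p);		/* the cleanup the patch adds */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct port p = { .refcount = 0 };

		add_connector(&p, 1);
		printf("refcount after failed init: %d\n", p.refcount); /* 0, no leak */
		return 0;
	}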
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 6eef0b8a91eb..5262f16b45ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -933,7 +933,17 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- return 0;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ return intel_compute_shared_dplls(state, crtc, encoder);
}
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -944,21 +954,12 @@ static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- ret = intel_reserve_shared_dplls(state, crtc, encoder);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return ret;
- }
-
- return 0;
+ return intel_reserve_shared_dplls(state, crtc, encoder);
}
static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1125,39 +1126,26 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- return 0;
+ return intel_compute_shared_dplls(state, crtc, NULL);
}
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- int ret;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
return 0;
- ret = intel_reserve_shared_dplls(state, crtc, NULL);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return ret;
- }
-
- return 0;
+ return intel_reserve_shared_dplls(state, crtc, NULL);
}
void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
@@ -1198,7 +1186,6 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state)
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_chv;
@@ -1206,10 +1193,8 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
chv_compute_dpll(crtc_state);
@@ -1219,7 +1204,6 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_vlv;
@@ -1228,7 +1212,6 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -1270,11 +1253,8 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1306,11 +1286,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1342,11 +1319,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1380,11 +1354,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1436,6 +1407,7 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
@@ -1448,7 +1420,14 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- return i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ ret = i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+
+ return 0;
}
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -1457,6 +1436,7 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
@@ -1469,7 +1449,14 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
if (!i915->dpll_funcs->crtc_get_shared_dpll)
return 0;
- return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+
+ return 0;
}
void
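
A standalone sketch of the logging change made in intel_dpll.c above: the per-platform compute hooks now just return an error code, and the single caller prints one CRTC-tagged debug message instead of every hook duplicating its own error print. All names below are illustrative stand-ins.

	#include <stdio.h>
	#include <errno.h>

	static int platform_compute_clock(int crtc_id)
	{
		return crtc_id == 2 ? -EINVAL : 0;	/* pretend pipe C has no PLL */
	}

	static int crtc_compute_clock(int crtc_id)
	{
		int ret = platform_compute_clock(crtc_id);

		if (ret) {
			/* one centralised, uniformly formatted message */
			fprintf(stderr, "[CRTC:%d] Couldn't calculate DPLL settings\n",
				crtc_id);
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", crtc_compute_clock(0));	/* 0 */
		printf("%d\n", crtc_compute_clock(2));	/* -EINVAL, with one log line */
		return 0;
	}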
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 22f55574a35c..118598c9a809 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -90,6 +90,9 @@ struct intel_shared_dpll_funcs {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
+ int (*compute_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
int (*get_dplls)(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
@@ -514,6 +517,13 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
+static int ibx_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ return 0;
+}
+
static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -578,6 +588,7 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
+ .compute_dplls = ibx_compute_dpll,
.get_dplls = ibx_get_dpll,
.put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
@@ -894,33 +905,35 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *
-hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int
+hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
- u32 val;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- crtc_state->dpll_hw_state.wrpll = val;
+ crtc_state->dpll_hw_state.wrpll =
+ WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_WRPLL2) |
- BIT(DPLL_ID_WRPLL1));
+ return 0;
+}
- if (!pll)
- return NULL;
+static struct intel_shared_dpll *
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- return pll;
+ return intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
}
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
@@ -963,6 +976,24 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
return (refclk * n / 10) / (p * r) * 2;
}
+static int
+hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int clock = crtc_state->port_clock;
+
+ switch (clock / 2) {
+ case 81000:
+ case 135000:
+ case 270000:
+ return 0;
+ default:
+ drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+ clock);
+ return -EINVAL;
+ }
+}
+
static struct intel_shared_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
@@ -982,8 +1013,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_LCPLL_2700;
break;
default:
- drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
- clock);
+ MISSING_CASE(clock / 2);
return NULL;
}
@@ -1019,18 +1049,28 @@ static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static struct intel_shared_dpll *
-hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int
+hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
- return NULL;
+ return -EINVAL;
+
+ crtc_state->dpll_hw_state.spll =
+ SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
- crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
- SPLL_REF_MUXED_SSC;
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
BIT(DPLL_ID_SPLL));
@@ -1060,6 +1100,23 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
+static int hsw_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return hsw_ddi_wrpll_compute_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return hsw_ddi_lcpll_compute_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return hsw_ddi_spll_compute_dpll(state, crtc);
+ else
+ return -EINVAL;
+}
+
static int hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1153,6 +1210,7 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
+ .compute_dplls = hsw_compute_dpll,
.get_dplls = hsw_get_dpll,
.put_dplls = intel_put_dpll,
.update_ref_clks = hsw_update_dpll_ref_clks,
@@ -1545,10 +1603,8 @@ skip_remaining_dividers:
break;
}
- if (!ctx.p) {
- DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ if (!ctx.p)
return -EINVAL;
- }
/*
* gcc incorrectly analyses that these can be used without being
@@ -1741,23 +1797,28 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static int skl_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int skl_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
- int ret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ret = skl_ddi_hdmi_pll_dividers(crtc_state);
+ return skl_ddi_hdmi_pll_dividers(crtc_state);
else if (intel_crtc_has_dp_encoder(crtc_state))
- ret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
+ return skl_ddi_dp_set_dpll_hw_state(crtc_state);
else
- ret = -EINVAL;
- if (ret)
- return ret;
+ return -EINVAL;
+}
+
+static int skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_shared_dpll *pll;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
@@ -1834,6 +1895,7 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
+ .compute_dplls = skl_compute_dpll,
.get_dplls = skl_get_dpll,
.put_dplls = intel_put_dpll,
.update_ref_clks = skl_update_dpll_ref_clks,
@@ -2081,19 +2143,14 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/* Calculate HDMI div */
/*
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
- if (!bxt_find_best_dpll(crtc_state, clk_div)) {
- drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
- crtc_state->port_clock,
- pipe_name(crtc->pipe));
+ if (!bxt_find_best_dpll(crtc_state, clk_div))
return -EINVAL;
- }
drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
@@ -2225,6 +2282,21 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
+static int bxt_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ return -EINVAL;
+}
+
static int bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -2234,16 +2306,6 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
- int ret;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
- else if (intel_crtc_has_dp_encoder(crtc_state))
- ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state);
- else
- ret = -EINVAL;
- if (ret)
- return ret;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -2302,6 +2364,7 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
+ .compute_dplls = bxt_compute_dpll,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
.update_ref_clks = bxt_update_dpll_ref_clks,
@@ -2396,7 +2459,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
}
/*
- * Display WA #22010492432: ehl, tgl, adl-p
+ * Display WA #22010492432: ehl, tgl, adl-s, adl-p
* Program half of the nominal DCO divider fraction value.
*/
static bool
@@ -2404,7 +2467,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
- IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
+ IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->dpll.ref_clks.nssc == 38400;
}
@@ -2809,11 +2872,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state, is_dkl);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to find divisors for clock %d\n", clock);
+ if (ret)
return ret;
- }
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
@@ -2823,12 +2883,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
m2div_int = dco_khz / (refclk_khz * m1div);
}
- if (m2div_int > 255) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to find mdiv for clock %d\n",
- clock);
+ if (m2div_int > 255)
return -EINVAL;
- }
}
m2div_rem = dco_khz % (refclk_khz * m1div);
@@ -3119,18 +3175,15 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915)
return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
}
-static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum port port = encoder->port;
- unsigned long dpll_mask;
+ struct skl_wrpll_params pll_params = {};
int ret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
@@ -3139,14 +3192,26 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Could not calculate combo PHY PLL state.\n");
+ if (ret)
return ret;
- }
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ return 0;
+}
+
+static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ enum port port = encoder->port;
+ unsigned long dpll_mask;
+
if (IS_ALDERLAKE_S(dev_priv)) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL3) |
@@ -3183,12 +3248,8 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
dpll_mask);
- if (!port_dpll->pll) {
- drm_dbg_kms(&dev_priv->drm,
- "No combo PHY PLL found for [ENCODER:%d:%s]\n",
- encoder->base.base.id, encoder->base.name);
+ if (!port_dpll->pll)
return -EINVAL;
- }
intel_reference_shared_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
@@ -3198,47 +3259,55 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
return 0;
}
-static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_wrpll_params pll_params = { };
- struct icl_port_dpll *port_dpll;
- enum intel_dpll_id dpll_id;
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct skl_wrpll_params pll_params = {};
int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Could not calculate TBT PLL state.\n");
+ if (ret)
return ret;
- }
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ enum intel_dpll_id dpll_id;
+ int ret;
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));
- if (!port_dpll->pll) {
- drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
+ if (!port_dpll->pll)
return -EINVAL;
- }
intel_reference_shared_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Could not calculate MG PHY PLL state.\n");
- goto err_unreference_tbt_pll;
- }
-
dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
encoder->port));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
@@ -3246,7 +3315,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
BIT(dpll_id));
if (!port_dpll->pll) {
ret = -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
intel_reference_shared_dpll(state, crtc,
@@ -3263,6 +3331,23 @@ err_unreference_tbt_pll:
return ret;
}
+static int icl_compute_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_compute_combo_phy_dpll(state, crtc);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_compute_tc_phy_dplls(state, crtc);
+
+ MISSING_CASE(phy);
+
+ return 0;
+}
+
static int icl_get_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -3943,6 +4028,7 @@ static const struct dpll_info icl_plls[] = {
static const struct intel_dpll_mgr icl_pll_mgr = {
.dpll_info = icl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
@@ -3959,6 +4045,7 @@ static const struct dpll_info ehl_plls[] = {
static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -3987,6 +4074,7 @@ static const struct dpll_info tgl_plls[] = {
static const struct intel_dpll_mgr tgl_pll_mgr = {
.dpll_info = tgl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
@@ -4003,6 +4091,7 @@ static const struct dpll_info rkl_plls[] = {
static const struct intel_dpll_mgr rkl_pll_mgr = {
.dpll_info = rkl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -4019,6 +4108,7 @@ static const struct dpll_info dg1_plls[] = {
static const struct intel_dpll_mgr dg1_pll_mgr = {
.dpll_info = dg1_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -4035,6 +4125,7 @@ static const struct dpll_info adls_plls[] = {
static const struct intel_dpll_mgr adls_pll_mgr = {
.dpll_info = adls_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -4054,6 +4145,7 @@ static const struct dpll_info adlp_plls[] = {
static const struct intel_dpll_mgr adlp_pll_mgr = {
.dpll_info = adlp_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
@@ -4119,6 +4211,33 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
}
/**
+ * intel_compute_shared_dplls - compute DPLL state for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to compute DPLLs for
+ * @encoder: encoder
+ *
+ * This function computes the DPLL state for the given CRTC and encoder.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int intel_compute_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
+ return -EINVAL;
+
+ return dpll_mgr->compute_dplls(state, crtc, encoder);
+}
+
+/**
* intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to reserve DPLLs for
@@ -4330,3 +4449,91 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp1);
}
}
+
+static void
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_dpll_hw_state dpll_hw_state;
+ u8 pipe_mask;
+ bool active;
+
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+ drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
+
+ active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+ if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
+ I915_STATE_WARN(!pll->on && pll->active_mask,
+ "pll in active use but not on in sw tracking\n");
+ I915_STATE_WARN(pll->on && !pll->active_mask,
+ "pll is on but not used by any active pipe\n");
+ I915_STATE_WARN(pll->on != active,
+ "pll on state mismatch (expected %i, found %i)\n",
+ pll->on, active);
+ }
+
+ if (!crtc) {
+ I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
+ "more active pll users than references: 0x%x vs 0x%x\n",
+ pll->active_mask, pll->state.pipe_mask);
+
+ return;
+ }
+
+ pipe_mask = BIT(crtc->pipe);
+
+ if (new_crtc_state->hw.active)
+ I915_STATE_WARN(!(pll->active_mask & pipe_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ else
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+
+ I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pipe_mask, pll->state.pipe_mask);
+
+ I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
+ &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "pll hw state mismatch\n");
+}
+
+void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (new_crtc_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
+ crtc, new_crtc_state);
+
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ u8 pipe_mask = BIT(crtc->pipe);
+ struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
+
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
+ "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->state.pipe_mask);
+ }
+}
+
+void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i],
+ NULL, NULL);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index f7c96a1f13c8..3247dc300ae4 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -336,6 +336,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+int intel_compute_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
int intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
@@ -365,4 +368,9 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
+void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915);
+
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index fb0e7e79e0cd..ac587647e1f5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -4,6 +4,7 @@
*/
#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_internal.h"
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
@@ -127,8 +128,12 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
+ u64 pin_flags = 0;
int err;
+ if (i915_gem_object_is_stolen(dpt->obj))
+ pin_flags |= PIN_MAPPABLE;
+
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
atomic_inc(&i915->gpu_error.pending_fb_pin);
@@ -138,7 +143,7 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
continue;
vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
- HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
+ pin_flags);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
continue;
@@ -248,10 +253,13 @@ intel_dpt_create(struct intel_framebuffer *fb)
size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
- if (HAS_LMEM(i915))
- dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
- else
+ dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
+ drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
+ dpt_obj = i915_gem_object_create_internal(i915, size);
+ }
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
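
A standalone sketch (assumptions flagged in the comments) of the backing-store fallback order that intel_dpt_create() above now follows: try local memory first, fall back to stolen memory when a mappable aperture exists, and only use plain system memory on parts without local memory.

	#include <stdio.h>

	enum dpt_backing { DPT_LMEM, DPT_STOLEN, DPT_SMEM, DPT_ERROR };

	/*
	 * In i915 each create call can fail at runtime; here success is just
	 * modelled by boolean flags passed in by the caller.
	 */
	static enum dpt_backing pick_dpt_backing(int lmem_ok, int has_aperture,
						 int stolen_ok, int has_lmem)
	{
		if (lmem_ok)
			return DPT_LMEM;	/* i915_gem_object_create_lmem() */
		if (has_aperture && stolen_ok)
			return DPT_STOLEN;	/* i915_gem_object_create_stolen() */
		if (!has_lmem)
			return DPT_SMEM;	/* i915_gem_object_create_internal() */
		return DPT_ERROR;		/* give up: ERR_CAST(dpt_obj) */
	}

	int main(void)
	{
		/* discrete GPU: local memory works */
		printf("%d\n", pick_dpt_backing(1, 1, 1, 1));	/* DPT_LMEM */
		/* integrated GPU with aperture: stolen memory */
		printf("%d\n", pick_dpt_backing(0, 1, 1, 0));	/* DPT_STOLEN */
		/* integrated GPU, no usable stolen: system memory */
		printf("%d\n", pick_dpt_backing(0, 0, 0, 0));	/* DPT_SMEM */
		return 0;
	}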
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 166caf293f7b..7da4a9cbe4ba 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -217,9 +217,6 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
{
struct intel_crtc *crtc;
- if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS)
- return;
-
for_each_intel_crtc(&dev_priv->drm, crtc) {
unsigned int frontbuffer_bits;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 389a8c24cdc1..35e121cd226c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -102,7 +102,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum drm_panel_orientation orientation;
- orientation = dev_priv->vbt.dsi.orientation;
+ orientation = connector->panel.vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 7d234429e71e..1bc7118c56a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -160,12 +160,10 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
- if (dev_priv->vbt.backlight.brightness_precision_bits > 8)
- panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1;
+ if (panel->vbt.backlight.brightness_precision_bits > 8)
+ panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1;
else
panel->backlight.max = PANEL_PWM_MAX_VALUE;
@@ -185,11 +183,10 @@ static const struct intel_panel_bl_funcs dcs_bl_funcs = {
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
struct intel_panel *panel = &intel_connector->panel;
- if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
return -ENODEV;
if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
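For reference, the brightness_precision_bits math in the dcs_setup_backlight() hunk above resolves to a simple power-of-two range: with 10-bit precision the maximum brightness becomes (1 << 10) - 1 = 1023, while 8 bits or fewer falls back to the fixed PWM maximum. A tiny standalone sketch follows; the 255 fallback value is an assumption for illustration, not taken from this diff.

#include <stdio.h>

#define PANEL_PWM_MAX_VALUE 255 /* assumed fallback value, for illustration only */

static unsigned int backlight_max(unsigned int precision_bits)
{
	/* Mirrors the logic above: wide panels get a full 2^bits - 1 range. */
	if (precision_bits > 8)
		return (1u << precision_bits) - 1;
	return PANEL_PWM_MAX_VALUE;
}

int main(void)
{
	printf("10-bit panel max: %u\n", backlight_max(10)); /* 1023 */
	printf(" 8-bit panel max: %u\n", backlight_max(8));  /* 255  */
	return 0;
}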
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index dd24aef925f2..75e8cc4337c9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -240,9 +240,10 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+static void vlv_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct gpio_map *map;
u16 pconf0, padval;
u32 tmp;
@@ -256,7 +257,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
map = &vlv_gpio_table[gpio_index];
- if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
port = IOSF_PORT_GPIO_NC;
} else {
@@ -287,14 +288,15 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
-static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+static void chv_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u16 cfg0, cfg1;
u16 family_num;
u8 port;
- if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
/* XXX: it's unclear whether 255->57 is part of SE. */
gpio_index -= CHV_GPIO_IDX_START_SE;
@@ -340,9 +342,10 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
-static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+static void bxt_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
@@ -366,9 +369,11 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
-static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+static void icl_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
@@ -376,18 +381,19 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
drm_dbg_kms(&dev_priv->drm, "\n");
- if (dev_priv->vbt.dsi.seq_version >= 3)
+ if (connector->panel.vbt.dsi.seq_version >= 3)
gpio_index = *data++;
gpio_number = *data++;
/* gpio source in sequence v2 only */
- if (dev_priv->vbt.dsi.seq_version == 2)
+ if (connector->panel.vbt.dsi.seq_version == 2)
gpio_source = (*data >> 1) & 3;
else
gpio_source = 0;
@@ -396,13 +402,13 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
value = *data++ & 1;
if (DISPLAY_VER(dev_priv) >= 11)
- icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ icl_exec_gpio(connector, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
+ vlv_exec_gpio(connector, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
+ chv_exec_gpio(connector, gpio_source, gpio_number, value);
else
- bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ bxt_exec_gpio(connector, gpio_source, gpio_index, value);
return data;
}
@@ -585,14 +591,15 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
if (drm_WARN_ON(&dev_priv->drm,
- seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+ seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
- data = dev_priv->vbt.dsi.sequence[seq_id];
+ data = connector->panel.vbt.dsi.sequence[seq_id];
if (!data)
return;
@@ -605,7 +612,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
data++;
/* Skip Size of Sequence. */
- if (dev_priv->vbt.dsi.seq_version >= 3)
+ if (connector->panel.vbt.dsi.seq_version >= 3)
data += 4;
while (1) {
@@ -621,7 +628,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
mipi_elem_exec = NULL;
/* Size of Operation. */
- if (dev_priv->vbt.dsi.seq_version >= 3)
+ if (connector->panel.vbt.dsi.seq_version >= 3)
operation_size = *data++;
if (mipi_elem_exec) {
@@ -669,10 +676,10 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+ if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
return;
msleep(msec);
@@ -734,9 +741,10 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
+ struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode;
u16 burst_mode_ratio;
enum port port;
@@ -872,7 +880,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
bool want_backlight_gpio = false;
bool want_panel_gpio = false;
@@ -927,7 +936,8 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
if (intel_dsi->gpio_panel) {
gpiod_put(intel_dsi->gpio_panel);
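The intel_dsi_vbt.c changes above follow the same mechanical pattern as the rest of this series: VBT-derived panel data moves from the device-wide dev_priv->vbt into the per-connector connector->panel.vbt, so each exec helper now takes the connector and derives dev_priv from it. A minimal standalone sketch of that ownership shift; the struct layout here is simplified for illustration and is not the driver's real layout.

#include <stdio.h>

/* Stand-in for the per-panel VBT data that used to live device-wide. */
struct panel_vbt {
	int dsi_seq_version;
};

struct panel {
	struct panel_vbt vbt;
};

struct connector {
	struct panel panel;
};

/* New-style helper: per-connector data in, no global device state consulted. */
static void exec_gpio(const struct connector *connector, int gpio_index)
{
	if (connector->panel.vbt.dsi_seq_version >= 3)
		printf("v3+ sequence: gpio index %d taken from the sequence block\n", gpio_index);
	else
		printf("v2 sequence: gpio source decoded from the data byte\n");
}

int main(void)
{
	struct connector c = { .panel = { .vbt = { .dsi_seq_version = 3 } } };

	exec_gpio(&c, 1);
	return 0;
}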
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 9f5a6b79e95b..b191915ab351 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index bbdc34a23d54..16537830ccf0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -40,6 +40,7 @@
#include <linux/string_helpers.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
@@ -813,8 +814,8 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */
- if (DISPLAY_VER(fbc->i915) >= 11)
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp */
+ if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 44ac0cee8b77..8ea66a2e1b09 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -298,7 +298,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
* Mailbox interface.
*/
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
- ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to initiate HDCP key load (%d)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 1ae09431f53a..ebd91aa69dd2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2852,7 +2852,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_MCC(dev_priv))
+ else if (IS_JSL_EHL(dev_priv) && HAS_PCH_TGP(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 8204126d17f9..5f8b4f481cff 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -668,7 +668,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*/
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) ||
+ !INTEL_DISPLAY_ENABLED(dev_priv))
return;
WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7fbc8031a5aa..15d59de8810e 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -26,6 +26,7 @@
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index e8478161f8b9..730480ac3300 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -809,7 +809,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
if (val == 0)
- val = dev_priv->vbt.bios_lvds_val;
+ val = connector->panel.vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
@@ -967,9 +967,13 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
+ intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
+ IS_ERR(edid) ? NULL : edid);
+
/* Try EDID first */
intel_panel_add_edid_fixed_modes(intel_connector,
- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
+ false);
/* Failed to get EDID, what about VBT? */
if (!intel_panel_preferred_fixed_mode(intel_connector))
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
new file mode 100644
index 000000000000..f0e04d3904c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * Read out the current hardware modeset state, and sanitize it to the current
+ * state.
+ */
+
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_bw.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_modeset_setup.h"
+#include "intel_pch_display.h"
+#include "intel_pm.h"
+
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(i915->bw_obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
+ struct drm_atomic_state *state;
+ struct intel_crtc_state *temp_crtc_state;
+ enum pipe pipe = crtc->pipe;
+ int ret;
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+ }
+
+ state = drm_atomic_state_alloc(&i915->drm);
+ if (!state) {
+ drm_dbg_kms(&i915->drm,
+ "failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ state->acquire_ctx = ctx;
+
+ /* Everything's already locked, -EDEADLK can't happen. */
+ temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ ret = drm_atomic_add_affected_connectors(state, &crtc->base);
+
+ drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+
+ i915->display->crtc_disable(to_intel_atomic_state(state), crtc);
+
+ drm_atomic_state_put(state);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.base.id, crtc->base.name);
+
+ crtc->active = false;
+ crtc->base.enabled = false;
+
+ drm_WARN_ON(&i915->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ crtc_state->uapi.active = false;
+ crtc_state->uapi.connector_mask = 0;
+ crtc_state->uapi.encoder_mask = 0;
+ intel_crtc_free_hw_state(crtc_state);
+ memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
+
+ for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder)
+ encoder->base.crtc = NULL;
+
+ intel_fbc_disable(crtc);
+ intel_update_watermarks(i915);
+ intel_disable_shared_dpll(crtc_state);
+
+ intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
+
+ cdclk_state->min_cdclk[pipe] = 0;
+ cdclk_state->min_voltage_level[pipe] = 0;
+ cdclk_state->active_pipes &= ~BIT(pipe);
+
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
+}
+
+static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state = connector->base.state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector->base.encoder);
+
+ if (conn_state->crtc)
+ drm_connector_put(&connector->base);
+
+ if (encoder) {
+ struct intel_crtc *crtc =
+ to_intel_crtc(encoder->base.crtc);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ conn_state->best_encoder = &encoder->base;
+ conn_state->crtc = &crtc->base;
+ conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
+
+ drm_connector_get(&connector->base);
+ } else {
+ conn_state->best_encoder = NULL;
+ conn_state->crtc = NULL;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return;
+
+ crtc_state->uapi.enable = crtc_state->hw.enable;
+ crtc_state->uapi.active = crtc_state->hw.active;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+ crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+ crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
+
+ drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.ctm,
+ crtc_state->hw.ctm);
+}
+
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(i915) >= 4)
+ return;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_crtc *plane_crtc;
+ enum pipe pipe;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ continue;
+
+ if (pipe == crtc->pipe)
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
+
+ plane_crtc = intel_crtc_for_pipe(i915, pipe);
+ intel_plane_disable_noatomic(plane_crtc, plane);
+ }
+}
+
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ return true;
+
+ return false;
+}
+
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_connector *connector;
+
+ for_each_connector_on_encoder(dev, &encoder->base, connector)
+ return connector;
+
+ return NULL;
+}
+
+static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!crtc_state->hw.active && !HAS_GMCH(i915))
+ return;
+
+ /*
+ * We start out with underrun reporting disabled to avoid races.
+ * For correct bookkeeping mark this on active crtcs.
+ *
+ * Also on gmch platforms we don't have any hardware bits to

+ * disable the underrun reporting. Which means we need to start
+ * out with underrun reporting disabled also on inactive pipes,
+ * since otherwise we'll complain about the garbage we read when
+ * e.g. coming up after runtime pm.
+ *
+ * No protection against concurrent access is required - at
+ * worst a fifo underrun happens which also sets this to false.
+ */
+ crtc->cpu_fifo_underrun_disabled = true;
+
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc. With crtc for pipe A housing the underrun
+ * reporting state for PCH transcoder A, crtc for pipe B housing
+ * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = true;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ struct intel_plane *plane;
+
+ /* Disable everything but the primary plane */
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->uapi.visible &&
+ plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+ intel_plane_disable_noatomic(crtc, plane);
+ }
+
+ /* Disable any background color/etc. set by the BIOS */
+ intel_color_commit_noarm(crtc_state);
+ intel_color_commit_arm(crtc_state);
+ }
+
+ /*
+ * Adjust the state of the output pipe according to whether we have
+ * active connectors/encoders.
+ */
+ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
+ !intel_crtc_is_bigjoiner_slave(crtc_state))
+ intel_crtc_disable_noatomic(crtc, ctx);
+}
+
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
+ * the hardware when a high res display is plugged in. DPLL P
+ * divider is zero, and the pipe timings are bonkers. We'll
+ * try to disable everything in that case.
+ *
+ * FIXME would be nice to be able to sanitize this state
+ * without several WARNs, but for now let's take the easy
+ * road.
+ */
+ return IS_SANDYBRIDGE(i915) &&
+ crtc_state->hw.active &&
+ crtc_state->shared_dpll &&
+ crtc_state->port_clock == 0;
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_connector *connector;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc_state *crtc_state = crtc ?
+ to_intel_crtc_state(crtc->base.state) : NULL;
+
+ /*
+ * We need to check both for a crtc link (meaning that the encoder is
+ * active and trying to read from a pipe) and the pipe itself being
+ * active.
+ */
+ bool has_active_crtc = crtc_state &&
+ crtc_state->hw.active;
+
+ if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+ has_active_crtc = false;
+ }
+
+ connector = intel_encoder_find_connector(encoder);
+ if (connector && !has_active_crtc) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /*
+ * Connector is active, but has no active pipe. This is fallout
+ * from our resume register restoring. Disable the encoder
+ * manually again.
+ */
+ if (crtc_state) {
+ struct drm_encoder *best_encoder;
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /* avoid oopsing in case the hooks consult best_encoder */
+ best_encoder = connector->base.state->best_encoder;
+ connector->base.state->best_encoder = &encoder->base;
+
+ /* FIXME NULL atomic state passed! */
+ if (encoder->disable)
+ encoder->disable(NULL, encoder, crtc_state,
+ connector->base.state);
+ if (encoder->post_disable)
+ encoder->post_disable(NULL, encoder, crtc_state,
+ connector->base.state);
+
+ connector->base.state->best_encoder = best_encoder;
+ }
+ encoder->base.crtc = NULL;
+
+ /*
+ * Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default.
+ */
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+ if (HAS_DDI(i915))
+ intel_ddi_sanitize_encoder_pll_mapping(encoder);
+}
+
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct drm_i915_private *i915)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state;
+ enum pipe pipe = PIPE_A;
+ bool visible;
+
+ visible = plane->get_hw_state(plane, &pipe);
+
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ intel_set_plane_visible(crtc_state, plane_state, visible);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ str_enabled_disabled(visible), pipe_name(pipe));
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_plane_fixup_bitmasks(crtc_state);
+ }
+}
+
+static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 active_pipes = 0;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ intel_crtc_get_pipe_config(crtc_state);
+
+ crtc_state->hw.enable = crtc_state->hw.active;
+
+ crtc->base.enabled = crtc_state->hw.enable;
+ crtc->active = crtc_state->hw.active;
+
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ str_enabled_disabled(crtc_state->hw.active));
+ }
+
+ cdclk_state->active_pipes = active_pipes;
+ dbuf_state->active_pipes = active_pipes;
+
+ readout_plane_state(i915);
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ struct intel_crtc_state *crtc_state = NULL;
+
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ encoder->base.crtc = &crtc->base;
+ intel_encoder_get_config(encoder, crtc_state);
+
+ /* read out to slave crtc as well for bigjoiner */
+ if (crtc_state->bigjoiner_pipes) {
+ struct intel_crtc *slave_crtc;
+
+ /* encoder should be linked to the bigjoiner master */
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state;
+
+ slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
+ intel_encoder_get_config(encoder, slave_crtc_state);
+ }
+ }
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ if (encoder->sync_state)
+ encoder->sync_state(encoder, crtc_state);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ str_enabled_disabled(encoder->base.crtc),
+ pipe_name(pipe));
+ }
+
+ intel_dpll_readout_hw_state(i915);
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+
+ encoder = intel_attached_encoder(connector);
+ connector->base.encoder = &encoder->base;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->hw.active) {
+ /*
+ * This has to be done during hardware readout
+ * because anything calling .crtc_disable may
+ * rely on the connector_mask being accurate.
+ */
+ crtc_state->uapi.connector_mask |=
+ drm_connector_mask(&connector->base);
+ crtc_state->uapi.encoder_mask |=
+ drm_encoder_mask(&encoder->base);
+ }
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id, connector->base.name,
+ str_enabled_disabled(connector->base.encoder));
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(i915->bw_obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ if (crtc_state->hw.active) {
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+
+ intel_crtc_update_active_timings(crtc_state);
+
+ intel_crtc_copy_hw_to_uapi_state(crtc_state);
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use intel_plane_data_rate() :(
+ */
+ if (plane_state->uapi.visible)
+ crtc_state->data_rate[plane->id] =
+ 4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
+ if (plane_state->uapi.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
+
+ if (crtc_state->hw.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (drm_WARN_ON(&i915->drm, min_cdclk < 0))
+ min_cdclk = 0;
+ }
+
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ cdclk_state->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
+ intel_bw_crtc_update(bw_state, crtc_state);
+ }
+}
+
+static void
+get_encoder_power_domains(struct drm_i915_private *i915)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ struct intel_crtc_state *crtc_state;
+
+ if (!encoder->get_power_domains)
+ continue;
+
+ /*
+ * MST-primary and inactive encoders don't have a crtc state
+ * and neither of these requires any power domain references.
+ */
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+ encoder->get_power_domains(encoder, crtc_state);
+ }
+}
+
+static void intel_early_display_was(struct drm_i915_private *i915)
+{
+ /*
+ * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
+ * Also known as Wa_14010480278.
+ */
+ if (IS_DISPLAY_VER(i915, 10, 12))
+ intel_de_write(i915, GEN9_CLKGATE_DIS_0,
+ intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+
+ if (IS_HASWELL(i915)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ intel_de_write(i915, CHICKEN_PAR1_1,
+ intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+
+ if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ /* Display WA #1142:kbl,cfl,cml */
+ intel_de_rmw(i915, CHICKEN_PAR1_1,
+ KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
+ KBL_ARB_FILL_SPARE_14);
+ }
+}
+
+void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_early_display_was(i915);
+ intel_modeset_readout_hw_state(i915);
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ get_encoder_power_domains(i915);
+
+ intel_pch_sanitize(i915);
+
+ /*
+ * intel_sanitize_plane_mapping() may need to do vblank
+ * waits, so we need vblank interrupts restored beforehand.
+ */
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_sanitize_fifo_underrun_reporting(crtc_state);
+
+ drm_crtc_vblank_reset(&crtc->base);
+
+ if (crtc_state->hw.active)
+ intel_crtc_vblank_on(crtc_state);
+ }
+
+ intel_fbc_sanitize(i915);
+
+ intel_sanitize_plane_mapping(i915);
+
+ for_each_intel_encoder(&i915->drm, encoder)
+ intel_sanitize_encoder(encoder);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_sanitize_crtc(crtc, ctx);
+ intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state");
+ }
+
+ intel_modeset_update_connector_atomic_state(i915);
+
+ intel_dpll_sanitize_state(i915);
+
+ if (IS_G4X(i915)) {
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+ } else if (DISPLAY_VER(i915) >= 9) {
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+ } else if (HAS_PCH_SPLIT(i915)) {
+ ilk_wm_get_hw_state(i915);
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_power_domain_mask put_domains;
+
+ intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
+ if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ intel_modeset_put_crtc_power_domains(crtc, &put_domains);
+ }
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+ intel_power_domains_sanitize_state(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
new file mode 100644
index 000000000000..3beff67b33d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MODESET_SETUP_H__
+#define __INTEL_MODESET_SETUP_H__
+
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+
+void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif /* __INTEL_MODESET_SETUP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
new file mode 100644
index 000000000000..a91586d77cb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * High level crtc/connector/encoder modeset state verification.
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+#include "intel_modeset_verify.h"
+#include "intel_pm.h"
+#include "intel_snps_phy.h"
+
+/*
+ * Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency).
+ */
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
+
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ I915_STATE_WARN(!crtc_state,
+ "connector enabled without attached crtc\n");
+
+ if (!crtc_state)
+ return;
+
+ I915_STATE_WARN(!crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
+
+ if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
+
+ I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
+ "atomic encoder doesn't match attached encoder\n");
+
+ I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
+ "attached encoder crtc differs from connector crtc\n");
+ } else {
+ I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
+ "best encoder set without crtc!\n");
+ }
+}
+
+static void
+verify_connector_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_crtc_state *crtc_state = NULL;
+
+ if (new_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (crtc)
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ intel_connector_verify_state(crtc_state, new_conn_state);
+
+ I915_STATE_WARN(new_conn_state->best_encoder != encoder,
+ "connector's atomic encoder doesn't match legacy encoder\n");
+ }
+}
+
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (pipe_config->has_pch_encoder) {
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+ &pipe_config->fdi_m_n);
+ int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
+
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ drm_WARN(&dev_priv->drm,
+ !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
+ }
+}
+
+static void
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
+{
+ struct intel_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ bool enabled = false, found = false;
+ enum pipe pipe;
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
+ new_conn_state, i) {
+ if (old_conn_state->best_encoder == &encoder->base)
+ found = true;
+
+ if (new_conn_state->best_encoder != &encoder->base)
+ continue;
+
+ found = true;
+ enabled = true;
+
+ I915_STATE_WARN(new_conn_state->crtc !=
+ encoder->base.crtc,
+ "connector's crtc doesn't match encoder crtc\n");
+ }
+
+ if (!found)
+ continue;
+
+ I915_STATE_WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch (expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+
+ if (!encoder->base.crtc) {
+ bool active;
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active,
+ "encoder detached but still enabled on pipe %c.\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static void
+verify_crtc_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder;
+ struct intel_crtc_state *pipe_config = old_crtc_state;
+ struct drm_atomic_state *state = old_crtc_state->uapi.state;
+ struct intel_crtc *master_crtc;
+
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+ intel_crtc_free_hw_state(old_crtc_state);
+ intel_crtc_state_reset(old_crtc_state, crtc);
+ old_crtc_state->uapi.state = state;
+
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ crtc->base.name);
+
+ pipe_config->hw.enable = new_crtc_state->hw.enable;
+
+ intel_crtc_get_pipe_config(pipe_config);
+
+ /* we keep both pipes enabled on 830 */
+ if (IS_I830(dev_priv) && pipe_config->hw.active)
+ pipe_config->hw.active = new_crtc_state->hw.active;
+
+ I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
+ "crtc active state doesn't match with hw state (expected %i, found %i)\n",
+ new_crtc_state->hw.active, pipe_config->hw.active);
+
+ I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state (expected %i, found %i)\n",
+ new_crtc_state->hw.active, crtc->active);
+
+ master_crtc = intel_master_crtc(new_crtc_state);
+
+ for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
+ enum pipe pipe;
+ bool active;
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != new_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ new_crtc_state->hw.active);
+
+ I915_STATE_WARN(active && master_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
+
+ if (active)
+ intel_encoder_get_config(encoder, pipe_config);
+ }
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ intel_pipe_config_sanity_check(dev_priv, pipe_config);
+
+ if (!intel_pipe_config_compare(new_crtc_state,
+ pipe_config, false)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
+ intel_crtc_state_dump(pipe_config, NULL, "hw state");
+ intel_crtc_state_dump(new_crtc_state, NULL, "sw state");
+ }
+}
+
+void intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ return;
+
+ intel_wm_state_verify(crtc, new_crtc_state);
+ verify_connector_state(state, crtc);
+ verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+ intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
+ intel_mpllb_state_verify(state, new_crtc_state);
+}
+
+void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state)
+{
+ verify_encoder_state(dev_priv, state);
+ verify_connector_state(state, NULL);
+ intel_shared_dpll_verify_disabled(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.h b/drivers/gpu/drm/i915/display/intel_modeset_verify.h
new file mode 100644
index 000000000000..2d6fbe4f7846
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MODESET_VERIFY_H__
+#define __INTEL_MODESET_VERIFY_H__
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state);
+
+#endif /* __INTEL_MODESET_VERIFY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index f31e8c3f8ce0..1c0c745c142d 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,6 +30,8 @@
#include <linux/firmware.h>
#include <acpi/video.h>
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
@@ -53,6 +55,8 @@
#define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */
#define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */
+#define PCON_HEADLESS_SKU BIT(13)
+
struct opregion_header {
u8 signature[16];
u32 size;
@@ -1135,6 +1139,18 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
return new_edid;
}
+bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+ struct opregion_header *header = opregion->header;
+
+ if (!header || header->over.major < 2 ||
+ (header->over.major == 2 && header->over.minor < 3))
+ return false;
+
+ return opregion->header->pcon & PCON_HEADLESS_SKU;
+}
+
void intel_opregion_register(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->opregion;
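intel_opregion_headless_sku() in the hunk above gates the PCON bit on an OpRegion version of at least 2.3: anything below major 2, or major 2 with a minor below 3, is rejected before the PCON field is trusted. A standalone sketch of that version gate follows; the field names are simplified stand-ins for the opregion header layout.

#include <stdbool.h>
#include <stdio.h>

#define PCON_HEADLESS_SKU (1u << 13)

struct opregion_version {
	unsigned char major;
	unsigned char minor;
};

/* Only OpRegion 2.3+ defines the headless-SKU PCON bit. */
static bool headless_sku(const struct opregion_version *over, unsigned int pcon)
{
	if (over->major < 2 || (over->major == 2 && over->minor < 3))
		return false;
	return pcon & PCON_HEADLESS_SKU;
}

int main(void)
{
	struct opregion_version v23 = { 2, 3 }, v22 = { 2, 2 };

	printf("2.3 headless: %d\n", headless_sku(&v23, PCON_HEADLESS_SKU)); /* 1 */
	printf("2.2 headless: %d\n", headless_sku(&v22, PCON_HEADLESS_SKU)); /* 0 */
	return 0;
}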
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 82cc0ba34af7..2f261f985400 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -76,6 +76,8 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
struct edid *intel_opregion_get_edid(struct intel_connector *connector);
+bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+
#else /* CONFIG_ACPI*/
static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
@@ -127,6 +129,11 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+{
+ return false;
+}
+
#endif /* CONFIG_ACPI */
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index ee46561b5ae8..79ed8bd04a07 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1399,8 +1399,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->i915 = dev_priv;
overlay->context = engine->kernel_context;
- GEM_BUG_ON(!overlay->context);
-
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index d1d1b59102d6..237a40623dd7 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -71,20 +71,41 @@ intel_panel_fixed_mode(struct intel_connector *connector,
return best_mode;
}
+static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+{
+ return drm_mode_match(mode, preferred_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode->clock != preferred_mode->clock;
+}
+
+static bool is_alt_vrr_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+{
+ return drm_mode_match(mode, preferred_mode,
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode->hdisplay == preferred_mode->hdisplay &&
+ mode->vdisplay == preferred_mode->vdisplay &&
+ mode->clock != preferred_mode->clock;
+}
+
const struct drm_display_mode *
intel_panel_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode, *best_mode = NULL;
- int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
+ int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate;
int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
/* pick the fixed_mode with the lowest refresh rate */
list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
int vrefresh = drm_mode_vrefresh(fixed_mode);
- if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
+ if (is_alt_drrs_mode(fixed_mode, adjusted_mode) &&
+ vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
max_vrefresh = vrefresh;
best_mode = fixed_mode;
}
@@ -113,13 +134,11 @@ int intel_panel_get_modes(struct intel_connector *connector)
enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-
if (list_empty(&connector->panel.fixed_modes) ||
list_is_singular(&connector->panel.fixed_modes))
return DRRS_TYPE_NONE;
- return i915->vbt.drrs_type;
+ return connector->panel.vbt.drrs_type;
}
int intel_panel_compute_config(struct intel_connector *connector,
@@ -154,16 +173,18 @@ int intel_panel_compute_config(struct intel_connector *connector,
}
static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode)
+ const struct drm_display_mode *preferred_mode,
+ bool has_vrr)
{
- return drm_mode_match(mode, preferred_mode,
- DRM_MODE_MATCH_TIMINGS |
- DRM_MODE_MATCH_FLAGS |
- DRM_MODE_MATCH_3D_FLAGS) &&
- mode->clock != preferred_mode->clock;
+ /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */
+ if (has_vrr)
+ return is_alt_vrr_mode(mode, preferred_mode);
+ else
+ return is_alt_drrs_mode(mode, preferred_mode);
}
-static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector,
+ bool has_vrr)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *preferred_mode =
@@ -171,7 +192,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
- if (!is_alt_fixed_mode(mode, preferred_mode))
+ if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr))
continue;
drm_dbg_kms(&dev_priv->drm,
@@ -220,16 +241,21 @@ static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
list_del(&mode->head);
drm_mode_destroy(&i915->drm, mode);
}
}
-void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool has_drrs)
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
+ bool has_drrs, bool has_vrr)
{
intel_panel_add_edid_preferred_mode(connector);
- if (intel_panel_preferred_fixed_mode(connector) && has_drrs)
- intel_panel_add_edid_alt_fixed_modes(connector);
+ if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
+ intel_panel_add_edid_alt_fixed_modes(connector, has_vrr);
intel_panel_destroy_probed_modes(connector);
}
@@ -260,7 +286,7 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *mode;
- mode = i915->vbt.lfp_lvds_vbt_mode;
+ mode = connector->panel.vbt.lfp_lvds_vbt_mode;
if (!mode)
return;
@@ -274,7 +300,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *mode;
- mode = i915->vbt.sdvo_lvds_vbt_mode;
+ mode = connector->panel.vbt.sdvo_lvds_vbt_mode;
if (!mode)
return;
@@ -639,6 +665,8 @@ void intel_panel_fini(struct intel_connector *connector)
intel_backlight_destroy(panel);
+ intel_bios_fini_panel(panel);
+
list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) {
list_del(&fixed_mode->head);
drm_mode_destroy(connector->base.dev, fixed_mode);
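The intel_panel.c rework above splits alternate fixed-mode matching into two predicates: DRRS alternates must match the preferred mode's full timings and differ only in pixel clock, while VRR alternates only need matching flags and resolution. A hedged standalone sketch of the two criteria follows; drm_mode_match() is reduced here to plain field comparisons purely for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for struct drm_display_mode. */
struct mode {
	int hdisplay, vdisplay;
	int htotal, vtotal;   /* stands in for the full timing match */
	int clock;            /* kHz */
};

static bool is_alt_drrs_mode(const struct mode *m, const struct mode *pref)
{
	/* Same timings, different pixel clock. */
	return m->hdisplay == pref->hdisplay && m->vdisplay == pref->vdisplay &&
	       m->htotal == pref->htotal && m->vtotal == pref->vtotal &&
	       m->clock != pref->clock;
}

static bool is_alt_vrr_mode(const struct mode *m, const struct mode *pref)
{
	/* Only the resolution must match; blanking (and thus refresh) may differ. */
	return m->hdisplay == pref->hdisplay && m->vdisplay == pref->vdisplay &&
	       m->clock != pref->clock;
}

int main(void)
{
	struct mode pref = { 1920, 1080, 2200, 1125, 148500 };
	struct mode alt  = { 1920, 1080, 2200, 1350, 297000 };

	printf("DRRS alternate: %d\n", is_alt_drrs_mode(&alt, &pref)); /* 0: timings differ */
	printf("VRR alternate:  %d\n", is_alt_vrr_mode(&alt, &pref));  /* 1 */
	return 0;
}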
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 2e32bb728beb..b087c0c3cc6d 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -40,7 +40,8 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_panel_compute_config(struct intel_connector *connector,
struct drm_display_mode *adjusted_mode);
-void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool has_drrs);
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
+ bool has_drrs, bool has_vrr);
void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector);
void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index b688fd87e3da..9934c8a9e240 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -122,16 +122,29 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
-/* Program iCLKIP clock to the desired frequency */
-void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+struct iclkip_params {
+ u32 iclk_virtual_root_freq;
+ u32 iclk_pi_range;
+ u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
+};
+
+static void iclkip_params_init(struct iclkip_params *p)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->hw.adjusted_mode.crtc_clock;
- u32 divsel, phaseinc, auxdiv, phasedir = 0;
- u32 temp;
+ memset(p, 0, sizeof(*p));
- lpt_disable_iclkip(dev_priv);
+ p->iclk_virtual_root_freq = 172800 * 1000;
+ p->iclk_pi_range = 64;
+}
+
+static int lpt_iclkip_freq(struct iclkip_params *p)
+{
+ return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
+ p->desired_divisor << p->auxdiv);
+}
+
+static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
+{
+ iclkip_params_init(p);
/* The iCLK virtual clock root frequency is in MHz,
* but the adjusted_mode->crtc_clock in KHz. To get the
@@ -139,50 +152,60 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
* convert the virtual clock precision to KHz here for higher
* precision.
*/
- for (auxdiv = 0; auxdiv < 2; auxdiv++) {
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
-
- desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- clock << auxdiv);
- divsel = (desired_divisor / iclk_pi_range) - 2;
- phaseinc = desired_divisor % iclk_pi_range;
+ for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
+ p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
+ clock << p->auxdiv);
+ p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
+ p->phaseinc = p->desired_divisor % p->iclk_pi_range;
/*
* Near 20MHz is a corner case which is
* out of range for the 7-bit divisor
*/
- if (divsel <= 0x7f)
+ if (p->divsel <= 0x7f)
break;
}
+}
+
+/* Program iCLKIP clock to the desired frequency */
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ struct iclkip_params p;
+ u32 temp;
+
+ lpt_disable_iclkip(dev_priv);
+
+ lpt_compute_iclkip(&p, clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
drm_dbg_kms(&dev_priv->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock, auxdiv, divsel, phasedir, phaseinc);
+ clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
mutex_lock(&dev_priv->sb_lock);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
- temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
- temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
- temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
- temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
@@ -200,15 +223,14 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
- u32 divsel, phaseinc, auxdiv;
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
+ struct iclkip_params p;
u32 temp;
if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
+ iclkip_params_init(&p);
+
mutex_lock(&dev_priv->sb_lock);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
@@ -218,21 +240,20 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
}
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
- divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+ p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
- phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+ p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
- auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+ p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
mutex_unlock(&dev_priv->sb_lock);
- desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
+ p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
- return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- desired_divisor << auxdiv);
+ return lpt_iclkip_freq(&p);
}
/* Implements 3 different sequences from BSpec chapter "Display iCLK
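For reference, the divisor search factored out into lpt_compute_iclkip() and its inverse in lpt_iclkip_freq() reduce to the arithmetic below. This is a standalone userspace sketch in plain C (DIV_ROUND_CLOSEST open-coded, example clock chosen arbitrarily), not driver code.

#include <stdio.h>

#define ICLK_VIRTUAL_ROOT_FREQ	(172800 * 1000)	/* same constant as the driver */
#define ICLK_PI_RANGE		64

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int clock = 148500;	/* example pixel clock in kHz */
	unsigned int auxdiv, desired_divisor = 0, divsel = 0, phaseinc = 0;

	/* Try auxdiv 0 and 1 until divsel fits in the 7-bit field. */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		desired_divisor = div_round_closest(ICLK_VIRTUAL_ROOT_FREQ,
						    clock << auxdiv);
		divsel = desired_divisor / ICLK_PI_RANGE - 2;
		phaseinc = desired_divisor % ICLK_PI_RANGE;
		if (divsel <= 0x7f)
			break;
	}

	/* Inverse of the above, as lpt_iclkip_freq() computes it. */
	printf("auxdiv=%u divsel=%u phaseinc=%u -> %u kHz\n",
	       auxdiv, divsel, phaseinc,
	       div_round_closest(ICLK_VIRTUAL_ROOT_FREQ,
				 desired_divisor << auxdiv));
	return 0;
}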
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 5a598dd06039..1b21a341962f 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -209,7 +209,8 @@ static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int backlight_controller = dev_priv->vbt.backlight.controller;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int backlight_controller = connector->panel.vbt.backlight.controller;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -509,7 +510,7 @@ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
- /* take the difference of currrent time and panel power off time
+ /* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
panel_power_on_time = ktime_get_boottime();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
@@ -723,6 +724,13 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
unsigned long delay;
/*
+ * We may not yet know the real power sequencing delays,
+ * so keep VDD enabled until we're done with init.
+ */
+ if (intel_dp->pps.initializing)
+ return;
+
+ /*
* Queue the timer to fire a long time from now (relative to the power
* down delay) to keep the panel power up across a sequence of
* operations.
@@ -1051,7 +1059,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
pps_init_registers(intel_dp, true);
}
-static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+static void pps_vdd_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1072,8 +1080,6 @@ static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
-
- edp_panel_vdd_schedule_off(intel_dp);
}
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
@@ -1159,53 +1165,96 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
}
}
-static void pps_init_delays(struct intel_dp *intel_dp)
+static bool pps_delays_valid(struct edp_power_seq *delays)
+{
+ return delays->t1_t3 || delays->t8 || delays->t9 ||
+ delays->t10 || delays->t11_t12;
+}
+
+static void pps_init_delays_bios(struct intel_dp *intel_dp,
+ struct edp_power_seq *bios)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps.pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
+ if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
+ intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
- intel_pps_readout_hw_state(intel_dp, &cur);
+ *bios = intel_dp->pps.bios_pps_delays;
- intel_pps_dump_state(intel_dp, "cur", &cur);
+ intel_pps_dump_state(intel_dp, "bios", bios);
+}
+
+static void pps_init_delays_vbt(struct intel_dp *intel_dp,
+ struct edp_power_seq *vbt)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ *vbt = connector->panel.vbt.edp.pps;
+
+ if (!pps_delays_valid(vbt))
+ return;
- vbt = dev_priv->vbt.edp.pps;
/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
 * of 500ms appears to be too short. Occasionally the panel
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
drm_dbg_kms(&dev_priv->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
+ vbt->t11_t12);
}
+
/* T11_T12 delay is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
* table multiplies it with 1000 to make it in units of 100usec,
* too. */
- vbt.t11_t12 += 100 * 10;
+ vbt->t11_t12 += 100 * 10;
+
+ intel_pps_dump_state(intel_dp, "vbt", vbt);
+}
+
+static void pps_init_delays_spec(struct intel_dp *intel_dp,
+ struct edp_power_seq *spec)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
+ spec->t1_t3 = 210 * 10;
+ spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec->t10 = 500 * 10;
/* This one is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
* table multiplies it with 1000 to make it in units of 100usec,
* too. */
- spec.t11_t12 = (510 + 100) * 10;
+ spec->t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state(intel_dp, "spec", spec);
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (pps_delays_valid(final))
+ return;
- intel_pps_dump_state(intel_dp, "vbt", &vbt);
+ pps_init_delays_bios(intel_dp, &cur);
+ pps_init_delays_vbt(intel_dp, &vbt);
+ pps_init_delays_spec(intel_dp, &spec);
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
@@ -1367,18 +1416,48 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp)
pps_init_delays(intel_dp);
pps_init_registers(intel_dp, false);
+ pps_vdd_init(intel_dp);
- intel_pps_vdd_sanitize(intel_dp);
+ if (edp_have_panel_vdd(intel_dp))
+ edp_panel_vdd_schedule_off(intel_dp);
}
}
void intel_pps_init(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ intel_dp->pps.initializing = true;
INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
pps_init_timestamps(intel_dp);
- intel_pps_encoder_reset(intel_dp);
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+ pps_vdd_init(intel_dp);
+ }
+}
+
+void intel_pps_init_late(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /* Reinit delays after per-panel info has been parsed from VBT */
+ memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_dp->pps.initializing = false;
+
+ if (edp_have_panel_vdd(intel_dp))
+ edp_panel_vdd_schedule_off(intel_dp);
+ }
}
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
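The reworked pps_init_delays() path pulls the delays from three sources (BIOS/hw readout, VBT, eDP spec limits) and treats a delay set as initialized only when at least one field is non-zero. Below is a minimal standalone sketch of that validity test and of the merge policy stated in the code comments ("use the max of the register settings and vbt, fall back to the spec limits"); the field-wise merge function and the sample values are illustrative assumptions, not the kernel helper.

#include <stdint.h>
#include <stdio.h>

struct edp_power_seq {
	uint16_t t1_t3, t8, t9, t10, t11_t12;	/* units of 100 us */
};

static int pps_delays_valid(const struct edp_power_seq *d)
{
	return d->t1_t3 || d->t8 || d->t9 || d->t10 || d->t11_t12;
}

static uint16_t max16(uint16_t a, uint16_t b) { return a > b ? a : b; }

static void merge_delays(const struct edp_power_seq *bios,
			 const struct edp_power_seq *vbt,
			 const struct edp_power_seq *spec,
			 struct edp_power_seq *final)
{
	*final = (struct edp_power_seq){
		.t1_t3   = max16(bios->t1_t3, vbt->t1_t3),
		.t8      = max16(bios->t8, vbt->t8),
		.t9      = max16(bios->t9, vbt->t9),
		.t10     = max16(bios->t10, vbt->t10),
		.t11_t12 = max16(bios->t11_t12, vbt->t11_t12),
	};
	if (!pps_delays_valid(final))
		*final = *spec;	/* both sources unset: use the spec limits */
}

int main(void)
{
	struct edp_power_seq bios = { 0 }, vbt = { .t11_t12 = 6100 };
	struct edp_power_seq spec = { 2100, 500, 500, 5000, 6100 };	/* eDP 1.3 limits from the patch */
	struct edp_power_seq final;

	merge_delays(&bios, &vbt, &spec, &final);
	printf("t11_t12 = %u (x100us)\n", (unsigned int)final.t11_t12);
	return 0;
}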
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index e64144659d31..a3a56f903f26 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -41,6 +41,7 @@ bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp);
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
void intel_pps_reset_all(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 06db407e2749..e6a870641cd2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -86,10 +86,13 @@
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
+ if (i915->params.enable_psr == -1)
+ return connector->panel.vbt.psr.enable;
return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
@@ -399,6 +402,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
@@ -411,20 +415,20 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
goto check_tp3_sel;
}
- if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+ if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
val |= EDP_PSR_TP1_TIME_500us;
else
val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_0us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR_TP2_TP3_TIME_500us;
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
@@ -441,13 +445,14 @@ check_tp3_sel:
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
* off-by-one issue that HW has in some cases.
*/
- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
@@ -483,18 +488,19 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
if (dev_priv->params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
- if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
+ if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
@@ -549,7 +555,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
/*
* TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
* values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resoluition mode needs to decrese
+ * consumption, lower than 4k resolution mode needs to decrease
* IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
* mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
*/
@@ -953,7 +959,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
int psr_setup_time;
/*
- * Current PSR panels dont work reliably with VRR enabled
+ * Current PSR panels don't work reliably with VRR enabled
* So if VRR is enabled, do not enable PSR.
*/
if (crtc_state->vrr.enable)
@@ -1618,8 +1624,12 @@ exit:
}
static void clip_area_update(struct drm_rect *overlap_damage_area,
- struct drm_rect *damage_area)
+ struct drm_rect *damage_area,
+ struct drm_rect *pipe_src)
{
+ if (!drm_rect_intersect(damage_area, pipe_src))
+ return;
+
if (overlap_damage_area->y1 == -1) {
overlap_damage_area->y1 = damage_area->y1;
overlap_damage_area->y2 = damage_area->y2;
@@ -1654,7 +1664,7 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
*
* Plane scaling and rotation is not supported by selective fetch and both
* properties can change without a modeset, so need to be check at every
- * atomic commmit.
+ * atomic commit.
*/
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
{
@@ -1685,6 +1695,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -1708,7 +1719,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
*/
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
+ .x2 = INT_MAX };
struct drm_atomic_helper_damage_iter iter;
struct drm_rect clip;
@@ -1735,20 +1747,23 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
}
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
}
continue;
} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
/* If alpha changed mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
continue;
}
@@ -1759,7 +1774,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
&new_plane_state->uapi);
drm_atomic_for_each_plane_damage(&iter, &clip) {
if (drm_rect_intersect(&clip, &src))
- clip_area_update(&damaged_area, &clip);
+ clip_area_update(&damaged_area, &clip,
+ &crtc_state->pipe_src);
}
if (damaged_area.y1 == -1)
@@ -1767,7 +1783,20 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
+ }
+
+ /*
+ * TODO: For now we are just using full update in case
+ * selective fetch area calculation fails. To optimize this we
+ * should identify cases where this happens and fix the area
+ * calculation for those.
+ */
+ if (pipe_clip.y1 == -1) {
+ drm_info_once(&dev_priv->drm,
+ "Selective fetch area calculation failed in pipe %c\n",
+ pipe_name(crtc->pipe));
+ full_update = true;
}
if (full_update)
@@ -2174,7 +2203,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
}
/**
- * intel_psr_invalidate - Invalidade PSR
+ * intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the invalidate
@@ -2344,6 +2373,7 @@ unlock:
*/
void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -2367,14 +2397,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
intel_dp->psr.source_support = true;
- if (dev_priv->params.enable_psr == -1)
- if (!dev_priv->vbt.psr.enable)
- dev_priv->params.enable_psr = 0;
-
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
- intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
+ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
INIT_WORK(&intel_dp->psr.work, intel_psr_work);
INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
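clip_area_update() now takes the pipe source rectangle and drops damage that does not intersect it before growing the accumulated selective-fetch area. The following is a self-contained userspace sketch of that behaviour with drm_rect mimicked by a plain struct; it illustrates the logic only, not the DRM helpers, and the rectangles in main() are invented.

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static bool rect_intersect(struct rect *r, const struct rect *clip)
{
	if (r->x1 < clip->x1) r->x1 = clip->x1;
	if (r->y1 < clip->y1) r->y1 = clip->y1;
	if (r->x2 > clip->x2) r->x2 = clip->x2;
	if (r->y2 > clip->y2) r->y2 = clip->y2;
	return r->x1 < r->x2 && r->y1 < r->y2;	/* non-empty after clipping? */
}

static void clip_area_update(struct rect *overlap, struct rect damage,
			     const struct rect *pipe_src)
{
	if (!rect_intersect(&damage, pipe_src))
		return;			/* fully outside the pipe: ignore */

	if (overlap->y1 == -1) {	/* first contribution */
		overlap->y1 = damage.y1;
		overlap->y2 = damage.y2;
		return;
	}
	if (damage.y1 < overlap->y1)
		overlap->y1 = damage.y1;
	if (damage.y2 > overlap->y2)
		overlap->y2 = damage.y2;
}

int main(void)
{
	struct rect pipe_src = { 0, 0, 1920, 1080 };
	struct rect pipe_clip = { 0, -1, 1920, -1 };

	clip_area_update(&pipe_clip, (struct rect){ 0, 200, 1920, 400 }, &pipe_src);
	clip_area_update(&pipe_clip, (struct rect){ 0, 900, 1920, 1300 }, &pipe_src);
	printf("selective fetch rows: %d..%d\n", pipe_clip.y1, pipe_clip.y2);
	return 0;
}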
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index d81855d57cdc..19122bc6d2ab 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2869,6 +2869,7 @@ static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2900,6 +2901,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
+ intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
+
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -2908,7 +2911,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
- intel_panel_add_edid_fixed_modes(intel_connector, false);
+ intel_panel_add_edid_fixed_modes(intel_connector, false, false);
}
intel_panel_init(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 0dd4775e8195..0bdbedc67d7d 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -517,6 +517,37 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+/* values in the below table are calculated using the algo */
+static const struct intel_mpllb_state dg2_hdmi_297 = {
+ .clock = 297000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_594 = {
.clock = 594000,
.ref_control =
@@ -551,6 +582,7 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_27_0,
&dg2_hdmi_74_25,
&dg2_hdmi_148_5,
+ &dg2_hdmi_297,
&dg2_hdmi_594,
NULL,
};
@@ -597,7 +629,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock <= tables[i]->clock) {
+ if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->mpllb_state = *tables[i];
return 0;
}
@@ -781,3 +813,46 @@ int intel_snps_phy_check_hdmi_link_rate(int clock)
return MODE_CLOCK_RANGE;
}
+
+void intel_mpllb_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_mpllb_state mpllb_hw_state = { 0 };
+ struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_encoder *encoder;
+
+ if (!IS_DG2(i915))
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
+
+#define MPLLB_CHECK(__name) \
+ I915_STATE_WARN(mpllb_sw_state->__name != mpllb_hw_state.__name, \
+ "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \
+ crtc->base.base.id, crtc->base.name, \
+ __stringify(__name), \
+ mpllb_sw_state->__name, mpllb_hw_state.__name)
+
+ MPLLB_CHECK(mpllb_cp);
+ MPLLB_CHECK(mpllb_div);
+ MPLLB_CHECK(mpllb_div2);
+ MPLLB_CHECK(mpllb_fracn1);
+ MPLLB_CHECK(mpllb_fracn2);
+ MPLLB_CHECK(mpllb_sscen);
+ MPLLB_CHECK(mpllb_sscstep);
+
+ /*
+ * ref_control is handled by the hardware/firmware and never
+ * programmed by the software, but the proper values are supplied
+ * in the bspec for verification purposes.
+ */
+ MPLLB_CHECK(ref_control);
+
+#undef MPLLB_CHECK
+}
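Switching the intel_mpllb_calc_state() lookup from '<=' to '==' means only exact pre-computed clocks are accepted, which is why the 297 MHz entry is added alongside it. A minimal standalone sketch of the exact-match lookup follows; the table contents and struct layout are illustrative, not the real register tables.

#include <stddef.h>
#include <stdio.h>

struct mpllb_state { int clock; /* kHz */ };

static const struct mpllb_state hdmi_25_175 = { 25175 };
static const struct mpllb_state hdmi_148_5  = { 148500 };
static const struct mpllb_state hdmi_297    = { 297000 };
static const struct mpllb_state hdmi_594    = { 594000 };

static const struct mpllb_state *const hdmi_tables[] = {
	&hdmi_25_175, &hdmi_148_5, &hdmi_297, &hdmi_594, NULL,
};

static const struct mpllb_state *lookup(int port_clock)
{
	for (size_t i = 0; hdmi_tables[i]; i++)
		if (port_clock == hdmi_tables[i]->clock)	/* was '<=' */
			return hdmi_tables[i];
	return NULL;	/* unsupported clock: caller returns -EINVAL */
}

int main(void)
{
	printf("297000: %s\n", lookup(297000) ? "found" : "not found");
	printf("296703: %s\n", lookup(296703) ? "found" : "not found");
	return 0;
}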
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 11dcd6deb070..557ef820bc0b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -9,8 +9,9 @@
#include <linux/types.h>
struct drm_i915_private;
-struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_encoder;
struct intel_mpllb_state;
enum phy;
@@ -31,5 +32,7 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
int intel_snps_phy_check_hdmi_link_rate(int clock);
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_mpllb_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state);
#endif /* __INTEL_SNPS_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 7c0df80612d0..2713faad0625 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8b822ea3755..6773840f6cc7 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -494,7 +494,8 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
}
live_status_mask = tc_port_live_status_mask(dig_port);
- if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) {
+ if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
+ !dig_port->tc_legacy_port) {
drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
dig_port->tc_port_name, live_status_mask);
goto out_set_tbt_alt_mode;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 4b98bab3b890..509b0a419c20 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -182,6 +182,10 @@ struct bdb_general_features {
#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+/* Device handle */
+#define DEVICE_HANDLE_LFP1 0x0008
+#define DEVICE_HANDLE_LFP2 0x0080
+
/* Pre 915 */
#define DEVICE_TYPE_NONE 0x00
#define DEVICE_TYPE_CRT 0x01
@@ -564,7 +568,9 @@ struct bdb_driver_features {
u16 tbt_enabled:1;
u16 psr_enabled:1;
u16 ips_enabled:1;
- u16 reserved3:4;
+ u16 reserved3:1;
+ u16 dmrrs_enabled:1;
+ u16 reserved4:2;
u16 pc_feature_valid:1;
} __packed;
@@ -636,6 +642,7 @@ struct bdb_sdvo_panel_dtds {
#define EDP_30BPP 2
#define EDP_RATE_1_62 0
#define EDP_RATE_2_7 1
+#define EDP_RATE_5_4 2
#define EDP_LANE_1 0
#define EDP_LANE_2 1
#define EDP_LANE_4 3
@@ -666,6 +673,16 @@ struct edp_full_link_params {
u8 vswing:4;
} __packed;
+struct edp_apical_params {
+ u32 panel_oui;
+ u32 dpcd_base_address;
+ u32 dpcd_idridix_control_0;
+ u32 dpcd_option_select;
+ u32 dpcd_backlight;
+ u32 ambient_light;
+ u32 backlight_scale;
+} __packed;
+
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
@@ -681,15 +698,16 @@ struct bdb_edp {
struct edp_pwm_delays pwm_delays[16]; /* 186 */
u16 full_link_params_provided; /* 199 */
struct edp_full_link_params full_link_params[16]; /* 199 */
+ u16 apical_enable; /* 203 */
+ struct edp_apical_params apical_params[16]; /* 203 */
+ u16 edp_fast_link_training_rate[16]; /* 224 */
+ u16 edp_max_port_link_rate[16]; /* 244 */
} __packed;
/*
* Block 40 - LFP Data Block
*/
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
struct bdb_lvds_options {
u8 panel_type;
u8 panel_type2; /* 212 */
@@ -717,6 +735,7 @@ struct bdb_lvds_options {
u16 lcdvcc_s0_enable; /* 200 */
u32 rotation; /* 228 */
+ u32 position; /* 240 */
} __packed;
/*
@@ -843,28 +862,43 @@ struct bdb_lfp_backlight_data {
u8 level[16]; /* Obsolete from 234+ */
struct lfp_backlight_control_method backlight_control[16];
struct lfp_brightness_level brightness_level[16]; /* 234+ */
- struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
- u8 brightness_precision_bits[16]; /* 236+ */
+ struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
+ u8 brightness_precision_bits[16]; /* 236+ */
+ u16 hdr_dpcd_refresh_timeout[16]; /* 239+ */
} __packed;
/*
* Block 44 - LFP Power Conservation Features Block
*/
+struct lfp_power_features {
+ u8 reserved1:1;
+ u8 power_conservation_pref:3;
+ u8 reserved2:1;
+ u8 lace_enabled_status:1;
+ u8 lace_support:1;
+ u8 als_enable:1;
+} __packed;
struct als_data_entry {
u16 backlight_adjust;
u16 lux;
} __packed;
-struct agressiveness_profile_entry {
- u8 dpst_agressiveness : 4;
- u8 lace_agressiveness : 4;
+struct aggressiveness_profile_entry {
+ u8 dpst_aggressiveness : 4;
+ u8 lace_aggressiveness : 4;
+} __packed;
+
+struct aggressiveness_profile2_entry {
+ u8 opst_aggressiveness : 4;
+ u8 elp_aggressiveness : 4;
} __packed;
struct bdb_lfp_power {
- u8 lfp_feature_bits;
+ struct lfp_power_features features;
struct als_data_entry als[5];
- u8 lace_aggressiveness_profile;
+ u8 lace_aggressiveness_profile:3;
+ u8 reserved1:5;
u16 dpst;
u16 psr;
u16 drrs;
@@ -873,9 +907,12 @@ struct bdb_lfp_power {
u16 dmrrs;
u16 adb;
u16 lace_enabled_status;
- struct agressiveness_profile_entry aggressivenes[16];
+ struct aggressiveness_profile_entry aggressiveness[16];
u16 hobl; /* 232+ */
u16 vrr_feature_enabled; /* 233+ */
+ u16 elp; /* 247+ */
+ u16 opst; /* 247+ */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
} __packed;
/*
@@ -885,8 +922,10 @@ struct bdb_lfp_power {
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */
+ struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */
+ u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */
} __packed;
/*
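The block 44 rework replaces the opaque lfp_feature_bits byte with a named bitfield layout. The sketch below shows how such a packed single-byte bitfield decodes a raw VBT byte; bitfield bit order is compiler-defined, so this assumes the LSB-first layout used by the compilers the kernel supports, and the sample byte is invented.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lfp_power_features {
	uint8_t reserved1:1;
	uint8_t power_conservation_pref:3;
	uint8_t reserved2:1;
	uint8_t lace_enabled_status:1;
	uint8_t lace_support:1;
	uint8_t als_enable:1;
} __attribute__((packed));

static_assert(sizeof(struct lfp_power_features) == 1,
	      "block 44 feature field is a single byte");

int main(void)
{
	uint8_t raw = 0xC2;	/* hypothetical VBT byte */
	struct lfp_power_features f;

	memcpy(&f, &raw, sizeof(f));
	printf("pref=%d lace_support=%d als=%d\n",
	       f.power_conservation_pref, f.lace_support, f.als_enable);
	return 0;
}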
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 396f2f994fa0..04250a0fec3c 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -9,25 +9,35 @@
#include "intel_display_types.h"
#include "intel_vrr.h"
-bool intel_vrr_is_capable(struct drm_connector *connector)
+bool intel_vrr_is_capable(struct intel_connector *connector)
{
+ const struct drm_display_info *info = &connector->base.display_info;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp;
- const struct drm_display_info *info = &connector->display_info;
- struct drm_i915_private *i915 = to_i915(connector->dev);
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
- connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- return false;
- intel_dp = intel_attached_dp(to_intel_connector(connector));
/*
* DP Sink is capable of VRR video timings if
* Ignore MSA bit is set in DPCD.
 * EDID monitor range also should be at least 10 for reasonable
* Adaptive Sync or Variable Refresh Rate end user experience.
*/
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_eDP:
+ if (!connector->panel.vbt.vrr)
+ return false;
+ fallthrough;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ intel_dp = intel_attached_dp(connector);
+
+ if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
+ return false;
+
+ break;
+ default:
+ return false;
+ }
+
return HAS_VRR(i915) &&
- drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
@@ -97,7 +107,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- if (!intel_vrr_is_capable(&connector->base))
+ if (!intel_vrr_is_capable(connector))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
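After this change intel_vrr_is_capable() branches on the connector type: eDP additionally requires the per-panel VBT VRR flag, DisplayPort requires the Ignore-MSA DPCD capability, and both still require platform VRR support plus a usable EDID monitor range. A standalone sketch of that decision with the inputs flattened to plain values; the struct and field names here are illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum connector_type { CONNECTOR_EDP, CONNECTOR_DP, CONNECTOR_OTHER };

struct vrr_inputs {
	enum connector_type type;
	bool vbt_vrr;			/* panel.vbt.vrr (eDP only) */
	bool sink_ignore_msa;		/* DPCD: video without timing MSA */
	bool has_vrr;			/* platform support */
	int min_vfreq, max_vfreq;	/* EDID monitor range */
};

static bool vrr_is_capable(const struct vrr_inputs *in)
{
	switch (in->type) {
	case CONNECTOR_EDP:
		if (!in->vbt_vrr)
			return false;
		/* fallthrough */
	case CONNECTOR_DP:
		if (!in->sink_ignore_msa)
			return false;
		break;
	default:
		return false;
	}

	return in->has_vrr &&
	       in->max_vfreq - in->min_vfreq > 10;
}

int main(void)
{
	struct vrr_inputs edp = {
		.type = CONNECTOR_EDP, .vbt_vrr = true,
		.sink_ignore_msa = true, .has_vrr = true,
		.min_vfreq = 48, .max_vfreq = 120,
	};

	printf("eDP VRR capable: %s\n", vrr_is_capable(&edp) ? "yes" : "no");
	return 0;
}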
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 1c2da572693d..9fda1135b0dd 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-struct drm_connector;
struct drm_connector_state;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
struct intel_encoder;
-bool intel_vrr_is_capable(struct drm_connector *connector);
+bool intel_vrr_is_capable(struct intel_connector *connector);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index caa03324a733..c11e15a93164 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 1954f07f0d3e..b9b1fed99874 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -782,6 +782,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
@@ -838,7 +839,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* the delay in that case. If there is no deassert-seq, then an
* unconditional msleep is used to give the panel time to power-on.
*/
- if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
} else {
@@ -1690,7 +1691,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@@ -1924,13 +1926,15 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->panel_power_off_time = ktime_get_boottime();
- if (dev_priv->vbt.dsi.config->dual_link)
+ intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+
+ if (intel_connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab4c5ab28e4d..dabdfe09f5e5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -933,8 +933,9 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_PERSISTENCE:
if (args->size)
ret = -EINVAL;
- ret = proto_context_set_persistence(fpriv->dev_priv, pc,
- args->value);
+ else
+ ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+ args->value);
break;
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
@@ -1367,7 +1368,8 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
return engine;
}
-static void kill_engines(struct i915_gem_engines *engines, bool ban)
+static void
+kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -1381,9 +1383,15 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
*/
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
+ bool skip = false;
- if (ban && intel_context_ban(ce, NULL))
- continue;
+ if (exit)
+ skip = intel_context_set_exiting(ce);
+ else if (!persistent)
+ skip = intel_context_exit_nonpersistent(ce, NULL);
+
+ if (skip)
+ continue; /* Already marked. */
/*
* Check the current active state of this context; if we
@@ -1395,7 +1403,7 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
engine = active_engine(ce);
/* First attempt to gracefully cancel the context */
- if (engine && !__cancel_engine(engine) && ban)
+ if (engine && !__cancel_engine(engine) && (exit || !persistent))
/*
* If we are unable to send a preemptive pulse to bump
* the context from the GPU, we have to resort to a full
@@ -1407,8 +1415,6 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
static void kill_context(struct i915_gem_context *ctx)
{
- bool ban = (!i915_gem_context_is_persistent(ctx) ||
- !ctx->i915->params.enable_hangcheck);
struct i915_gem_engines *pos, *next;
spin_lock_irq(&ctx->stale.lock);
@@ -1421,7 +1427,8 @@ static void kill_context(struct i915_gem_context *ctx)
spin_unlock_irq(&ctx->stale.lock);
- kill_engines(pos, ban);
+ kill_engines(pos, !ctx->i915->params.enable_hangcheck,
+ i915_gem_context_is_persistent(ctx));
spin_lock_irq(&ctx->stale.lock);
GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
@@ -1467,7 +1474,8 @@ static void engines_idle_release(struct i915_gem_context *ctx,
kill:
if (list_empty(&engines->link)) /* raced, already closed */
- kill_engines(engines, true);
+ kill_engines(engines, true,
+ i915_gem_context_is_persistent(ctx));
i915_sw_fence_commit(&engines->fence);
}
@@ -1875,6 +1883,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
{
const struct sseu_dev_info *device = &gt->info.sseu;
struct drm_i915_private *i915 = gt->i915;
+ unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
/* No zeros in any field. */
if (!user->slice_mask || !user->subslice_mask ||
@@ -1901,7 +1910,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
if (user->slice_mask & ~device->slice_mask)
return -EINVAL;
- if (user->subslice_mask & ~device->subslice_mask[0])
+ if (user->subslice_mask & ~dev_subslice_mask)
return -EINVAL;
if (user->max_eus_per_subslice > device->max_eus_per_subslice)
@@ -1915,7 +1924,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
/* Part specific restrictions. */
if (GRAPHICS_VER(i915) == 11) {
unsigned int hw_s = hweight8(device->slice_mask);
- unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
+ unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
unsigned int req_s = hweight8(context->slice_mask);
unsigned int req_ss = hweight8(context->subslice_mask);
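The kill_engines() rework replaces the single 'ban' flag with separate 'exit' and 'persistent' inputs. The sketch below is a simplified decision model of which outcome a context sees (kept running, preempted by a pulse, or escalated to a full reset); it deliberately ignores the marking/skip bookkeeping and the GPU details, so it is an approximation of the policy, not the driver code.

#include <stdbool.h>
#include <stdio.h>

enum kill_outcome { KEEP_RUNNING, PREEMPTED, FULL_RESET };

static enum kill_outcome kill_engine(bool exiting, bool persistent, bool pulse_ok)
{
	/*
	 * A persistent context is only targeted when hangcheck is
	 * disabled (the 'exiting' case); otherwise it may keep running
	 * past context closure.
	 */
	if (!exiting && persistent)
		return KEEP_RUNNING;

	/*
	 * Prefer a preemptive pulse; escalate to a full GPU reset only
	 * when the pulse cannot evict the context.
	 */
	return pulse_ok ? PREEMPTED : FULL_RESET;
}

int main(void)
{
	printf("persistent, hangcheck on     : %d\n", kill_engine(false, true, true));
	printf("non-persistent, pulse failed : %d\n", kill_engine(false, false, false));
	return 0;
}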
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3e5d6057b3ef..1674b0c5802b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -35,12 +35,12 @@ bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (obj->cache_dirty)
return false;
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
- return true;
-
if (IS_DGFX(i915))
return false;
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ return true;
+
/* Currently in use by HW (display engine)? Keep flushed. */
return i915_gem_object_is_framebuffer(obj);
}
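A small sketch of the reordered i915_gem_cpu_write_needs_clflush() checks, with the object and device state flattened to booleans: the discrete-GPU test now runs before the write-coherency test, so DGFX objects never report needing a clflush. Illustrative model only.

#include <stdbool.h>
#include <stdio.h>

static bool cpu_write_needs_clflush(bool cache_dirty, bool is_dgfx,
				    bool coherent_for_write, bool is_framebuffer)
{
	if (cache_dirty)
		return false;
	if (is_dgfx)			/* moved up: never flush on discrete */
		return false;
	if (!coherent_for_write)
		return true;
	return is_framebuffer;		/* in use by the display engine? keep flushed */
}

int main(void)
{
	printf("dgfx, non-coherent: %d\n",
	       cpu_write_needs_clflush(false, true, false, false));
	return 0;
}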
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b3383e047505..30fe847c6664 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
}
- err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ /* Reserve enough slots to accommodate composite fences */
+ err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
if (err)
return err;
@@ -1251,14 +1252,12 @@ static void *reloc_iomap(struct i915_vma *batch,
* Only attempt to pin the batch buffer to ggtt if the current batch
* is not inside ggtt, or the batch buffer is not misplaced.
*/
- if (!i915_is_ggtt(batch->vm)) {
+ if (!i915_is_ggtt(batch->vm) ||
+ !i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) {
vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
- } else if (i915_vma_is_map_and_fenceable(batch)) {
- __i915_vma_pin(batch);
- vma = batch;
}
if (vma == ERR_PTR(-EDEADLK))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index c2a3e388fcb4..4eed3dd90ba8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -409,6 +409,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
{
struct address_space *mapping = obj->base.filp->f_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
u64 remain, offset;
unsigned int pg;
@@ -466,9 +467,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err)
return err;
- err = pagecache_write_begin(obj->base.filp, mapping,
- offset, len, 0,
- &page, &data);
+ err = aops->write_begin(obj->base.filp, mapping, offset, len,
+ &page, &data);
if (err < 0)
return err;
@@ -478,9 +478,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
len);
kunmap_atomic(vaddr);
- err = pagecache_write_end(obj->base.filp, mapping,
- offset, len, len - unwritten,
- page, data);
+ err = aops->write_end(obj->base.filp, mapping, offset, len,
+ len - unwritten, page, data);
if (err < 0)
return err;
@@ -624,6 +623,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
{
struct drm_i915_gem_object *obj;
struct file *file;
+ const struct address_space_operations *aops;
resource_size_t offset;
int err;
@@ -635,15 +635,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
+ aops = file->f_mapping->a_ops;
offset = 0;
do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
struct page *page;
void *pgdata, *vaddr;
- err = pagecache_write_begin(file, file->f_mapping,
- offset, len, 0,
- &page, &pgdata);
+ err = aops->write_begin(file, file->f_mapping, offset, len,
+ &page, &pgdata);
if (err < 0)
goto fail;
@@ -651,9 +651,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
memcpy(vaddr, data, len);
kunmap(page);
- err = pagecache_write_end(file, file->f_mapping,
- offset, len, len,
- page, pgdata);
+ err = aops->write_end(file, file->f_mapping, offset, len, len,
+ page, pgdata);
if (err < 0)
goto fail;
@@ -671,17 +670,10 @@ fail:
static int init_shmem(struct intel_memory_region *mem)
{
- int err;
-
- err = i915_gemfs_init(mem->i915);
- if (err) {
- DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
- err);
- }
-
+ i915_gemfs_init(mem->i915);
intel_memory_region_set_name(mem, "system");
- return 0; /* Don't error, we can simply fallback to the kernel mnt */
+	return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}
static int release_shmem(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 6a6ff98a8746..1030053571a2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -36,7 +36,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static int drop_pages(struct drm_i915_gem_object *obj,
+static bool drop_pages(struct drm_i915_gem_object *obj,
unsigned long shrink, bool trylock_vm)
{
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 47b5e0e342ab..166d0a4b9e8c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,8 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
+#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -834,8 +836,8 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
} else {
resource_size_t lmem_range;
- lmem_range = intel_gt_read_register(&i915->gt0, XEHPSDV_TILE0_ADDR_RANGE) & 0xFFFF;
- lmem_size = lmem_range >> XEHPSDV_TILE_LMEM_RANGE_SHIFT;
+ lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 80ac0db1ae8c..85518b28cd72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -114,7 +114,7 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
return i915_gem_fence_size(i915, size, tiling, stride);
}
-/* Check pitch constriants for all chips & tiling formats */
+/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride)
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index ee87874e59dc..46b9a17d6abc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -11,16 +11,11 @@
#include "i915_gemfs.h"
#include "i915_utils.h"
-int i915_gemfs_init(struct drm_i915_private *i915)
+void i915_gemfs_init(struct drm_i915_private *i915)
{
char huge_opt[] = "huge=within_size"; /* r/w */
struct file_system_type *type;
struct vfsmount *gemfs;
- char *opts;
-
- type = get_fs_type("tmpfs");
- if (!type)
- return -ENODEV;
/*
* By creating our own shmemfs mountpoint, we can pass in
@@ -28,30 +23,35 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*
* One example, although it is probably better with a per-file
* control, is selecting huge page allocations ("huge=within_size").
- * However, we only do so to offset the overhead of iommu lookups
- * due to bandwidth issues (slow reads) on Broadwell+.
+ * However, we only do so on platforms which benefit from it, or to
+	 * offset the overhead of iommu lookups, where with the latter it is a
+	 * net win even on platforms which would otherwise see some performance
+	 * regressions, such as the slow reads issue on Broadwell and Skylake.
*/
- opts = NULL;
- if (i915_vtd_active(i915)) {
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- opts = huge_opt;
- drm_info(&i915->drm,
- "Transparent Hugepage mode '%s'\n",
- opts);
- } else {
- drm_notice(&i915->drm,
- "Transparent Hugepage support is recommended for optimal performance when IOMMU is enabled!\n");
- }
- }
-
- gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, opts);
+ if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
+ return;
+
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ goto err;
+
+ type = get_fs_type("tmpfs");
+ if (!type)
+ goto err;
+
+ gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
if (IS_ERR(gemfs))
- return PTR_ERR(gemfs);
+ goto err;
i915->mm.gemfs = gemfs;
-
- return 0;
+ drm_info(&i915->drm, "Using Transparent Hugepages\n");
+ return;
+
+err:
+ drm_notice(&i915->drm,
+ "Transparent Hugepage support is recommended for optimal performance%s\n",
+ GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
+ " when IOMMU is enabled!");
}
void i915_gemfs_fini(struct drm_i915_private *i915)
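The new i915_gemfs_init() flow only attempts the private huge-page tmpfs mount when the platform is expected to benefit (gen11+) or an IOMMU is active, and only when transparent hugepages are built in; any failure falls back to the default kernel shmem mount with a notice. A standalone sketch of that decision, with the inputs flattened to plain values:

#include <stdbool.h>
#include <stdio.h>

enum gemfs_choice { GEMFS_KERNEL_MNT, GEMFS_KERNEL_MNT_NOTICE, GEMFS_PRIVATE_HUGE };

static enum gemfs_choice pick_gemfs(int graphics_ver, bool vtd_active,
				    bool thp_enabled, bool mount_ok)
{
	if (graphics_ver < 11 && !vtd_active)
		return GEMFS_KERNEL_MNT;		/* no benefit expected */

	if (!thp_enabled || !mount_ok)
		return GEMFS_KERNEL_MNT_NOTICE;		/* warn, then fall back */

	return GEMFS_PRIVATE_HUGE;			/* "huge=within_size" mount */
}

int main(void)
{
	printf("gen12 + THP      : %d\n", pick_gemfs(12, false, true, true));
	printf("gen9, no IOMMU   : %d\n", pick_gemfs(9, false, true, true));
	printf("gen9 + IOMMU, !THP: %d\n", pick_gemfs(9, true, false, true));
	return 0;
}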
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 2a1e59af3e4a..5d835e44c4f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -9,8 +9,7 @@
struct drm_i915_private;
-int i915_gemfs_init(struct drm_i915_private *i915);
-
+void i915_gemfs_init(struct drm_i915_private *i915);
void i915_gemfs_fini(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ddd0772fd828..3cfc621ef363 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -6,6 +6,7 @@
#include "i915_selftest.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
@@ -18,10 +19,71 @@
#include "huge_gem_object.h"
#include "mock_context.h"
+#define OW_SIZE 16 /* in bytes */
+#define F_SUBTILE_SIZE 64 /* in bytes */
+#define F_TILE_WIDTH 128 /* in bytes */
+#define F_TILE_HEIGHT 32 /* in pixels */
+#define F_SUBTILE_WIDTH OW_SIZE /* in bytes */
+#define F_SUBTILE_HEIGHT 4 /* in pixels */
+
+static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp)
+{
+ int tile_base;
+ int tile_x, tile_y;
+ int swizzle, subtile;
+ int pixel_size = bpp / 8;
+ int pos;
+
+ /*
+ * Subtile remapping for F tile. Note that map[a]==b implies map[b]==a
+	 * so we can use the same table to tile and untile.
+ */
+ static const u8 f_subtile_map[] = {
+ 0, 1, 2, 3, 8, 9, 10, 11,
+ 4, 5, 6, 7, 12, 13, 14, 15,
+ 16, 17, 18, 19, 24, 25, 26, 27,
+ 20, 21, 22, 23, 28, 29, 30, 31,
+ 32, 33, 34, 35, 40, 41, 42, 43,
+ 36, 37, 38, 39, 44, 45, 46, 47,
+ 48, 49, 50, 51, 56, 57, 58, 59,
+ 52, 53, 54, 55, 60, 61, 62, 63
+ };
+
+ x *= pixel_size;
+ /*
+ * Where does the 4k tile start (in bytes)? This is the same for Y and
+ * F so we can use the Y-tile algorithm to get to that point.
+ */
+ tile_base =
+ y / F_TILE_HEIGHT * stride * F_TILE_HEIGHT +
+ x / F_TILE_WIDTH * 4096;
+
+ /* Find pixel within tile */
+ tile_x = x % F_TILE_WIDTH;
+ tile_y = y % F_TILE_HEIGHT;
+
+ /* And figure out the subtile within the 4k tile */
+ subtile = tile_y / F_SUBTILE_HEIGHT * 8 + tile_x / F_SUBTILE_WIDTH;
+
+ /* Swizzle the subtile number according to the bspec diagram */
+ swizzle = f_subtile_map[subtile];
+
+ /* Calculate new position */
+ pos = tile_base +
+ swizzle * F_SUBTILE_SIZE +
+ tile_y % F_SUBTILE_HEIGHT * OW_SIZE +
+ tile_x % F_SUBTILE_WIDTH;
+
+ GEM_BUG_ON(!IS_ALIGNED(pos, pixel_size));
+
+ return pos / pixel_size * 4;
+}
+
enum client_tiling {
CLIENT_TILING_LINEAR,
CLIENT_TILING_X,
CLIENT_TILING_Y,
+ CLIENT_TILING_4,
CLIENT_NUM_TILING_TYPES
};
@@ -45,6 +107,36 @@ struct tiled_blits {
u32 height;
};
+static bool supports_x_tiling(const struct drm_i915_private *i915)
+{
+ int gen = GRAPHICS_VER(i915);
+
+ if (gen < 12)
+ return true;
+
+ if (!HAS_LMEM(i915) || IS_DG1(i915))
+ return false;
+
+ return true;
+}
+
+static bool fast_blit_ok(const struct blit_buffer *buf)
+{
+ int gen = GRAPHICS_VER(buf->vma->vm->i915);
+
+ if (gen < 9)
+ return false;
+
+ if (gen < 12)
+ return true;
+
+	/* filter out platforms without X-tile support in fastblit */
+ if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ return false;
+
+ return true;
+}
+
static int prepare_blit(const struct tiled_blits *t,
struct blit_buffer *dst,
struct blit_buffer *src,
@@ -59,51 +151,103 @@ static int prepare_blit(const struct tiled_blits *t,
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
- cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
- if (src->tiling == CLIENT_TILING_Y)
- cmd |= BCS_SRC_Y;
- if (dst->tiling == CLIENT_TILING_Y)
- cmd |= BCS_DST_Y;
- *cs++ = cmd;
-
- cmd = MI_FLUSH_DW;
- if (ver >= 8)
- cmd++;
- *cs++ = cmd;
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = 0;
-
- cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
- if (ver >= 8)
- cmd += 2;
-
- src_pitch = t->width * 4;
- if (src->tiling) {
- cmd |= XY_SRC_COPY_BLT_SRC_TILED;
- src_pitch /= 4;
- }
+ if (fast_blit_ok(dst) && fast_blit_ok(src)) {
+ struct intel_gt *gt = t->ce->engine->gt;
+ u32 src_tiles = 0, dst_tiles = 0;
+ u32 src_4t = 0, dst_4t = 0;
+
+		/* Need to program BLIT_CCTL if it has not been done previously,
+		 * before using XY_FAST_COPY_BLT
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base));
+ *cs++ = (BLIT_CCTL_SRC_MOCS(gt->mocs.uc_index) |
+ BLIT_CCTL_DST_MOCS(gt->mocs.uc_index));
+
+ src_pitch = t->width; /* in dwords */
+ if (src->tiling == CLIENT_TILING_4) {
+ src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
+ src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
+ } else if (src->tiling == CLIENT_TILING_Y) {
+ src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
+ } else if (src->tiling == CLIENT_TILING_X) {
+ src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
+ } else {
+ src_pitch *= 4; /* in bytes */
+ }
- dst_pitch = t->width * 4;
- if (dst->tiling) {
- cmd |= XY_SRC_COPY_BLT_DST_TILED;
- dst_pitch /= 4;
- }
+ dst_pitch = t->width; /* in dwords */
+ if (dst->tiling == CLIENT_TILING_4) {
+ dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
+ dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
+ } else if (dst->tiling == CLIENT_TILING_Y) {
+ dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
+ } else if (dst->tiling == CLIENT_TILING_X) {
+ dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
+ } else {
+ dst_pitch *= 4; /* in bytes */
+ }
- *cs++ = cmd;
- *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
- *cs++ = 0;
- *cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
- if (use_64b_reloc)
+ *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2) |
+ src_tiles | dst_tiles;
+ *cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
+ *cs++ = 0;
+ *cs++ = t->height << 16 | t->width;
+ *cs++ = lower_32_bits(dst->vma->node.start);
*cs++ = upper_32_bits(dst->vma->node.start);
- *cs++ = 0;
- *cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
- if (use_64b_reloc)
+ *cs++ = 0;
+ *cs++ = src_pitch;
+ *cs++ = lower_32_bits(src->vma->node.start);
*cs++ = upper_32_bits(src->vma->node.start);
+ } else {
+ if (ver >= 6) {
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
+ cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
+ if (src->tiling == CLIENT_TILING_Y)
+ cmd |= BCS_SRC_Y;
+ if (dst->tiling == CLIENT_TILING_Y)
+ cmd |= BCS_DST_Y;
+ *cs++ = cmd;
+
+ cmd = MI_FLUSH_DW;
+ if (ver >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
+ if (ver >= 8)
+ cmd += 2;
+
+ src_pitch = t->width * 4;
+ if (src->tiling) {
+ cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+ src_pitch /= 4;
+ }
+
+ dst_pitch = t->width * 4;
+ if (dst->tiling) {
+ cmd |= XY_SRC_COPY_BLT_DST_TILED;
+ dst_pitch /= 4;
+ }
+
+ *cs++ = cmd;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
+ *cs++ = 0;
+ *cs++ = t->height << 16 | t->width;
+ *cs++ = lower_32_bits(dst->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = 0;
+ *cs++ = src_pitch;
+ *cs++ = lower_32_bits(src->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(src->vma->node.start);
+ }
*cs++ = MI_BATCH_BUFFER_END;
@@ -181,7 +325,13 @@ static int tiled_blits_create_buffers(struct tiled_blits *t,
t->buffers[i].vma = vma;
t->buffers[i].tiling =
- i915_prandom_u32_max_state(CLIENT_TILING_Y + 1, prng);
+ i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng);
+
+ /* Platforms support either TileY or Tile4, not both */
+ if (HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_Y)
+ t->buffers[i].tiling = CLIENT_TILING_4;
+ else if (!HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_4)
+ t->buffers[i].tiling = CLIENT_TILING_Y;
}
return 0;
@@ -206,7 +356,8 @@ static u64 swizzle_bit(unsigned int bit, u64 offset)
static u64 tiled_offset(const struct intel_gt *gt,
u64 v,
unsigned int stride,
- enum client_tiling tiling)
+ enum client_tiling tiling,
+ int x_pos, int y_pos)
{
unsigned int swizzle;
u64 x, y;
@@ -216,7 +367,12 @@ static u64 tiled_offset(const struct intel_gt *gt,
y = div64_u64_rem(v, stride, &x);
- if (tiling == CLIENT_TILING_X) {
+ if (tiling == CLIENT_TILING_4) {
+ v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
+
+ /* no swizzling for f-tiling */
+ swizzle = I915_BIT_6_SWIZZLE_NONE;
+ } else if (tiling == CLIENT_TILING_X) {
v = div64_u64_rem(y, 8, &y) * stride * 8;
v += y * 512;
v += div64_u64_rem(x, 512, &x) << 12;
@@ -259,6 +415,7 @@ static const char *repr_tiling(enum client_tiling tiling)
case CLIENT_TILING_LINEAR: return "linear";
case CLIENT_TILING_X: return "X";
case CLIENT_TILING_Y: return "Y";
+ case CLIENT_TILING_4: return "F";
default: return "unknown";
}
}
@@ -284,7 +441,7 @@ static int verify_buffer(const struct tiled_blits *t,
} else {
u64 v = tiled_offset(buf->vma->vm->gt,
p * 4, t->width * 4,
- buf->tiling);
+ buf->tiling, x, y);
if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
ret = -EINVAL;
@@ -504,6 +661,9 @@ static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
if (err)
return err;
+ /* Simulating GTT eviction of the same buffer / layout */
+ t->buffers[2].tiling = t->buffers[0].tiling;
+
/* Reposition so that we overlap the old addresses, and slightly off */
err = tiled_blit(t,
&t->buffers[2], t->hole + t->align,
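
The hunk above moves the selftest from the legacy XY_SRC_COPY blit (a single "tiled" flag, with the pitch switching to dwords when tiled) to the GEN9 fast-copy blit, where each surface carries a 2-bit tile-mode field in dword 0 and Tile4 additionally needs a qualifier bit in dword 1. A minimal standalone sketch of that field selection follows; it is illustrative only and not part of the patch, the helper names are invented, and the bit positions are taken from the XY_FAST_COPY_* defines added to intel_gpu_commands.h later in this diff.

#include <stdint.h>
#include <stdio.h>

enum client_tiling { TILING_LINEAR, TILING_X, TILING_Y, TILING_4 };

#define SRC_TILE_MODE(m)  ((uint32_t)(m) << 20)   /* dword 0, bits 21:20 */
#define DST_TILE_MODE(m)  ((uint32_t)(m) << 13)   /* dword 0, bits 14:13 */
#define SRC_TILE4         (1u << 31)              /* dword 1 */
#define DST_TILE4         (1u << 30)              /* dword 1 */
#define TILE_X_MODE       0x1
#define YMAJOR_MODE       0x2

static void fast_copy_tiling_bits(enum client_tiling t, int is_dst,
				  uint32_t *dw0, uint32_t *dw1)
{
	*dw0 = 0;
	*dw1 = 0;
	switch (t) {
	case TILING_4:	/* Tile4 reuses the Y-major encoding plus a dword-1 bit */
		*dw0 = is_dst ? DST_TILE_MODE(YMAJOR_MODE) : SRC_TILE_MODE(YMAJOR_MODE);
		*dw1 = is_dst ? DST_TILE4 : SRC_TILE4;
		break;
	case TILING_Y:
		*dw0 = is_dst ? DST_TILE_MODE(YMAJOR_MODE) : SRC_TILE_MODE(YMAJOR_MODE);
		break;
	case TILING_X:
		*dw0 = is_dst ? DST_TILE_MODE(TILE_X_MODE) : SRC_TILE_MODE(TILE_X_MODE);
		break;
	default:	/* linear: no tiling bits, pitch stays in bytes */
		break;
	}
}

int main(void)
{
	uint32_t dw0, dw1;

	fast_copy_tiling_bits(TILING_4, 1, &dw0, &dw1);
	printf("dst Tile4: dw0=0x%08x dw1=0x%08x\n", dw0, dw1); /* 0x00004000 0x40000000 */
	return 0;
}
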
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 93a67422ca3b..c6ad67b90e8a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -212,7 +212,7 @@ static int __live_parallel_switch1(void *data)
i915_request_add(rq);
}
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ if (i915_request_wait(rq, 0, HZ) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 3e13960615bd..98645797962f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -197,8 +197,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CS_STALL;
- if (engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_FLAGS;
+ if (!HAS_3D_PIPELINE(engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -227,8 +229,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CS_STALL;
- if (engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_FLAGS;
+ if (!HAS_3D_PIPELINE(engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
if (!HAS_FLAT_CCS(rq->engine->i915))
count = 8 + 4;
@@ -272,7 +276,8 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915) &&
(rq->engine->class == VIDEO_DECODE_CLASS ||
rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
- aux_inv = rq->engine->mask & ~BIT(BCS0);
+ aux_inv = rq->engine->mask &
+ ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
if (aux_inv)
cmd += 4;
}
@@ -716,8 +721,10 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
- if (rq->engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_FLAGS;
+ if (!HAS_3D_PIPELINE(rq->engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (rq->engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = gen12_emit_ggtt_write_rcs(cs,
rq->fence.seqno,
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 4070cb5711d8..654a092ed3d6 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -601,6 +601,30 @@ u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
return avg;
}
+bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
+{
+ bool ret = intel_context_set_banned(ce);
+
+ trace_intel_context_ban(ce);
+
+ if (ce->ops->revoke)
+ ce->ops->revoke(ce, rq,
+ INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);
+
+ return ret;
+}
+
+bool intel_context_exit_nonpersistent(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ bool ret = intel_context_set_exiting(ce);
+
+ if (ce->ops->revoke)
+ ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+
+ return ret;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
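
Both new wrappers above funnel into the same backend hook, differing only in how much preemption time the running workload is granted: a ban gives it the minimal 1 ms budget, while a non-persistent exit honours the engine's configured preempt timeout. The sketch below is a simplified, standalone model of that split (all names and the 640 ms value are illustrative; the real backend callback is ops->revoke() from intel_context_types.h):

#include <stdbool.h>
#include <stdio.h>

struct ctx;
struct ctx_ops { void (*revoke)(struct ctx *c, unsigned int preempt_timeout_ms); };
struct ctx {
	const struct ctx_ops *ops;
	unsigned int engine_preempt_timeout_ms;	/* stands in for engine->props.preempt_timeout_ms */
	bool banned, exiting;
};

#define BANNED_PREEMPT_TIMEOUT_MS 1	/* mirrors INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS */

static void ctx_ban(struct ctx *c)
{
	c->banned = true;
	if (c->ops->revoke)	/* kick the context off the hardware quickly */
		c->ops->revoke(c, BANNED_PREEMPT_TIMEOUT_MS);
}

static void ctx_exit_nonpersistent(struct ctx *c)
{
	c->exiting = true;
	if (c->ops->revoke)	/* polite removal: honour the engine's own timeout */
		c->ops->revoke(c, c->engine_preempt_timeout_ms);
}

static void demo_revoke(struct ctx *c, unsigned int ms)
{
	printf("revoke with %u ms preemption budget\n", ms);
}

int main(void)
{
	static const struct ctx_ops ops = { .revoke = demo_revoke };
	struct ctx c = { .ops = &ops, .engine_preempt_timeout_ms = 640 /* example value */ };

	ctx_ban(&c);			/* -> 1 ms */
	ctx_exit_nonpersistent(&c);	/* -> 640 ms */
	return 0;
}
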
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index b7d3214d2cdd..8e2d70630c49 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -25,6 +25,8 @@
##__VA_ARGS__); \
} while (0)
+#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)
+
struct i915_gem_ww_ctx;
void intel_context_init(struct intel_context *ce,
@@ -309,18 +311,27 @@ static inline bool intel_context_set_banned(struct intel_context *ce)
return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}
-static inline bool intel_context_ban(struct intel_context *ce,
- struct i915_request *rq)
+bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{
- bool ret = intel_context_set_banned(ce);
+ return !test_bit(CONTEXT_EXITING, &ce->flags) &&
+ !test_bit(CONTEXT_BANNED, &ce->flags);
+}
- trace_intel_context_ban(ce);
- if (ce->ops->ban)
- ce->ops->ban(ce, rq);
+static inline bool intel_context_is_exiting(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_EXITING, &ce->flags);
+}
- return ret;
+static inline bool intel_context_set_exiting(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
+bool intel_context_exit_nonpersistent(struct intel_context *ce,
+ struct i915_request *rq);
+
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 09f82545789f..d2d75d9c0c8d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -40,7 +40,8 @@ struct intel_context_ops {
int (*alloc)(struct intel_context *ce);
- void (*ban)(struct intel_context *ce, struct i915_request *rq);
+ void (*revoke)(struct intel_context *ce, struct i915_request *rq,
+ unsigned int preempt_timeout_ms);
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
int (*pin)(struct intel_context *ce, void *vaddr);
@@ -122,6 +123,7 @@ struct intel_context {
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11
#define CONTEXT_IS_PARKING 12
+#define CONTEXT_EXITING 13
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 1431f1e9dbee..04e435bce79b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -201,6 +201,8 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine);
+
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 14c6ddbbfde8..283870c65991 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -21,8 +21,9 @@
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
-#include "intel_gt_requests.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
@@ -71,6 +72,62 @@ static const struct engine_info intel_engines[] = {
{ .graphics_ver = 6, .base = BLT_RING_BASE }
},
},
+ [BCS1] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 1,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
+ },
+ },
+ [BCS2] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 2,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
+ },
+ },
+ [BCS3] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 3,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
+ },
+ },
+ [BCS4] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 4,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
+ },
+ },
+ [BCS5] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 5,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
+ },
+ },
+ [BCS6] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 6,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
+ },
+ },
+ [BCS7] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 7,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
+ },
+ },
+ [BCS8] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 8,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
+ },
+ },
[VCS0] = {
.class = VIDEO_DECODE_CLASS,
.instance = 0,
@@ -334,6 +391,14 @@ static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
static const u32 engine_reset_domains[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
+ [BCS1] = XEHPC_GRDOM_BLT1,
+ [BCS2] = XEHPC_GRDOM_BLT2,
+ [BCS3] = XEHPC_GRDOM_BLT3,
+ [BCS4] = XEHPC_GRDOM_BLT4,
+ [BCS5] = XEHPC_GRDOM_BLT5,
+ [BCS6] = XEHPC_GRDOM_BLT6,
+ [BCS7] = XEHPC_GRDOM_BLT7,
+ [BCS8] = XEHPC_GRDOM_BLT8,
[VCS0] = GEN11_GRDOM_MEDIA,
[VCS1] = GEN11_GRDOM_MEDIA2,
[VCS2] = GEN11_GRDOM_MEDIA3,
@@ -610,8 +675,8 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return;
- ccs_mask = intel_slicemask_from_dssmask(intel_sseu_get_compute_subslices(&info->sseu),
- ss_per_ccs);
+ ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
+ ss_per_ccs);
/*
* If all DSS in a quadrant are fused off, the corresponding CCS
* engine is not available for use.
@@ -622,6 +687,34 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
}
}
+static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_gt_info *info = &gt->info;
+ unsigned long meml3_mask;
+ unsigned long quad;
+
+ meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
+ meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
+
+ /*
+ * Link Copy engines may be fused off according to meml3_mask. Each
+ */
+ for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
+ unsigned int instance = quad * 2 + 1;
+ intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
+ _BCS(instance));
+
+ if (mask & info->engine_mask) {
+ drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
+ drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
+
+ info->engine_mask &= ~mask;
+ }
+ }
+}
+
/*
* Determine which engines are fused off in our particular hardware.
* Note that we have a catch-22 situation where we need to be able to access
@@ -704,6 +797,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
engine_mask_apply_compute_fuses(gt);
+ engine_mask_apply_copy_fuses(gt);
return info->engine_mask;
}
@@ -1282,10 +1376,10 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
/*
- * Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is
+ * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is
* stopped, set ring stop bit and prefetch disable bit to halt CS
*/
- if (GRAPHICS_VER(engine->i915) == 12)
+ if (IS_GRAPHICS_VER(engine->i915, 11, 12))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
_MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
@@ -1308,6 +1402,18 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
+ /*
+ * TODO: Find out why occasionally stopping the CS times out. Seen
+ * especially with gem_eio tests.
+ *
+ * Occasionally trying to stop the cs times out, but does not adversely
+ * affect functionality. The timeout is set as a config parameter that
+ * defaults to 100ms. In most cases the follow up operation is to wait
+ * for pending MI_FORCE_WAKES. The assumption is that this timeout is
+ * sufficient for any pending MI_FORCEWAKEs to complete. Once root
+ * caused, the caller must check and handle the return from this
+ * function.
+ */
if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
ENGINE_TRACE(engine,
"timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
@@ -1334,12 +1440,76 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
-static u32
-read_subslice_reg(const struct intel_engine_cs *engine,
- int slice, int subslice, i915_reg_t reg)
+static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
+{
+ static const i915_reg_t _reg[I915_NUM_ENGINES] = {
+ [RCS0] = MSG_IDLE_CS,
+ [BCS0] = MSG_IDLE_BCS,
+ [VCS0] = MSG_IDLE_VCS0,
+ [VCS1] = MSG_IDLE_VCS1,
+ [VCS2] = MSG_IDLE_VCS2,
+ [VCS3] = MSG_IDLE_VCS3,
+ [VCS4] = MSG_IDLE_VCS4,
+ [VCS5] = MSG_IDLE_VCS5,
+ [VCS6] = MSG_IDLE_VCS6,
+ [VCS7] = MSG_IDLE_VCS7,
+ [VECS0] = MSG_IDLE_VECS0,
+ [VECS1] = MSG_IDLE_VECS1,
+ [VECS2] = MSG_IDLE_VECS2,
+ [VECS3] = MSG_IDLE_VECS3,
+ [CCS0] = MSG_IDLE_CS,
+ [CCS1] = MSG_IDLE_CS,
+ [CCS2] = MSG_IDLE_CS,
+ [CCS3] = MSG_IDLE_CS,
+ };
+ u32 val;
+
+ if (!_reg[engine->id].reg) {
+ drm_err(&engine->i915->drm,
+ "MSG IDLE undefined for engine id %u\n", engine->id);
+ return 0;
+ }
+
+ val = intel_uncore_read(engine->uncore, _reg[engine->id]);
+
+ /* bits[29:25] & bits[13:9] >> shift */
+ return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
+}
+
+static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{
- return intel_uncore_read_with_mcr_steering(engine->uncore, reg,
- slice, subslice);
+ int ret;
+
+ /* Ensure GPM receives fw up/down after CS is stopped */
+ udelay(1);
+
+ /* Wait for forcewake request to complete in GPM */
+ ret = __intel_wait_for_register_fw(gt->uncore,
+ GEN9_PWRGT_DOMAIN_STATUS,
+ fw_mask, fw_mask, 5000, 0, NULL);
+
+ /* Ensure CS receives fw ack from GPM */
+ udelay(1);
+
+ if (ret)
+ GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
+}
+
+/*
+ * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
+ * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
+ * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
+ * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
+ * are concerned only with the gt reset here, we use a logical OR of pending
+ * forcewakeups from all reset domains and then wait for them to complete by
+ * querying PWRGT_DOMAIN_STATUS.
+ */
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
+{
+ u32 fw_pending = __cs_pending_mi_force_wakes(engine);
+
+ if (fw_pending)
+ __gpm_wait_for_fw_complete(engine->gt, fw_pending);
}
/* NB: please notice the memset */
@@ -1375,28 +1545,33 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
instdone->sampler[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_SAMPLER_INSTDONE);
+ intel_gt_mcr_read(engine->gt,
+ GEN7_SAMPLER_INSTDONE,
+ slice, subslice);
instdone->row[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_ROW_INSTDONE);
+ intel_gt_mcr_read(engine->gt,
+ GEN7_ROW_INSTDONE,
+ slice, subslice);
}
} else {
for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_SAMPLER_INSTDONE);
+ intel_gt_mcr_read(engine->gt,
+ GEN7_SAMPLER_INSTDONE,
+ slice, subslice);
instdone->row[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_ROW_INSTDONE);
+ intel_gt_mcr_read(engine->gt,
+ GEN7_ROW_INSTDONE,
+ slice, subslice);
}
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice)
instdone->geom_svg[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- XEHPG_INSTDONE_GEOM_SVG);
+ intel_gt_mcr_read(engine->gt,
+ XEHPG_INSTDONE_GEOM_SVG,
+ slice, subslice);
}
} else if (GRAPHICS_VER(i915) >= 7) {
instdone->instdone =
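
The pending-forcewake check added above packs two pieces of information into one register read: bits 13:9 of MSG_IDLE report force-wake status, and bits 29:25 say which of those status bits are valid, so the valid mask is shifted down by 16 and ANDed with the status. A standalone demonstration of that expression (mask and shift values assumed from the comments in the hunk, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* bits 13:9 carry pending force-wake status, bits 29:25 say which bits are valid */
#define FW_MASK   0x3e00u	/* GENMASK(13, 9), assumed to match MSG_IDLE_FW_MASK */
#define FW_SHIFT  9

static uint32_t pending_mi_force_wakes(uint32_t msg_idle)
{
	/* same expression as __cs_pending_mi_force_wakes(): valid-bits & status-bits */
	return (msg_idle & (msg_idle >> 16) & FW_MASK) >> FW_SHIFT;
}

int main(void)
{
	/* valid mask 0b00101 in bits 29:25, status 0b00111 in bits 13:9 -> pending 0b00101 */
	uint32_t reg = (0x05u << 25) | (0x07u << 9);

	printf("pending fw domains: 0x%02x\n", pending_mi_force_wakes(reg)); /* 0x05 */
	return 0;
}
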
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 75a0c55c5aa5..889f0df3940b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -8,6 +8,7 @@
#include "i915_reg_defs.h"
+#define RING_EXCC(base) _MMIO((base) + 0x28)
#define RING_TAIL(base) _MMIO((base) + 0x30)
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD(base) _MMIO((base) + 0x34)
@@ -133,6 +134,8 @@
(REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \
REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1))
+#define RING_CSCMDOP(base) _MMIO((base) + 0x20c)
+
/*
* CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
* The lsb of each can be considered a separate enabling bit for encryption.
@@ -149,6 +152,7 @@
REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8) /* gen12+ */
+
#define MI_PREDICATE_RESULT_2(base) _MMIO((base) + 0x3bc)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -172,6 +176,7 @@
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CTX_SR_CTL(base) _MMIO((base) + 0x244)
#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
@@ -196,6 +201,7 @@
#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8)
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30)
#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
@@ -208,7 +214,9 @@
#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0)
#define RING_FORCE_TO_NONPRIV_MASK_VALID \
- (RING_FORCE_TO_NONPRIV_RANGE_MASK | RING_FORCE_TO_NONPRIV_ACCESS_MASK)
+ (RING_FORCE_TO_NONPRIV_RANGE_MASK | \
+ RING_FORCE_TO_NONPRIV_ACCESS_MASK | \
+ RING_FORCE_TO_NONPRIV_DENY)
#define RING_MAX_NONPRIV_SLOTS 12
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 298f2cc7a879..2286f96f5f87 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -35,7 +35,7 @@
#define OTHER_CLASS 4
#define COMPUTE_CLASS 5
#define MAX_ENGINE_CLASS 5
-#define MAX_ENGINE_INSTANCE 7
+#define MAX_ENGINE_INSTANCE 8
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
@@ -99,6 +99,7 @@ struct i915_ctx_workarounds {
#define I915_MAX_SFC (I915_MAX_VCS / 2)
#define I915_MAX_CCS 4
#define I915_MAX_RCS 1
+#define I915_MAX_BCS 9
/*
* Engine IDs definitions.
@@ -107,6 +108,15 @@ struct i915_ctx_workarounds {
enum intel_engine_id {
RCS0 = 0,
BCS0,
+ BCS1,
+ BCS2,
+ BCS3,
+ BCS4,
+ BCS5,
+ BCS6,
+ BCS7,
+ BCS8,
+#define _BCS(n) (BCS0 + (n))
VCS0,
VCS1,
VCS2,
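
With BCS0..BCS8 now contiguous in the engine-id enum, a fused-off meml3 quad can be expressed as a two-bit GENMASK over the corresponding _BCS() pair, which is exactly what engine_mask_apply_copy_fuses() does earlier in this diff. A small standalone illustration of that mask arithmetic (the four-quad count mirrors GEN12_MAX_MSLICES; everything here is illustrative, not part of the patch):

#include <stdio.h>

/* engine ids as in the enum above: RCS0 = 0, BCS0 = 1, ... BCS8 = 9 */
enum { RCS0, BCS0, BCS1, BCS2, BCS3, BCS4, BCS5, BCS6, BCS7, BCS8 };
#define _BCS(n)        (BCS0 + (n))
#define GENMASK(h, l)  (((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned long quad;

	/* each fused-off quad removes the link-copy pair bcs(2*quad+1)/bcs(2*quad+2) */
	for (quad = 0; quad < 4; quad++) {
		unsigned int instance = quad * 2 + 1;
		unsigned int mask = GENMASK(_BCS(instance + 1), _BCS(instance));

		printf("quad %lu fused off -> clear engine mask bits 0x%03x (bcs%u/bcs%u)\n",
		       quad, mask, instance, instance + 1);
	}
	return 0;
}
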
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 86f7a9ac1c39..4b909cb88cdf 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -480,9 +480,9 @@ __execlists_schedule_in(struct i915_request *rq)
if (unlikely(intel_context_is_closed(ce) &&
!intel_engine_has_heartbeat(engine)))
- intel_context_set_banned(ce);
+ intel_context_set_exiting(ce);
- if (unlikely(intel_context_is_banned(ce) || bad_request(rq)))
+ if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
reset_active(rq, engine);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
i915_request_put(rq);
}
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+ if (prio > I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_HIGH;
+ else if (prio < I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_LOW;
+ else
+ return GEN12_CTX_PRIORITY_NORMAL;
+}
+
static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
desc = ce->lrc.desc;
if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
- desc |= lrc_desc_priority(rq_prio(rq));
+ desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
/*
* WaIdleLiteRestore:bdw,skl
@@ -1233,7 +1243,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
- return 1;
+ return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
return READ_ONCE(engine->props.preempt_timeout_ms);
}
@@ -1350,7 +1360,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* submission. If we don't cancel the timer now,
* we will see that the timer has expired and
* reschedule the tasklet; continually until the
- * next context switch or other preeemption event.
+ * next context switch or other preemption event.
*
* Since we have decided to reschedule based on
* consumption of this timeslice, if we submit the
@@ -2958,6 +2968,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
ring_set_paused(engine, 1);
intel_engine_stop_cs(engine);
+ /*
+ * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * to wait for any pending mi force wakeups
+ */
+ if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ intel_engine_wait_for_pending_mi_fw(engine);
+
engine->execlists.reset_ccid = active_ccid(engine);
}
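
The renamed helper above collapses the full signed request priority into the three-level priority field of the GEN12 LRC descriptor. A standalone sketch of that collapse is shown below; the numeric field values and the PRIORITY_NORMAL pivot are hypothetical stand-ins for the GEN12_CTX_PRIORITY_* and I915_PRIORITY_NORMAL definitions and are not taken from the patch.

#include <stdio.h>

/* hypothetical descriptor field values, for illustration only */
enum { CTX_PRIO_LOW = 0, CTX_PRIO_NORMAL = 1, CTX_PRIO_HIGH = 2 };
#define PRIORITY_NORMAL 0	/* request priorities are signed around a "normal" pivot */

static int map_prio_to_desc_prio(int prio)
{
	if (prio > PRIORITY_NORMAL)
		return CTX_PRIO_HIGH;
	else if (prio < PRIORITY_NORMAL)
		return CTX_PRIO_LOW;
	else
		return CTX_PRIO_NORMAL;
}

int main(void)
{
	int prios[] = { -512, 0, 1023 };
	unsigned int i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++)
		printf("request prio %5d -> lrc desc prio %d\n",
		       prios[i], map_prio_to_desc_prio(prios[i]));
	return 0;
}
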
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index e6b2eb122ad7..15a915bb4088 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -3,16 +3,18 @@
* Copyright © 2020 Intel Corporation
*/
-#include <linux/types.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <linux/types.h>
+#include <linux/stop_machine.h>
#include <drm/i915_drm.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
+#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
-#include "intel_gt_gmch.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -22,6 +24,13 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
+static inline bool suspend_retains_ptes(struct i915_address_space *vm)
+{
+ return GRAPHICS_VER(vm->i915) >= 8 &&
+ !HAS_LMEM(vm->i915) &&
+ vm->is_ggtt;
+}
+
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -93,6 +102,23 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
return 0;
}
+/*
+ * Return the value of the last GGTT pte cast to an u64, if
+ * the system is supposed to retain ptes across resume. 0 otherwise.
+ */
+static u64 read_last_pte(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *ptep;
+
+ if (!suspend_retains_ptes(vm))
+ return 0;
+
+ GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
+ ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
+ return readq(ptep);
+}
+
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
@@ -156,7 +182,10 @@ retry:
i915_gem_object_unlock(obj);
}
- vm->clear_range(vm, 0, vm->total);
+ if (!suspend_retains_ptes(vm))
+ vm->clear_range(vm, 0, vm->total);
+ else
+ i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
vm->skip_pte_rewrite = save_skip_rewrite;
@@ -181,7 +210,7 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
spin_unlock_irq(&uncore->lock);
}
-void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -218,11 +247,232 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma_resource *vma_res;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma_res, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0]->encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
void intel_ggtt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 flags)
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
@@ -243,7 +493,7 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
}
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res)
+ struct i915_vma_resource *vma_res)
{
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
@@ -299,6 +549,8 @@ static int init_ggtt(struct i915_ggtt *ggtt)
struct drm_mm_node *entry;
int ret;
+ ggtt->pte_lost = true;
+
/*
* GuC requires all resources that we're sharing with it to be placed in
* non-WOPCM memory. If GuC is not present or not in use we still need a
@@ -560,12 +812,326 @@ void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
dma_resv_fini(&ggtt->vm._resv);
}
-struct resource intel_pci_resource(struct pci_dev *pdev, int bar)
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ phys_addr_t phys_addr;
+ u32 pte_flags;
+ int ret;
+
+ GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+
+ /*
+ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ kref_init(&ggtt->vm.resv_ref);
+ ret = setup_scratch_page(&ggtt->vm);
+ if (ret) {
+ drm_err(&i915->drm, "Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.scratch[0]->encode =
+ ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, pte_flags);
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ free_scratch(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ if (!HAS_LMEM(i915)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+
+ /*
+ * Calling stop_machine() version of GGTT update function
+ * at error capture/reset path will raise lockdep warning.
+ * Allow calling gen8_ggtt_insert_* directly at reset path
+ * which is safe from parallel GGTT updates.
+ */
+ ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
+
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64 << 20) ||
+ ggtt->mappable_end > (512 << 20)) {
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (GRAPHICS_VER(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -576,12 +1142,13 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ggtt->vm.dma = i915->drm.dev;
dma_resv_init(&ggtt->vm._resv);
- if (GRAPHICS_VER(i915) <= 5)
- ret = intel_gt_gmch_gen5_probe(ggtt);
- else if (GRAPHICS_VER(i915) < 8)
- ret = intel_gt_gmch_gen6_probe(ggtt);
+ if (GRAPHICS_VER(i915) >= 8)
+ ret = gen8_gmch_probe(ggtt);
+ else if (GRAPHICS_VER(i915) >= 6)
+ ret = gen6_gmch_probe(ggtt);
else
- ret = intel_gt_gmch_gen8_probe(ggtt);
+ ret = intel_ggtt_gmch_probe(ggtt);
+
if (ret) {
dma_resv_fini(&ggtt->vm._resv);
return ret;
@@ -635,7 +1202,10 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- return intel_gt_gmch_gen5_enable_hw(i915);
+ if (GRAPHICS_VER(i915) < 6)
+ return intel_ggtt_gmch_enable_hw(i915);
+
+ return 0;
}
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
@@ -675,11 +1245,20 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
+ bool retained_ptes;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
- /* First fill our portion of the GTT with scratch pages */
- vm->clear_range(vm, 0, vm->total);
+ /*
+ * First fill our portion of the GTT with scratch pages if
+ * they were not retained across suspend.
+ */
+ retained_ptes = suspend_retains_ptes(vm) &&
+ !i915_vm_to_ggtt(vm)->pte_lost &&
+ !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
+
+ if (!retained_ptes)
+ vm->clear_range(vm, 0, vm->total);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
@@ -688,9 +1267,10 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(vm, NULL, vma->resource,
- obj ? obj->cache_level : 0,
- was_bound);
+ if (!retained_ptes)
+ vma->ops->bind_vma(vm, NULL, vma->resource,
+ obj ? obj->cache_level : 0,
+ was_bound);
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
@@ -718,3 +1298,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
intel_ggtt_restore_fences(ggtt);
}
+
+void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
+{
+ to_gt(i915)->ggtt->pte_lost = val;
+}
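
The gen8 probe path moved above sizes the GGTT in two steps: the GGMS field of the GMCH control word gives the number of megabytes reserved for PTEs, and dividing that by the 8-byte PTE size and multiplying by the 4 KiB page size gives the addressable range. The standalone arithmetic sketch below mirrors gen8_get_total_gtt_size() and the vm.total computation; the field shift/mask placement is an assumed example and the program is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE  4096u
#define PTE_SIZE       sizeof(uint64_t)	/* gen8_pte_t */

/* mirrors gen8_get_total_gtt_size(): GGMS field -> bytes of PTE space */
static unsigned int gen8_gtt_size(uint16_t gmch_ctl, unsigned int shift, uint16_t mask)
{
	gmch_ctl >>= shift;
	gmch_ctl &= mask;
	return gmch_ctl ? (1u << gmch_ctl) << 20 : 0;
}

int main(void)
{
	/* assumed example: GGMS = 3 in bits 7:6 -> 8 MiB of PTEs */
	unsigned int size = gen8_gtt_size(3 << 6, 6, 0x3);
	uint64_t total = (uint64_t)(size / PTE_SIZE) * GTT_PAGE_SIZE;

	printf("%u MiB of PTEs -> %llu GiB of GGTT address space\n",
	       size >> 20, (unsigned long long)(total >> 30));	/* 8 MiB -> 4 GiB */
	return 0;
}
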
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
new file mode 100644
index 000000000000..4e2163a1aa46
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "intel_ggtt_gmch.h"
+
+#include <drm/intel-gtt.h>
+#include <drm/i915_drm.h>
+
+#include <linux/agp_backend.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_gtt.h"
+#include "intel_gt_regs.h"
+#include "intel_gt.h"
+
+static void gmch_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+ flags);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gmch_gtt_flush();
+}
+
+static void gmch_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static void gmch_ggtt_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before unmapping anything from
+ * the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (!i915_vtd_active(i915))
+ return false;
+
+ if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
+ return true;
+
+ return false;
+}
+
+int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ if (!ret) {
+ drm_err(&i915->drm, "failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
+ ggtt->vm.insert_page = gmch_ggtt_insert_page;
+ ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
+ ggtt->vm.clear_range = gmch_ggtt_clear_range;
+ ggtt->vm.cleanup = gmch_ggtt_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ if (unlikely(ggtt->do_idle_maps))
+ drm_notice(&i915->drm,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
+{
+ if (!intel_gmch_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
+void intel_ggtt_gmch_flush(void)
+{
+ intel_gmch_gtt_flush();
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h
new file mode 100644
index 000000000000..370bf321b4e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GGTT_GMCH_H__
+#define __INTEL_GGTT_GMCH_H__
+
+#include "intel_gtt.h"
+
+/* For x86 platforms */
+#if IS_ENABLED(CONFIG_X86)
+
+void intel_ggtt_gmch_flush(void);
+int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915);
+int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt);
+
+/* Stubs for non-x86 platforms */
+#else
+
+static inline void intel_ggtt_gmch_flush(void) { }
+static inline int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915) { return -ENODEV; }
+static inline int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt) { return -ENODEV; }
+
+#endif
+
+#endif /* __INTEL_GGTT_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 556bca3be804..d4e9702d3c8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -236,6 +236,28 @@
#define XY_FAST_COLOR_BLT_DW 16
#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21)
#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31
+
+#define XY_FAST_COPY_BLT_D0_SRC_TILING_MASK REG_GENMASK(21, 20)
+#define XY_FAST_COPY_BLT_D0_DST_TILING_MASK REG_GENMASK(14, 13)
+#define XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(mode) \
+ REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_SRC_TILING_MASK, mode)
+#define XY_FAST_COPY_BLT_D0_DST_TILE_MODE(mode) \
+ REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_DST_TILING_MASK, mode)
+#define LINEAR 0
+#define TILE_X 0x1
+#define XMAJOR 0x1
+#define YMAJOR 0x2
+#define TILE_64 0x3
+#define XY_FAST_COPY_BLT_D1_SRC_TILE4 REG_BIT(31)
+#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
+/* Note: MOCS value = (index << 1) */
+#define BLIT_CCTL_SRC_MOCS(idx) \
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (idx) << 1)
+#define BLIT_CCTL_DST_MOCS(idx) \
+ REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (idx) << 1)
+
#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
@@ -288,8 +310,11 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
-/* 3D-related flags can't be set on compute engine */
-#define PIPE_CONTROL_3D_FLAGS (\
+/*
+ * 3D-related flags that can't be set on _engines_ that lack access to the 3D
+ * pipeline (i.e., CCS engines).
+ */
+#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
PIPE_CONTROL_TILE_CACHE_FLUSH | \
@@ -300,6 +325,14 @@
PIPE_CONTROL_VF_CACHE_INVALIDATE | \
PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)
+/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
+#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
+ PIPE_CONTROL_3D_ENGINE_FLAGS | \
+ PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
+ PIPE_CONTROL_FLUSH_ENABLE | \
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
+ PIPE_CONTROL_DC_FLUSH_ENABLE)
+
#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
/* Opcodes for MI_MATH_INSTR */
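
The split above distinguishes flags that are illegal on a CCS engine (which merely lacks access to the 3D pipeline) from the larger set that is illegal on platforms with no 3D pipeline at all, and the gen8_engine_cs.c hunks earlier in this diff filter PIPE_CONTROL flags accordingly. A standalone model of that two-level filtering is shown below; the individual flag bit values are hypothetical and only the filtering order mirrors the patch.

#include <stdint.h>
#include <stdio.h>

/* hypothetical bit assignments, for illustration only */
#define PC_RENDER_TARGET_FLUSH  (1u << 0)	/* engine-level 3D flag */
#define PC_DEPTH_FLUSH          (1u << 1)	/* engine-level 3D flag */
#define PC_DC_FLUSH             (1u << 2)	/* arch-level 3D flag   */
#define PC_CS_STALL             (1u << 3)	/* always legal         */

#define PC_3D_ENGINE_FLAGS (PC_RENDER_TARGET_FLUSH | PC_DEPTH_FLUSH)
#define PC_3D_ARCH_FLAGS   (PC_3D_ENGINE_FLAGS | PC_DC_FLUSH)

static uint32_t filter_flags(uint32_t flags, int has_3d_pipeline, int is_compute_engine)
{
	if (!has_3d_pipeline)		/* platform has no 3D pipeline at all */
		flags &= ~PC_3D_ARCH_FLAGS;
	else if (is_compute_engine)	/* engine merely lacks access to it   */
		flags &= ~PC_3D_ENGINE_FLAGS;
	return flags;
}

int main(void)
{
	uint32_t flags = PC_RENDER_TARGET_FLUSH | PC_DC_FLUSH | PC_CS_STALL;

	printf("compute engine:  0x%x\n", filter_flags(flags, 1, 1)); /* keeps the DC flush */
	printf("no 3D pipeline:  0x%x\n", filter_flags(flags, 0, 1)); /* only the CS stall  */
	return 0;
}
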
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 53307ca0eed0..8da3314bb6bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
@@ -12,11 +13,12 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
+#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
-#include "intel_gt_gmch.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
@@ -102,78 +104,13 @@ int intel_gt_assign_ggtt(struct intel_gt *gt)
return gt->ggtt ? 0 : -ENOMEM;
}
-static const char * const intel_steering_types[] = {
- "L3BANK",
- "MSLICE",
- "LNCF",
-};
-
-static const struct intel_mmio_range icl_l3bank_steering_table[] = {
- { 0x00B100, 0x00B3FF },
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
- { 0x004000, 0x004AFF },
- { 0x00C800, 0x00CFFF },
- { 0x00DD00, 0x00DDFF },
- { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D800, 0x00D8FF },
- {},
-};
-
-static const struct intel_mmio_range dg2_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D880, 0x00D8FF },
- {},
-};
-
-static u16 slicemask(struct intel_gt *gt, int count)
-{
- u64 dss_mask = intel_sseu_get_subslices(&gt->info.sseu, 0);
-
- return intel_slicemask_from_dssmask(dss_mask, count);
-}
-
int intel_gt_init_mmio(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
-
intel_gt_init_clock_frequency(gt);
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
-
- /*
- * An mslice is unavailable only if both the meml3 for the slice is
- * disabled *and* all of the DSS in the slice (quadrant) are disabled.
- */
- if (HAS_MSLICES(i915))
- gt->info.mslice_mask =
- slicemask(gt, GEN_DSS_PER_MSLICE) |
- (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN12_MEML3_EN_MASK);
-
- if (IS_DG2(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = dg2_lncf_steering_table;
- } else if (IS_XEHPSDV(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
- } else if (GRAPHICS_VER(i915) >= 11 &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
- gt->steering_table[L3BANK] = icl_l3bank_steering_table;
- gt->info.l3bank_mask =
- ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN10_L3BANK_MASK;
- } else if (HAS_MSLICES(i915)) {
- MISSING_CASE(INTEL_INFO(i915)->platform);
- }
+ intel_gt_mcr_init(gt);
return intel_engines_init_mmio(gt);
}
@@ -451,7 +388,7 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
if (GRAPHICS_VER(gt->i915) < 6)
- intel_gt_gmch_gen5_chipset_flush(gt);
+ intel_ggtt_gmch_flush();
}
void intel_gt_driver_register(struct intel_gt *gt)
@@ -785,6 +722,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
+ intel_gt_sysfs_unregister(gt);
intel_rps_driver_unregister(&gt->rps);
intel_gsc_fini(&gt->gsc);
@@ -834,200 +772,6 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
}
}
-/**
- * intel_gt_reg_needs_read_steering - determine whether a register read
- * requires explicit steering
- * @gt: GT structure
- * @reg: the register to check steering requirements for
- * @type: type of multicast steering to check
- *
- * Determines whether @reg needs explicit steering of a specific type for
- * reads.
- *
- * Returns false if @reg does not belong to a register range of the given
- * steering type, or if the default (subslice-based) steering IDs are suitable
- * for @type steering too.
- */
-static bool intel_gt_reg_needs_read_steering(struct intel_gt *gt,
- i915_reg_t reg,
- enum intel_steering_type type)
-{
- const u32 offset = i915_mmio_reg_offset(reg);
- const struct intel_mmio_range *entry;
-
- if (likely(!intel_gt_needs_read_steering(gt, type)))
- return false;
-
- for (entry = gt->steering_table[type]; entry->end; entry++) {
- if (offset >= entry->start && offset <= entry->end)
- return true;
- }
-
- return false;
-}
-
-/**
- * intel_gt_get_valid_steering - determines valid IDs for a class of MCR steering
- * @gt: GT structure
- * @type: multicast register type
- * @sliceid: Slice ID returned
- * @subsliceid: Subslice ID returned
- *
- * Determines sliceid and subsliceid values that will steer reads
- * of a specific multicast register class to a valid value.
- */
-static void intel_gt_get_valid_steering(struct intel_gt *gt,
- enum intel_steering_type type,
- u8 *sliceid, u8 *subsliceid)
-{
- switch (type) {
- case L3BANK:
- GEM_DEBUG_WARN_ON(!gt->info.l3bank_mask); /* should be impossible! */
-
- *sliceid = 0; /* unused */
- *subsliceid = __ffs(gt->info.l3bank_mask);
- break;
- case MSLICE:
- GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
-
- *sliceid = __ffs(gt->info.mslice_mask);
- *subsliceid = 0; /* unused */
- break;
- case LNCF:
- GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
-
- /*
- * An LNCF is always present if its mslice is present, so we
- * can safely just steer to LNCF 0 in all cases.
- */
- *sliceid = __ffs(gt->info.mslice_mask) << 1;
- *subsliceid = 0; /* unused */
- break;
- default:
- MISSING_CASE(type);
- *sliceid = 0;
- *subsliceid = 0;
- }
-}
-
-/**
- * intel_gt_read_register_fw - reads a GT register with support for multicast
- * @gt: GT structure
- * @reg: register to read
- *
- * This function will read a GT register. If the register is a multicast
- * register, the read will be steered to a valid instance (i.e., one that
- * isn't fused off or powered down by power gating).
- *
- * Returns the value from a valid instance of @reg.
- */
-u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
-{
- int type;
- u8 sliceid, subsliceid;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, &sliceid,
- &subsliceid);
- return intel_uncore_read_with_mcr_steering_fw(gt->uncore,
- reg,
- sliceid,
- subsliceid);
- }
- }
-
- return intel_uncore_read_fw(gt->uncore, reg);
-}
-
-/**
- * intel_gt_get_valid_steering_for_reg - get a valid steering for a register
- * @gt: GT structure
- * @reg: register for which the steering is required
- * @sliceid: return variable for slice steering
- * @subsliceid: return variable for subslice steering
- *
- * This function returns a slice/subslice pair that is guaranteed to work for
- * read steering of the given register. Note that a value will be returned even
- * if the register is not replicated and therefore does not actually require
- * steering.
- */
-void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
- u8 *sliceid, u8 *subsliceid)
-{
- int type;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, sliceid,
- subsliceid);
- return;
- }
- }
-
- *sliceid = gt->default_steering.groupid;
- *subsliceid = gt->default_steering.instanceid;
-}
-
-u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg)
-{
- int type;
- u8 sliceid, subsliceid;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, &sliceid,
- &subsliceid);
- return intel_uncore_read_with_mcr_steering(gt->uncore,
- reg,
- sliceid,
- subsliceid);
- }
- }
-
- return intel_uncore_read(gt->uncore, reg);
-}
-
-static void report_steering_type(struct drm_printer *p,
- struct intel_gt *gt,
- enum intel_steering_type type,
- bool dump_table)
-{
- const struct intel_mmio_range *entry;
- u8 slice, subslice;
-
- BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
-
- if (!gt->steering_table[type]) {
- drm_printf(p, "%s steering: uses default steering\n",
- intel_steering_types[type]);
- return;
- }
-
- intel_gt_get_valid_steering(gt, type, &slice, &subslice);
- drm_printf(p, "%s steering: sliceid=0x%x, subsliceid=0x%x\n",
- intel_steering_types[type], slice, subslice);
-
- if (!dump_table)
- return;
-
- for (entry = gt->steering_table[type]; entry->end; entry++)
- drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
-}
-
-void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
- bool dump_table)
-{
- drm_printf(p, "Default steering: sliceid=0x%x, subsliceid=0x%x\n",
- gt->default_steering.groupid,
- gt->default_steering.instanceid);
-
- if (HAS_MSLICES(gt->i915)) {
- report_steering_type(p, gt, MSLICE, dump_table);
- report_steering_type(p, gt, LNCF, dump_table);
- }
-}
-
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
int ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 44c6cb63ccbc..82d6f248d876 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -13,13 +13,6 @@
struct drm_i915_private;
struct drm_printer;
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma_resource *vma_res;
- enum i915_cache_level level;
- u32 flags;
-};
-
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
@@ -93,21 +86,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}
-static inline bool intel_gt_needs_read_steering(struct intel_gt *gt,
- enum intel_steering_type type)
-{
- return gt->steering_table[type];
-}
-
-void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
- u8 *sliceid, u8 *subsliceid);
-
-u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg);
-u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg);
-
-void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
- bool dump_table);
-
int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);
@@ -125,6 +103,4 @@ void intel_gt_watchdog_work(struct work_struct *work);
void intel_gt_invalidate_tlbs(struct intel_gt *gt);
-struct resource intel_pci_resource(struct pci_dev *pdev, int bar);
-
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index d886fdc2c694..dd53641f3637 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -9,6 +9,7 @@
#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_sseu_debugfs.h"
#include "pxp/intel_pxp_debugfs.h"
@@ -64,7 +65,7 @@ static int steering_show(struct seq_file *m, void *data)
struct drm_printer p = drm_seq_file_printer(m);
struct intel_gt *gt = m->private;
- intel_gt_report_steering(&p, gt, true);
+ intel_gt_mcr_report_steering(&p, gt, true);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
deleted file mode 100644
index 18e488672d1b..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
+++ /dev/null
@@ -1,654 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include <drm/intel-gtt.h>
-#include <drm/i915_drm.h>
-
-#include <linux/agp_backend.h>
-#include <linux/stop_machine.h>
-
-#include "i915_drv.h"
-#include "intel_gt_gmch.h"
-#include "intel_gt_regs.h"
-#include "intel_gt.h"
-#include "i915_utils.h"
-
-#include "gen8_ppgtt.h"
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static void gen5_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen5_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
- flags);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level.
- * The object will be accessible to the GPU via commands whose operands
- * reference offsets within the global GTT as well as accessible by the GPU
- * through the GMADR mapped BAR (i915->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *gte;
- gen6_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- iowrite32(vm->pte_encode(addr, level, flags), gte++);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- iowrite32(vm->scratch[0]->encode, gte++);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *gte;
- gen8_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- gen8_set_pte(gte++, pte_encode | addr);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- gen8_set_pte(gte++, vm->scratch[0]->encode);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
-}
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma_res, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
-{
- intel_gtt_chipset_flush();
-}
-
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- intel_gtt_chipset_flush();
-}
-
-static void gen5_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0]->encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void gen5_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- free_scratch(vm);
-}
-
-/*
- * Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *i915)
-{
- /*
- * Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if (!i915_vtd_active(i915))
- return false;
-
- if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
- return true;
-
- if (GRAPHICS_VER(i915) == 12)
- return true; /* XXX DMAR fault reason 7 */
-
- return false;
-}
-
-static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
-{
- /*
- * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
- * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
- */
- GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
- return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
-}
-
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
-{
- return gen6_gttmmadr_size(i915) / 2;
-}
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- phys_addr_t phys_addr;
- u32 pte_flags;
- int ret;
-
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
-
- /*
- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
- ggtt->gsm = ioremap(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- drm_err(&i915->drm, "Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- kref_init(&ggtt->vm.resv_ref);
- ret = setup_scratch_page(&ggtt->vm);
- if (ret) {
- drm_err(&i915->drm, "Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- pte_flags = 0;
- if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
- pte_flags |= PTE_LM;
-
- ggtt->vm.scratch[0]->encode =
- ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, pte_flags);
-
- return 0;
-}
-
-int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
- if (!ret) {
- drm_err(&i915->drm, "failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- if (needs_idle_maps(i915)) {
- drm_notice(&i915->drm,
- "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
- ggtt->do_idle_maps = true;
- }
-
- ggtt->vm.insert_page = gen5_ggtt_insert_page;
- ggtt->vm.insert_entries = gen5_ggtt_insert_entries;
- ggtt->vm.clear_range = gen5_ggtt_clear_range;
- ggtt->vm.cleanup = gen5_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
-
- if (unlikely(ggtt->do_idle_maps))
- drm_notice(&i915->drm,
- "Applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
-int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- ggtt->gmadr = intel_pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /*
- * 64/512MB is the current min/max we actually know of, but this is
- * just a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
- &ggtt->mappable_end);
- return -ENXIO;
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(i915))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(i915))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(i915))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (GRAPHICS_VER(i915) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
-
-int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- if (!HAS_LMEM(i915)) {
- ggtt->gmadr = intel_pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(i915))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
- ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /*
- * Serialize GTT updates with aperture access on BXT if VT-d is on,
- * and always on CHV.
- */
- if (intel_vm_no_concurrent_access_wa(i915)) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- ggtt->vm.bind_async_flags =
- I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- }
-
- ggtt->invalidate = gen8_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
-
- ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
-
- setup_private_pat(ggtt->vm.gt->uncore);
-
- return ggtt_probe_common(ggtt, size);
-}
-
-int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
-{
- if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
deleted file mode 100644
index 75ed55c1f30a..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef __INTEL_GT_GMCH_H__
-#define __INTEL_GT_GMCH_H__
-
-#include "intel_gtt.h"
-
-/* For x86 platforms */
-#if IS_ENABLED(CONFIG_X86)
-void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt);
-int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt);
-int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt);
-int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt);
-int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915);
-
-/* Stubs for non-x86 platforms */
-#else
-static inline void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
-{
-}
-static inline int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
-{
- /* No HW should be probed for this case yet, return fail */
- return -ENODEV;
-}
-static inline int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
-{
- /* No HW should be probed for this case yet, return fail */
- return -ENODEV;
-}
-static inline int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
-{
- /* No HW should be probed for this case yet, return fail */
- return -ENODEV;
-}
-static inline int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
-{
- /* No HW should be enabled for this case yet, return fail */
- return -ENODEV;
-}
-#endif
-
-#endif /* __INTEL_GT_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 88b4becfcb17..3a72d4fd0214 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -193,6 +193,14 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
+ intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
+ intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
+ intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
+ intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
@@ -248,6 +256,14 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
+ intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
+ intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
+ intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
+ intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
new file mode 100644
index 000000000000..777025d5bd66
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_gt_mcr.h"
+#include "intel_gt_regs.h"
+
+/**
+ * DOC: GT Multicast/Replicated (MCR) Register Support
+ *
+ * Some GT registers are designed as "multicast" or "replicated" registers:
+ * multiple instances of the same register share a single MMIO offset. MCR
+ * registers are generally used when the hardware needs to potentially track
+ * independent values of a register per hardware unit (e.g., per-subslice,
+ * per-L3bank, etc.). The specific types of replication that exist vary
+ * per-platform.
+ *
+ * MMIO accesses to MCR registers are controlled according to the settings
+ * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
+ * registers can be done in either a multicast fashion (i.e., a single write updates all
+ * instances of the register to the same value) or unicast (a write updates only
+ * one specific instance). Reads of MCR registers always operate in a unicast
+ * manner regardless of how the multicast/unicast bit is set in MCR_SELECTOR.
+ * Selection of a specific MCR instance for unicast operations is referred to
+ * as "steering."
+ *
+ * If MCR register operations are steered toward a hardware unit that is
+ * fused off or currently powered down due to power gating, the MMIO operation
+ * is "terminated" by the hardware. Terminated read operations will return a
+ * value of zero and terminated unicast write operations will be silently
+ * ignored.
+ */
+
+#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+
+static const char * const intel_steering_types[] = {
+ "L3BANK",
+ "MSLICE",
+ "LNCF",
+ "INSTANCE 0",
+};
+
+static const struct intel_mmio_range icl_l3bank_steering_table[] = {
+ { 0x00B100, 0x00B3FF },
+ {},
+};
+
+static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+ { 0x004000, 0x004AFF },
+ { 0x00C800, 0x00CFFF },
+ { 0x00DD00, 0x00DDFF },
+	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
+ {},
+};
+
+static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D800, 0x00D8FF },
+ {},
+};
+
+static const struct intel_mmio_range dg2_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D880, 0x00D8FF },
+ {},
+};
+
+/*
+ * We have several types of MCR registers on PVC where steering to (0,0)
+ * will always provide us with a non-terminated value. We'll stick them
+ * all in the same table for simplicity.
+ */
+static const struct intel_mmio_range pvc_instance0_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* HALF-BSLICE */
+ { 0x008800, 0x00887F }, /* CC */
+ { 0x008A80, 0x008AFF }, /* TILEPSMI */
+ { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
+ { 0x00B100, 0x00B3FF }, /* L3BANK */
+ { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
+ { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
+ { 0x00DD00, 0x00DDFF }, /* BSLICE */
+ { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
+ { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
+ { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
+ { 0x024180, 0x0241FF }, /* HALF-BSLICE */
+ {},
+};
+
+void intel_gt_mcr_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /*
+ * An mslice is unavailable only if both the meml3 for the slice is
+ * disabled *and* all of the DSS in the slice (quadrant) are disabled.
+ */
+ if (HAS_MSLICE_STEERING(i915)) {
+ gt->info.mslice_mask =
+ intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
+ GEN_DSS_PER_MSLICE);
+ gt->info.mslice_mask |=
+ (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+ GEN12_MEML3_EN_MASK);
+
+ if (!gt->info.mslice_mask) /* should be impossible! */
+ drm_warn(&i915->drm, "mslice mask all zero!\n");
+ }
+
+ if (IS_PONTEVECCHIO(i915)) {
+ gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
+ } else if (IS_DG2(i915)) {
+ gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[LNCF] = dg2_lncf_steering_table;
+ } else if (IS_XEHPSDV(i915)) {
+ gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
+ } else if (GRAPHICS_VER(i915) >= 11 &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+ gt->steering_table[L3BANK] = icl_l3bank_steering_table;
+ gt->info.l3bank_mask =
+ ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+ GEN10_L3BANK_MASK;
+ if (!gt->info.l3bank_mask) /* should be impossible! */
+ drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
+ } else if (GRAPHICS_VER(i915) >= 11) {
+ /*
+ * We expect all modern platforms to have at least some
+ * type of steering that needs to be initialized.
+ */
+ MISSING_CASE(INTEL_INFO(i915)->platform);
+ }
+}
+
+/*
+ * rw_with_mcr_steering_fw - Access a register with specific MCR steering
+ * @uncore: pointer to struct intel_uncore
+ * @reg: register being accessed
+ * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
+ * @group: group number (documented as "sliceid" on older platforms)
+ * @instance: instance number (documented as "subsliceid" on older platforms)
+ * @value: register value to be written (ignored for read)
+ *
+ * Return: 0 for write access; the register value for read access.
+ *
+ * Caller needs to make sure the relevant forcewake wells are up.
+ */
+static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int group, int instance, u32 value)
+{
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
+
+ lockdep_assert_held(&uncore->lock);
+
+ if (GRAPHICS_VER(uncore->i915) >= 11) {
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);
+
+ /*
+ * Wa_22013088509
+ *
+ * The setting of the multicast/unicast bit usually wouldn't
+ * matter for read operations (which always return the value
+ * from a single register instance regardless of how that bit
+ * is set), but some platforms have a workaround requiring us
+ * to remain in multicast mode for reads. There's no real
+ * downside to this, so we'll just go ahead and do so on all
+ * platforms; we'll only clear the multicast bit from the mask
+		 * when explicitly doing a write operation.
+ */
+ if (rw_flag == FW_REG_WRITE)
+ mcr_mask |= GEN11_MCR_MULTICAST;
+ } else {
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);
+ }
+
+ old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+
+ mcr &= ~mcr_mask;
+ mcr |= mcr_ss;
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+ if (rw_flag == FW_REG_READ)
+ val = intel_uncore_read_fw(uncore, reg);
+ else
+ intel_uncore_write_fw(uncore, reg, value);
+
+ mcr &= ~mcr_mask;
+ mcr |= old_mcr & mcr_mask;
+
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+ return val;
+}
+
+static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int group, int instance,
+ u32 value)
+{
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
+ rw_flag);
+ fw_domains |= intel_uncore_forcewake_for_reg(uncore,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irq(&uncore->lock);
+
+ return val;
+}
+
+/**
+ * intel_gt_mcr_read - read a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to read
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Returns the value read from an MCR register after steering toward a specific
+ * group/instance.
+ */
+u32 intel_gt_mcr_read(struct intel_gt *gt,
+ i915_reg_t reg,
+ int group, int instance)
+{
+ return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
+}
+
+/**
+ * intel_gt_mcr_unicast_write - write a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Write an MCR register in unicast mode after steering toward a specific
+ * group/instance.
+ */
+void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_reg_t reg, u32 value,
+ int group, int instance)
+{
+ rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
+}
+
+/**
+ * intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances.
+ */
+void intel_gt_mcr_multicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value)
+{
+ intel_uncore_write(gt->uncore, reg, value);
+}
+
+/**
+ * intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances. This
+ * function assumes the caller is already holding any necessary forcewake
+ * domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
+ * be obtained automatically.
+ */
+void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_reg_t reg, u32 value)
+{
+ intel_uncore_write_fw(gt->uncore, reg, value);
+}
+
+/*
+ * reg_needs_read_steering - determine whether a register read requires
+ * explicit steering
+ * @gt: GT structure
+ * @reg: the register to check steering requirements for
+ * @type: type of multicast steering to check
+ *
+ * Determines whether @reg needs explicit steering of a specific type for
+ * reads.
+ *
+ * Returns false if @reg does not belong to a register range of the given
+ * steering type, or if the default (subslice-based) steering IDs are suitable
+ * for @type steering too.
+ */
+static bool reg_needs_read_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ enum intel_steering_type type)
+{
+ const u32 offset = i915_mmio_reg_offset(reg);
+ const struct intel_mmio_range *entry;
+
+ if (likely(!gt->steering_table[type]))
+ return false;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++) {
+ if (offset >= entry->start && offset <= entry->end)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * get_nonterminated_steering - determines valid IDs for a class of MCR steering
+ * @gt: GT structure
+ * @type: multicast register type
+ * @group: Group ID returned
+ * @instance: Instance ID returned
+ *
+ * Determines group and instance values that will steer reads of the specified
+ * MCR class to a non-terminated instance.
+ */
+static void get_nonterminated_steering(struct intel_gt *gt,
+ enum intel_steering_type type,
+ u8 *group, u8 *instance)
+{
+ switch (type) {
+ case L3BANK:
+ *group = 0; /* unused */
+ *instance = __ffs(gt->info.l3bank_mask);
+ break;
+ case MSLICE:
+ GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
+ *group = __ffs(gt->info.mslice_mask);
+ *instance = 0; /* unused */
+ break;
+ case LNCF:
+ /*
+ * An LNCF is always present if its mslice is present, so we
+ * can safely just steer to LNCF 0 in all cases.
+ */
+ GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
+ *group = __ffs(gt->info.mslice_mask) << 1;
+ *instance = 0; /* unused */
+ break;
+ case INSTANCE0:
+ /*
+ * There are a lot of MCR types for which instance (0, 0)
+ * will always provide a non-terminated value.
+ */
+ *group = 0;
+ *instance = 0;
+ break;
+ default:
+ MISSING_CASE(type);
+ *group = 0;
+ *instance = 0;
+ }
+}
+
+/**
+ * intel_gt_mcr_get_nonterminated_steering - find group/instance values that
+ * will steer a register to a non-terminated instance
+ * @gt: GT structure
+ * @reg: register for which the steering is required
+ * @group: return variable for group steering
+ * @instance: return variable for instance steering
+ *
+ * This function returns a group/instance pair that is guaranteed to work for
+ * read steering of the given register. Note that a value will be returned even
+ * if the register is not replicated and therefore does not actually require
+ * steering.
+ */
+void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ u8 *group, u8 *instance)
+{
+ int type;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, group, instance);
+ return;
+ }
+ }
+
+ *group = gt->default_steering.groupid;
+ *instance = gt->default_steering.instanceid;
+}
+
+/**
+ * intel_gt_mcr_read_any_fw - reads one instance of an MCR register
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ * This function assumes the caller is already holding any necessary forcewake
+ * domains; use intel_gt_mcr_read_any() in cases where forcewake should be
+ * obtained automatically.
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg)
+{
+ int type;
+ u8 group, instance;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, &group, &instance);
+ return rw_with_mcr_steering_fw(gt->uncore, reg,
+ FW_REG_READ,
+ group, instance, 0);
+ }
+ }
+
+ return intel_uncore_read_fw(gt->uncore, reg);
+}
+
+/**
+ * intel_gt_mcr_read_any - reads one instance of an MCR register
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg)
+{
+ int type;
+ u8 group, instance;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, &group, &instance);
+ return rw_with_mcr_steering(gt->uncore, reg,
+ FW_REG_READ,
+ group, instance, 0);
+ }
+ }
+
+ return intel_uncore_read(gt->uncore, reg);
+}
+
+static void report_steering_type(struct drm_printer *p,
+ struct intel_gt *gt,
+ enum intel_steering_type type,
+ bool dump_table)
+{
+ const struct intel_mmio_range *entry;
+ u8 group, instance;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
+
+ if (!gt->steering_table[type]) {
+ drm_printf(p, "%s steering: uses default steering\n",
+ intel_steering_types[type]);
+ return;
+ }
+
+ get_nonterminated_steering(gt, type, &group, &instance);
+ drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
+ intel_steering_types[type], group, instance);
+
+ if (!dump_table)
+ return;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++)
+ drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
+}
+
+void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table)
+{
+ drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
+ gt->default_steering.groupid,
+ gt->default_steering.instanceid);
+
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ report_steering_type(p, gt, INSTANCE0, dump_table);
+ } else if (HAS_MSLICE_STEERING(gt->i915)) {
+ report_steering_type(p, gt, MSLICE, dump_table);
+ report_steering_type(p, gt, LNCF, dump_table);
+ }
+}
+
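For orientation, the snippet below is an editor's sketch and not part of the patch: it shows how a caller might combine the intel_gt_mcr_* helpers introduced above. The wrapper function is hypothetical; only the helper names and signatures come from this file.

	/* Hypothetical caller, illustrative only; @reg is any replicated register. */
	static void example_mcr_usage(struct intel_gt *gt, i915_reg_t reg)
	{
		u8 group, instance;
		u32 val;

		/* Steered read from an instance that is not fused off or power-gated. */
		val = intel_gt_mcr_read_any(gt, reg);

		/* Look up explicit steering IDs, e.g. for logging or targeted writes. */
		intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);

		/* Update every instance of the register at once ... */
		intel_gt_mcr_multicast_write(gt, reg, val);

		/* ... or only the single instance identified above. */
		intel_gt_mcr_unicast_write(gt, reg, val, group, instance);
	}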
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
new file mode 100644
index 000000000000..506b0cbc8db3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_MCR__
+#define __INTEL_GT_MCR__
+
+#include "intel_gt_types.h"
+
+void intel_gt_mcr_init(struct intel_gt *gt);
+
+u32 intel_gt_mcr_read(struct intel_gt *gt,
+ i915_reg_t reg,
+ int group, int instance);
+u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg);
+u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg);
+
+void intel_gt_mcr_unicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value,
+ int group, int instance);
+void intel_gt_mcr_multicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value);
+void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
+ i915_reg_t reg, u32 value);
+
+void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ u8 *group, u8 *instance);
+
+void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table);
+
+#endif /* __INTEL_GT_MCR__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 0c6b9eb724ae..40bdd4cb629f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -100,14 +100,16 @@ static int vlv_drpc(struct seq_file *m)
{
struct intel_gt *gt = m->private;
struct intel_uncore *uncore = gt->uncore;
- u32 rcctl1, pw_status;
+ u32 rcctl1, pw_status, mt_fwake_req;
+ mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
seq_printf(m, "RC6 Enabled: %s\n",
str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
seq_printf(m, "Render Power Well: %s\n",
(pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
@@ -124,9 +126,10 @@ static int gen6_drpc(struct seq_file *m)
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- u32 gt_core_status, rcctl1, rc6vids = 0;
+ u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+ mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
@@ -138,7 +141,7 @@ static int gen6_drpc(struct seq_file *m)
}
if (GRAPHICS_VER(i915) <= 7)
- snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -178,6 +181,7 @@ static int gen6_drpc(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
@@ -545,7 +549,7 @@ static int llc_show(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
- snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(rps,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index a0a49c16babd..37c1095d8603 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -140,6 +140,7 @@
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
+#define GEN12_PERF_FIX_BALANCING_CFE_DISABLE REG_BIT(15)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
@@ -323,8 +324,11 @@
#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
-#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
-#define XEHPSDV_CCS_BASE_SHIFT 8
+#define XEHP_TILE0_ADDR_RANGE _MMIO(0x4900)
+#define XEHP_TILE_LMEM_RANGE_SHIFT 8
+
+#define XEHP_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
+#define XEHP_CCS_BASE_SHIFT 8
#define GAMTARBMODE _MMIO(0x4a08)
#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
@@ -561,6 +565,7 @@
#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
+#define XEHPC_GT_COMPUTE_DSS_ENABLE_EXT _MMIO(0x9148)
#define GEN6_UCGCTL1 _MMIO(0x9400)
#define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
@@ -597,24 +602,32 @@
/* GEN11 changed all bit defs except for FULL & RENDER */
#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
-#define GEN11_GRDOM_BLT (1 << 2)
-#define GEN11_GRDOM_GUC (1 << 3)
-#define GEN11_GRDOM_MEDIA (1 << 5)
-#define GEN11_GRDOM_MEDIA2 (1 << 6)
-#define GEN11_GRDOM_MEDIA3 (1 << 7)
-#define GEN11_GRDOM_MEDIA4 (1 << 8)
-#define GEN11_GRDOM_MEDIA5 (1 << 9)
-#define GEN11_GRDOM_MEDIA6 (1 << 10)
-#define GEN11_GRDOM_MEDIA7 (1 << 11)
-#define GEN11_GRDOM_MEDIA8 (1 << 12)
-#define GEN11_GRDOM_VECS (1 << 13)
-#define GEN11_GRDOM_VECS2 (1 << 14)
-#define GEN11_GRDOM_VECS3 (1 << 15)
-#define GEN11_GRDOM_VECS4 (1 << 16)
-#define GEN11_GRDOM_SFC0 (1 << 17)
-#define GEN11_GRDOM_SFC1 (1 << 18)
-#define GEN11_GRDOM_SFC2 (1 << 19)
-#define GEN11_GRDOM_SFC3 (1 << 20)
+#define XEHPC_GRDOM_BLT8 REG_BIT(31)
+#define XEHPC_GRDOM_BLT7 REG_BIT(30)
+#define XEHPC_GRDOM_BLT6 REG_BIT(29)
+#define XEHPC_GRDOM_BLT5 REG_BIT(28)
+#define XEHPC_GRDOM_BLT4 REG_BIT(27)
+#define XEHPC_GRDOM_BLT3 REG_BIT(26)
+#define XEHPC_GRDOM_BLT2 REG_BIT(25)
+#define XEHPC_GRDOM_BLT1 REG_BIT(24)
+#define GEN11_GRDOM_SFC3 REG_BIT(20)
+#define GEN11_GRDOM_SFC2 REG_BIT(19)
+#define GEN11_GRDOM_SFC1 REG_BIT(18)
+#define GEN11_GRDOM_SFC0 REG_BIT(17)
+#define GEN11_GRDOM_VECS4 REG_BIT(16)
+#define GEN11_GRDOM_VECS3 REG_BIT(15)
+#define GEN11_GRDOM_VECS2 REG_BIT(14)
+#define GEN11_GRDOM_VECS REG_BIT(13)
+#define GEN11_GRDOM_MEDIA8 REG_BIT(12)
+#define GEN11_GRDOM_MEDIA7 REG_BIT(11)
+#define GEN11_GRDOM_MEDIA6 REG_BIT(10)
+#define GEN11_GRDOM_MEDIA5 REG_BIT(9)
+#define GEN11_GRDOM_MEDIA4 REG_BIT(8)
+#define GEN11_GRDOM_MEDIA3 REG_BIT(7)
+#define GEN11_GRDOM_MEDIA2 REG_BIT(6)
+#define GEN11_GRDOM_MEDIA REG_BIT(5)
+#define GEN11_GRDOM_GUC REG_BIT(3)
+#define GEN11_GRDOM_BLT REG_BIT(2)
#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
@@ -622,6 +635,7 @@
#define GEN7_MISCCPCTL _MMIO(0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN12_DOP_CLOCK_GATE_RENDER_ENABLE REG_BIT(1)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
@@ -732,6 +746,7 @@
#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
+#define GEN12_MEDIA_FREQ_RATIO REG_BIT(13)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xa00c)
#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
@@ -969,6 +984,11 @@
#define XEHP_L3SCQREG7 _MMIO(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+#define XEHPC_L3SCRUB _MMIO(0xb18c)
+#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
+#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
+#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
+
#define L3SQCREG1_CCS0 _MMIO(0xb200)
#define FLUSHALLNONCOH REG_BIT(5)
@@ -1060,8 +1080,10 @@
#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
-#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define ENABLE_EU_COUNT_FOR_TDL_FLUSH REG_BIT(10)
+#define DISABLE_ECC REG_BIT(5)
#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
#define EU_PERF_CNTL0 _MMIO(0xe458)
#define EU_PERF_CNTL4 _MMIO(0xe45c)
@@ -1476,6 +1498,14 @@
#define GEN11_KCR (19)
#define GEN11_GTPM (16)
#define GEN11_BCS (15)
+#define XEHPC_BCS1 (14)
+#define XEHPC_BCS2 (13)
+#define XEHPC_BCS3 (12)
+#define XEHPC_BCS4 (11)
+#define XEHPC_BCS5 (10)
+#define XEHPC_BCS6 (9)
+#define XEHPC_BCS7 (8)
+#define XEHPC_BCS8 (23)
#define GEN12_CCS3 (7)
#define GEN12_CCS2 (6)
#define GEN12_CCS1 (5)
@@ -1521,6 +1551,10 @@
#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
#define GEN12_CCS0_CCS1_INTR_MASK _MMIO(0x190100)
#define GEN12_CCS2_CCS3_INTR_MASK _MMIO(0x190104)
+#define XEHPC_BCS1_BCS2_INTR_MASK _MMIO(0x190110)
+#define XEHPC_BCS3_BCS4_INTR_MASK _MMIO(0x190114)
+#define XEHPC_BCS5_BCS6_INTR_MASK _MMIO(0x190118)
+#define XEHPC_BCS7_BCS8_INTR_MASK _MMIO(0x19011c)
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 8ec8bc660c8c..9e4ebf53379b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj)
static struct intel_gt *kobj_to_gt(struct kobject *kobj)
{
- return container_of(kobj, struct kobj_gt, base)->gt;
+ return container_of(kobj, struct intel_gt, sysfs_gt);
}
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
@@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = {
};
ATTRIBUTE_GROUPS(id);
+/* A kobject needs a release() method even if it does nothing */
static void kobj_gt_release(struct kobject *kobj)
{
- kfree(kobj);
}
static struct kobj_type kobj_gt_type = {
@@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = {
void intel_gt_sysfs_register(struct intel_gt *gt)
{
- struct kobj_gt *kg;
-
/*
* We need to make things right with the
* ABI compatibility. The files were originally
@@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
if (gt_is_root(gt))
intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
- kg = kzalloc(sizeof(*kg), GFP_KERNEL);
- if (!kg)
+ /* init and xfer ownership to sysfs tree */
+ if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
+ gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
- kobject_init(&kg->base, &kobj_gt_type);
- kg->gt = gt;
-
- /* xfer ownership to sysfs tree */
- if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id))
- goto exit_kobj_put;
-
- intel_gt_sysfs_pm_init(gt, &kg->base);
+ intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
return;
-exit_kobj_put:
- kobject_put(&kg->base);
-
exit_fail:
+ kobject_put(&gt->sysfs_gt);
drm_warn(&gt->i915->drm,
"failed to initialize gt%d sysfs root\n", gt->info.id);
}
+
+void intel_gt_sysfs_unregister(struct intel_gt *gt)
+{
+ kobject_put(&gt->sysfs_gt);
+}
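The sysfs rework above replaces the separately allocated struct kobj_gt wrapper with a kobject embedded directly in struct intel_gt, so the empty release() is intentional: the kobject's storage is owned by the GT structure itself. A generic sketch of that pattern, independent of i915 (struct and function names here are made up):

	#include <linux/kobject.h>

	struct example_owner {
		int id;
		struct kobject kobj;	/* embedded; storage is freed with the owner */
	};

	static struct example_owner *to_owner(struct kobject *kobj)
	{
		return container_of(kobj, struct example_owner, kobj);
	}

	static void example_release(struct kobject *kobj)
	{
		/* Nothing to free: the owning structure controls the lifetime. */
	}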
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
index 9471b26752cf..a99aa7e8b01a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -13,11 +13,6 @@
struct intel_gt;
-struct kobj_gt {
- struct kobject base;
- struct intel_gt *gt;
-};
-
bool is_object_gt(struct kobject *kobj);
struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
@@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt,
const char *name);
void intel_gt_sysfs_register(struct intel_gt *gt);
+void intel_gt_sysfs_unregister(struct intel_gt *gt);
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index f76b6cf8040e..73a8b46e0234 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -14,6 +14,7 @@
#include "intel_gt_regs.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
+#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -558,6 +559,174 @@ static const struct attribute *freq_attrs[] = {
NULL
};
+/*
+ * Scaling for multipliers (aka frequency factors).
+ * The format of the value in the register is u8.8.
+ *
+ * The presentation to userspace is inspired by the perf event framework.
+ * See:
+ * Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+ * for description of:
+ * /sys/bus/event_source/devices/<pmu>/events/<event>.scale
+ *
+ * Summary: Expose two sysfs files for each multiplier.
+ *
+ * 1. File <attr> contains a raw hardware value.
+ * 2. File <attr>.scale contains the multiplicative scale factor to be
+ * used by userspace to compute the actual value.
+ *
+ * So userspace knows that to get the frequency_factor it multiplies the
+ * provided value by the specified scale factor and vice-versa.
+ *
+ * That way there is no precision loss in the kernel interface, and the API
+ * remains future proof should the hardware register one day change to
+ * u16.16 (or any other fixed-point representation) on some platform.
+ *
+ * Example:
+ * File <attr> contains the value 2.5, represented as u8.8 0x0280, which
+ * is comprised of:
+ * - an integer part of 2
+ * - a fractional part of 0x80 (representing 0x80 / 2^8 == 0x80 / 256).
+ * File <attr>.scale contains a string representation of floating point
+ * value 0.00390625 (which is (1 / 256)).
+ * Userspace computes the actual value:
+ * 0x0280 * 0.00390625 -> 2.5
+ * or converts an actual value to the value to be written into <attr>:
+ * 2.5 / 0.00390625 -> 0x0280
+ */
+
+#define U8_8_VAL_MASK 0xffff
+#define U8_8_SCALE_TO_VALUE "0.00390625"
+
+static ssize_t freq_factor_scale_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ return sysfs_emit(buff, "%s\n", U8_8_SCALE_TO_VALUE);
+}
+
+static u32 media_ratio_mode_to_factor(u32 mode)
+{
+ /* 0 -> 0, 1 -> 256, 2 -> 128 */
+ return !mode ? mode : 256 / mode;
+}
+
+static ssize_t media_freq_factor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ intel_wakeref_t wakeref;
+ u32 mode;
+
+ /*
+ * Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
+ * GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
+ */
+ if (IS_XEHPSDV(gt->i915) &&
+ slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
+ /*
+ * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
+ * the media_ratio_mode, just return the cached media ratio
+ */
+ mode = slpc->media_ratio_mode;
+ } else {
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+ mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
+ }
+
+ return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
+}
+
+static ssize_t media_freq_factor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ u32 factor, mode;
+ int err;
+
+ err = kstrtou32(buff, 0, &factor);
+ if (err)
+ return err;
+
+ for (mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+ mode <= SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; mode++)
+ if (factor == media_ratio_mode_to_factor(mode))
+ break;
+
+ if (mode > SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO)
+ return -EINVAL;
+
+ err = intel_guc_slpc_set_media_ratio_mode(slpc, mode);
+ if (!err) {
+ slpc->media_ratio_mode = mode;
+ DRM_DEBUG("Set slpc->media_ratio_mode to %d", mode);
+ }
+ return err ?: count;
+}
+
+static ssize_t media_RP0_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u32 val;
+ int err;
+
+ err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
+ PCODE_MBOX_FC_SC_READ_FUSED_P0,
+ PCODE_MBOX_DOMAIN_MEDIAFF, &val);
+
+ if (err)
+ return err;
+
+ /* Fused media RP0 read from pcode is in units of 50 MHz */
+ val *= GT_FREQUENCY_MULTIPLIER;
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+static ssize_t media_RPn_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u32 val;
+ int err;
+
+ err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
+ PCODE_MBOX_FC_SC_READ_FUSED_PN,
+ PCODE_MBOX_DOMAIN_MEDIAFF, &val);
+
+ if (err)
+ return err;
+
+ /* Fused media RPn read from pcode is in units of 50 MHz */
+ val *= GT_FREQUENCY_MULTIPLIER;
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+static DEVICE_ATTR_RW(media_freq_factor);
+static struct device_attribute dev_attr_media_freq_factor_scale =
+ __ATTR(media_freq_factor.scale, 0444, freq_factor_scale_show, NULL);
+static DEVICE_ATTR_RO(media_RP0_freq_mhz);
+static DEVICE_ATTR_RO(media_RPn_freq_mhz);
+
+static const struct attribute *media_perf_power_attrs[] = {
+ &dev_attr_media_freq_factor.attr,
+ &dev_attr_media_freq_factor_scale.attr,
+ &dev_attr_media_RP0_freq_mhz.attr,
+ &dev_attr_media_RPn_freq_mhz.attr,
+ NULL
+};
+
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
const struct attribute * const *attrs)
{
@@ -599,4 +768,12 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
drm_warn(&gt->i915->drm,
"failed to create gt%u throttle sysfs files (%pe)",
gt->info.id, ERR_PTR(ret));
+
+ if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_files(kobj, media_perf_power_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
}
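
A minimal userspace-side sketch of the <attr>/<attr>.scale contract described in the intel_gt_sysfs_pm.c comment above. The raw value and scale are taken from that example; no specific sysfs paths are assumed and the program is illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x0280;	/* u8.8 value read from <attr> */
	double scale = 0.00390625;	/* contents of <attr>.scale, i.e. 1/256 */

	/* decode: 0x0280 * (1/256) == 2.5 */
	printf("factor = %.6f\n", raw * scale);

	/* encode: 2.5 / (1/256) == 0x0280 */
	printf("raw    = 0x%04x\n", (unsigned int)(2.5 / scale));

	/* media_freq_factor uses the same scale: 128 * (1/256) == 0.5 */
	printf("media  = %.2f\n", 128 * scale);

	return 0;
}

With the same 1/256 scale, the factors produced by media_ratio_mode_to_factor() read back as 1.0 for the fixed 1:1 mode (256) and 0.5 for the fixed 1:2 mode (128).
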
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index b06611c1d4ad..df708802889d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -59,6 +59,13 @@ enum intel_steering_type {
MSLICE,
LNCF,
+ /*
+ * On some platforms there are multiple types of MCR registers that
+ * will always return a non-terminated value at instance (0, 0). We'll
+ * lump those all into a single category to keep things simple.
+ */
+ INSTANCE0,
+
NUM_STEERING_TYPES
};
@@ -221,9 +228,13 @@ struct intel_gt {
struct {
u8 uc_index;
+ u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
} mocs;
struct intel_pxp pxp;
+
+ /* gt/gtN sysfs */
+ struct kobject sysfs_gt;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index a40d928b3888..e639434e97fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -306,6 +306,15 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
+ void (*raw_insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*raw_insert_entries)(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -345,6 +354,19 @@ struct i915_ggtt {
bool do_idle_maps;
+ /**
+ * @pte_lost: Are ptes lost on resume?
+ *
+ * Whether the system was recently restored from hibernate and
+ * thus may have lost pte content.
+ */
+ bool pte_lost;
+
+ /**
+ * @probed_pte: Probed pte value on suspend. Re-checked on resume.
+ */
+ u64 probed_pte;
+
int mtrr;
/** Bit 6 swizzling required for X tiling */
@@ -548,14 +570,13 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
-
void intel_ggtt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 flags);
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res);
+ struct i915_vma_resource *vma_res);
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
@@ -581,6 +602,17 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
+/**
+ * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
+ * @i915: The device private.
+ * @val: Whether the ptes should be marked as lost.
+ *
+ * In some cases pte content is retained across suspend, but it is typically
+ * lost across hibernation. The ptes should therefore be marked as lost on
+ * restore from hibernation, and the marking cleared again on suspend.
+ */
+void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);
+
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
@@ -627,7 +659,6 @@ release_pd_entry(struct i915_page_directory * const pd,
struct i915_page_table * const pt,
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
-void gen8_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 40e2e28ee6c7..14fe65812e42 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -124,7 +124,6 @@ static void calc_ia_freq(struct intel_llc *llc,
static void gen6_update_ring_freq(struct intel_llc *llc)
{
- struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct ia_constants consts;
unsigned int gpu_freq;
@@ -142,7 +141,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
unsigned int ia_freq, ring_freq;
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
- snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ snb_pcode_write(llc_to_gt(llc)->uncore, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
gpu_freq);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 31be734010db..a390f0813c8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -111,16 +111,6 @@ enum {
#define XEHP_SW_COUNTER_SHIFT 58
#define XEHP_SW_COUNTER_WIDTH 6
-static inline u32 lrc_desc_priority(int prio)
-{
- if (prio > I915_PRIORITY_NORMAL)
- return GEN12_CTX_PRIORITY_HIGH;
- else if (prio < I915_PRIORITY_NORMAL)
- return GEN12_CTX_PRIORITY_LOW;
- else
- return GEN12_CTX_PRIORITY_NORMAL;
-}
-
static inline void lrc_runtime_start(struct intel_context *ce)
{
struct intel_context_stats *stats = &ce->stats;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c4c37585ae8c..c6ebe2781076 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,6 +23,7 @@ struct drm_i915_mocs_table {
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
u8 uc_index;
+ u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
u8 unused_entries_index;
};
@@ -47,6 +48,7 @@ struct drm_i915_mocs_table {
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define PVC_NUM_MOCS_ENTRIES 3
/* (e)LLC caching options */
/*
@@ -394,6 +396,17 @@ static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
+static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
+ /* Error */
+ MOCS_ENTRY(0, 0, L3_3_WB),
+
+ /* UC */
+ MOCS_ENTRY(1, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(2, 0, L3_3_WB),
+};
+
enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
@@ -423,7 +436,14 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
memset(table, 0, sizeof(struct drm_i915_mocs_table));
table->unused_entries_index = I915_MOCS_PTE;
- if (IS_DG2(i915)) {
+ if (IS_PONTEVECCHIO(i915)) {
+ table->size = ARRAY_SIZE(pvc_mocs_table);
+ table->table = pvc_mocs_table;
+ table->n_entries = PVC_NUM_MOCS_ENTRIES;
+ table->uc_index = 1;
+ table->wb_index = 2;
+ table->unused_entries_index = 2;
+ } else if (IS_DG2(i915)) {
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
table->table = dg2_mocs_table_g10_ax;
@@ -622,6 +642,8 @@ void intel_set_mocs_index(struct intel_gt *gt)
get_mocs_settings(gt->i915, &table);
gt->mocs.uc_index = table.uc_index;
+ if (HAS_L3_CCS_READ(gt->i915))
+ gt->mocs.wb_index = table.wb_index;
}
void intel_mocs_init(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index b4770690e794..f8d0523f4c18 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -272,7 +272,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_HW_ENABLE;
rc6vids = 0;
- ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ ret = snb_pcode_read(rc6_to_gt(rc6)->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
if (GRAPHICS_VER(i915) == 6 && ret) {
drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (GRAPHICS_VER(i915) == 6 &&
@@ -282,7 +282,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ ret = snb_pcode_write(rc6_to_gt(rc6)->uncore, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
drm_err(&i915->drm,
"Couldn't fix incorrect rc6 voltage\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f5111c0a0060..d09b996a9759 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -12,6 +12,7 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
static int
@@ -101,14 +102,24 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
return ERR_PTR(-ENODEV);
if (HAS_FLAT_CCS(i915)) {
+ resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
- lmem_size = pci_resource_len(pdev, 2);
- flat_ccs_base = intel_gt_read_register(gt, XEHPSDV_FLAT_CCS_BASE_ADDR);
- flat_ccs_base = (flat_ccs_base >> XEHPSDV_CCS_BASE_SHIFT) * SZ_64K;
+ lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
+ lmem_size *= SZ_1G;
+
+ flat_ccs_base = intel_gt_mcr_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ flat_ccs_base = (flat_ccs_base >> XEHP_CCS_BASE_SHIFT) * SZ_64K;
+
+ /* FIXME: Remove this when we have small-bar enabled */
+ if (pci_resource_len(pdev, 2) < lmem_size) {
+ drm_err(&i915->drm, "System requires small-BAR support, which is currently unsupported on this kernel\n");
+ return ERR_PTR(-EINVAL);
+ }
if (GEM_WARN_ON(lmem_size < flat_ccs_base))
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EIO);
tile_stolen = lmem_size - flat_ccs_base;
@@ -131,7 +142,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
io_start = pci_resource_start(pdev, 2);
io_size = min(pci_resource_len(pdev, 2), lmem_size);
if (!io_size)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EIO);
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
I915_GTT_PAGE_SIZE_4K;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 5422a3b84bd4..a5338c3fde7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -808,7 +808,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
__intel_engine_reset(engine, stalled_mask & engine->mask);
local_bh_enable();
- intel_uc_reset(&gt->uc, true);
+ intel_uc_reset(&gt->uc, ALL_ENGINES);
intel_ggtt_restore_fences(gt->ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 40ffcb94e379..15ec64d881c4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -299,7 +299,8 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
GEM_BUG_ON(ring->emit > ring->size - bytes);
GEM_BUG_ON(ring->space < bytes);
cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
ring->emit += bytes;
ring->space -= bytes;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 5423bfd301ad..d5d6f1fadcae 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -117,7 +117,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
return;
/* ring should be idle before issuing a sync flush*/
- GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
+ drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
+ engine->name);
ENGINE_WRITE_FW(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -596,8 +598,9 @@ static void ring_context_reset(struct intel_context *ce)
clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
-static void ring_context_ban(struct intel_context *ce,
- struct i915_request *rq)
+static void ring_context_revoke(struct intel_context *ce,
+ struct i915_request *rq,
+ unsigned int preempt_timeout_ms)
{
struct intel_engine_cs *engine;
@@ -632,7 +635,7 @@ static const struct intel_context_ops ring_context_ops = {
.cancel_request = ring_context_cancel_request,
- .ban = ring_context_ban,
+ .revoke = ring_context_revoke,
.pre_pin = ring_context_pre_pin,
.pin = ring_context_pin,
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 3476a11f294c..fb3f57ee450b 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1075,7 +1075,9 @@ static u32 intel_rps_read_state_cap(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
- if (IS_XEHPSDV(i915))
+ if (IS_PONTEVECCHIO(i915))
+ return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
+ else if (IS_XEHPSDV(i915))
return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
else if (IS_GEN9_LP(i915))
return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
@@ -1142,7 +1144,8 @@ static void gen6_rps_init(struct intel_rps *rps)
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
mult = GEN9_FREQ_SCALER;
- if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ if (snb_pcode_read(rps_to_gt(rps)->uncore,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status, NULL) == 0)
rps->efficient_freq =
clamp_t(u32,
@@ -1982,7 +1985,7 @@ void intel_rps_init(struct intel_rps *rps)
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
u32 params = 0;
- snb_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL);
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
if (params & BIT(31)) { /* OC supported */
drm_dbg(&i915->drm,
"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index fdd25691beda..c6d3050604c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -16,11 +16,6 @@ void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
sseu->max_slices = max_slices;
sseu->max_subslices = max_subslices;
sseu->max_eus_per_subslice = max_eus_per_subslice;
-
- sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE);
- sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE);
}
unsigned int
@@ -28,152 +23,240 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
unsigned int i, total = 0;
- for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
- total += hweight8(sseu->subslice_mask[i]);
+ if (sseu->has_xehp_dss)
+ return bitmap_weight(sseu->subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask));
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask.hsw); i++)
+ total += hweight8(sseu->subslice_mask.hsw[i]);
return total;
}
-static u32
-sseu_get_subslices(const struct sseu_dev_info *sseu,
- const u8 *subslice_mask, u8 slice)
+unsigned int
+intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice)
{
- int i, offset = slice * sseu->ss_stride;
- u32 mask = 0;
-
- GEM_BUG_ON(slice >= sseu->max_slices);
-
- for (i = 0; i < sseu->ss_stride; i++)
- mask |= (u32)subslice_mask[offset + i] << i * BITS_PER_BYTE;
+ WARN_ON(sseu->has_xehp_dss);
+ if (WARN_ON(slice >= sseu->max_slices))
+ return 0;
- return mask;
+ return sseu->subslice_mask.hsw[slice];
}
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
{
- return sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+ if (sseu->has_xehp_dss) {
+ WARN_ON(slice > 0);
+ return sseu->eu_mask.xehp[subslice];
+ } else {
+ return sseu->eu_mask.hsw[slice][subslice];
+ }
}
-static u32 sseu_get_geometry_subslices(const struct sseu_dev_info *sseu)
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
{
- return sseu_get_subslices(sseu, sseu->geometry_subslice_mask, 0);
+ GEM_WARN_ON(eu_mask && __fls(eu_mask) >= sseu->max_eus_per_subslice);
+ if (sseu->has_xehp_dss) {
+ GEM_WARN_ON(slice > 0);
+ sseu->eu_mask.xehp[subslice] = eu_mask;
+ } else {
+ sseu->eu_mask.hsw[slice][subslice] = eu_mask;
+ }
}
-u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu)
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
- return sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
-}
+ int s, ss, total = 0;
-void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u8 *subslice_mask, u32 ss_mask)
-{
- int offset = slice * sseu->ss_stride;
+ for (s = 0; s < sseu->max_slices; s++)
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (sseu->has_xehp_dss)
+ total += hweight16(sseu->eu_mask.xehp[ss]);
+ else
+ total += hweight16(sseu->eu_mask.hsw[s][ss]);
- memcpy(&subslice_mask[offset], &ss_mask, sseu->ss_stride);
+ return total;
}
-unsigned int
-intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
+/**
+ * intel_sseu_copy_eumask_to_user - Copy EU mask into a userspace buffer
+ * @to: Pointer to userspace buffer to copy to
+ * @sseu: SSEU structure containing EU mask to copy
+ *
+ * Copies the EU mask to a userspace buffer in the format expected by
+ * the query ioctl's topology queries.
+ *
+ * Returns the result of the copy_to_user() operation.
+ */
+int intel_sseu_copy_eumask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu)
{
- return hweight32(intel_sseu_get_subslices(sseu, slice));
-}
+ u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE] = {};
+ int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ int len = sseu->max_slices * sseu->max_subslices * eu_stride;
+ int s, ss, i;
-static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- int slice_stride = sseu->max_subslices * sseu->eu_stride;
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ int uapi_offset =
+ s * sseu->max_subslices * eu_stride +
+ ss * eu_stride;
+ u16 mask = sseu_get_eus(sseu, s, ss);
+
+ for (i = 0; i < eu_stride; i++)
+ eu_mask[uapi_offset + i] =
+ (mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+ }
- return slice * slice_stride + subslice * sseu->eu_stride;
+ return copy_to_user(to, eu_mask, len);
}
-static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
- int subslice)
+/**
+ * intel_sseu_copy_ssmask_to_user - Copy subslice mask into a userspace buffer
+ * @to: Pointer to userspace buffer to copy to
+ * @sseu: SSEU structure containing subslice mask to copy
+ *
+ * Copies the subslice mask to a userspace buffer in the format expected by
+ * the query ioctl's topology queries.
+ *
+ * Returns the result of the copy_to_user() operation.
+ */
+int intel_sseu_copy_ssmask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu)
{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
+ u8 ss_mask[GEN_SS_MASK_SIZE] = {};
+ int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ int len = sseu->max_slices * ss_stride;
+ int s, ss, i;
- for (i = 0; i < sseu->eu_stride; i++)
- eu_mask |=
- ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ i = s * ss_stride * BITS_PER_BYTE + ss;
- return eu_mask;
-}
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ continue;
-static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
- u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ ss_mask[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
+ }
+ }
- for (i = 0; i < sseu->eu_stride; i++)
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ return copy_to_user(to, ss_mask, len);
}
-static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u32 ss_en, u16 eu_en)
{
- u16 i, total = 0;
+ u32 valid_ss_mask = GENMASK(sseu->max_subslices - 1, 0);
+ int ss;
- for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
- total += hweight8(sseu->eu_mask[i]);
+ sseu->slice_mask |= BIT(0);
+ sseu->subslice_mask.hsw[0] = ss_en & valid_ss_mask;
- return total;
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, 0, ss))
+ sseu_set_eus(sseu, 0, ss, eu_en);
+
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
}
-static u32 get_ss_stride_mask(struct sseu_dev_info *sseu, u8 s, u32 ss_en)
+static void xehp_compute_sseu_info(struct sseu_dev_info *sseu,
+ u16 eu_en)
{
- u32 ss_mask;
+ int ss;
- ss_mask = ss_en >> (s * sseu->max_subslices);
- ss_mask &= GENMASK(sseu->max_subslices - 1, 0);
+ sseu->slice_mask |= BIT(0);
- return ss_mask;
+ bitmap_or(sseu->subslice_mask.xehp,
+ sseu->compute_subslice_mask.xehp,
+ sseu->geometry_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask));
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, 0, ss))
+ sseu_set_eus(sseu, 0, ss, eu_en);
+
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
}
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, u8 s_en,
- u32 g_ss_en, u32 c_ss_en, u16 eu_en)
+static void
+xehp_load_dss_mask(struct intel_uncore *uncore,
+ intel_sseu_ss_mask_t *ssmask,
+ int numregs,
+ ...)
{
- int s, ss;
+ va_list argp;
+ u32 fuse_val[I915_MAX_SS_FUSE_REGS] = {};
+ int i;
- /* g_ss_en/c_ss_en represent entire subslice mask across all slices */
- GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
- sizeof(g_ss_en) * BITS_PER_BYTE);
+ if (WARN_ON(numregs > I915_MAX_SS_FUSE_REGS))
+ numregs = I915_MAX_SS_FUSE_REGS;
- for (s = 0; s < sseu->max_slices; s++) {
- if ((s_en & BIT(s)) == 0)
- continue;
+ va_start(argp, numregs);
+ for (i = 0; i < numregs; i++)
+ fuse_val[i] = intel_uncore_read(uncore, va_arg(argp, i915_reg_t));
+ va_end(argp);
- sseu->slice_mask |= BIT(s);
-
- /*
- * XeHP introduces the concept of compute vs geometry DSS. To
- * reduce variation between GENs around subslice usage, store a
- * mask for both the geometry and compute enabled masks since
- * userspace will need to be able to query these masks
- * independently. Also compute a total enabled subslice count
- * for the purposes of selecting subslices to use in a
- * particular GEM context.
- */
- intel_sseu_set_subslices(sseu, s, sseu->compute_subslice_mask,
- get_ss_stride_mask(sseu, s, c_ss_en));
- intel_sseu_set_subslices(sseu, s, sseu->geometry_subslice_mask,
- get_ss_stride_mask(sseu, s, g_ss_en));
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- get_ss_stride_mask(sseu, s,
- g_ss_en | c_ss_en));
+ bitmap_from_arr32(ssmask->xehp, fuse_val, numregs * 32);
+}
- for (ss = 0; ss < sseu->max_subslices; ss++)
- if (intel_sseu_has_subslice(sseu, s, ss))
- sseu_set_eus(sseu, s, ss, eu_en);
+static void xehp_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int num_compute_regs, num_geometry_regs;
+ int eu;
+
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ num_geometry_regs = 0;
+ num_compute_regs = 2;
+ } else {
+ num_geometry_regs = 1;
+ num_compute_regs = 1;
}
- sseu->eu_per_subslice = hweight16(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * The concept of slice has been removed in Xe_HP. To be compatible
+ * with prior generations, assume a single slice across the entire
+ * device, then determine the DSS for each workload type within that
+ * software slice.
+ */
+ intel_sseu_set_info(sseu, 1,
+ 32 * max(num_geometry_regs, num_compute_regs),
+ HAS_ONE_EU_PER_FUSE_BIT(gt->i915) ? 8 : 16);
+ sseu->has_xehp_dss = 1;
+
+ xehp_load_dss_mask(uncore, &sseu->geometry_subslice_mask,
+ num_geometry_regs,
+ GEN12_GT_GEOMETRY_DSS_ENABLE);
+ xehp_load_dss_mask(uncore, &sseu->compute_subslice_mask,
+ num_compute_regs,
+ GEN12_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);
+
+ eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
+
+ if (HAS_ONE_EU_PER_FUSE_BIT(gt->i915))
+ eu_en = eu_en_fuse;
+ else
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ xehp_compute_sseu_info(sseu, eu_en);
}
static void gen12_sseu_info_init(struct intel_gt *gt)
{
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
- u32 g_dss_en, c_dss_en = 0;
+ u32 g_dss_en;
u16 eu_en = 0;
u8 eu_en_fuse;
u8 s_en;
@@ -183,43 +266,28 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
* Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
* Instead of splitting these, provide userspace with an array
* of DSS to more closely represent the hardware resource.
- *
- * In addition, the concept of slice has been removed in Xe_HP.
- * To be compatible with prior generations, assume a single slice
- * across the entire device. Then calculate out the DSS for each
- * workload type within that software slice.
*/
- if (IS_DG2(gt->i915) || IS_XEHPSDV(gt->i915))
- intel_sseu_set_info(sseu, 1, 32, 16);
- else
- intel_sseu_set_info(sseu, 1, 6, 16);
+ intel_sseu_set_info(sseu, 1, 6, 16);
/*
- * As mentioned above, Xe_HP does not have the concept of a slice.
- * Enable one for software backwards compatibility.
+ * Although gen12 architecture supported multiple slices, TGL, RKL,
+ * DG1, and ADL only had a single slice.
*/
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- s_en = 0x1;
- else
- s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
- GEN11_GT_S_ENA_MASK;
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+ drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- c_dss_en = intel_uncore_read(uncore, GEN12_GT_COMPUTE_DSS_ENABLE);
/* one bit per pair of EUs */
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
- else
- eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
- GEN11_EU_DIS_MASK);
+ eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
if (eu_en_fuse & BIT(eu))
eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
- gen11_compute_sseu_info(sseu, s_en, g_dss_en, c_dss_en, eu_en);
+ gen11_compute_sseu_info(sseu, g_dss_en, eu_en);
/* TGL only supports slice-level power gating */
sseu->has_slice_pg = 1;
@@ -238,14 +306,20 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
else
intel_sseu_set_info(sseu, 1, 8, 8);
+ /*
+ * Although gen11 architecture supported multiple slices, ICL and
+ * EHL/JSL only had a single slice in practice.
+ */
s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
GEN11_GT_S_ENA_MASK;
+ drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
+
ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
GEN11_EU_DIS_MASK);
- gen11_compute_sseu_info(sseu, s_en, ss_en, 0, eu_en);
+ gen11_compute_sseu_info(sseu, ss_en, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -257,7 +331,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
{
struct sseu_dev_info *sseu = &gt->info.sseu;
u32 fuse;
- u8 subslice_mask = 0;
fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
@@ -271,8 +344,8 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- subslice_mask |= BIT(0);
- sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+ sseu->subslice_mask.hsw[0] |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask & 0xFF);
}
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
@@ -282,12 +355,10 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- subslice_mask |= BIT(1);
- sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+ sseu->subslice_mask.hsw[0] |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask & 0xFF);
}
- intel_sseu_set_subslices(sseu, 0, sseu->subslice_mask, subslice_mask);
-
sseu->eu_total = compute_eu_total(sseu);
/*
@@ -342,8 +413,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- subslice_mask);
+ sseu->subslice_mask.hsw[s] = subslice_mask;
eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
@@ -356,7 +426,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & eu_mask);
eu_per_ss = sseu->max_eus_per_subslice -
hweight8(eu_disabled_mask);
@@ -400,8 +470,8 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(i915)) {
-#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
@@ -455,8 +525,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- subslice_mask);
+ sseu->subslice_mask.hsw[s] = subslice_mask;
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
@@ -469,7 +538,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
eu_disabled_mask =
eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & 0xFF);
n_disabled = hweight8(eu_disabled_mask);
@@ -553,8 +622,7 @@ static void hsw_sseu_info_init(struct intel_gt *gt)
sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- subslice_mask);
+ sseu->subslice_mask.hsw[s] = subslice_mask;
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
@@ -574,18 +642,20 @@ void intel_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_HASWELL(i915))
- hsw_sseu_info_init(gt);
- else if (IS_CHERRYVIEW(i915))
- cherryview_sseu_info_init(gt);
- else if (IS_BROADWELL(i915))
- bdw_sseu_info_init(gt);
- else if (GRAPHICS_VER(i915) == 9)
- gen9_sseu_info_init(gt);
- else if (GRAPHICS_VER(i915) == 11)
- gen11_sseu_info_init(gt);
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ xehp_sseu_info_init(gt);
else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 11)
+ gen11_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 9)
+ gen9_sseu_info_init(gt);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_info_init(gt);
+ else if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_info_init(gt);
+ else if (IS_HASWELL(i915))
+ hsw_sseu_info_init(gt);
}
u32 intel_sseu_make_rpcs(struct intel_gt *gt,
@@ -641,7 +711,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
*/
if (GRAPHICS_VER(i915) == 11 &&
slices == 1 &&
- subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
+ subslices > min_t(u8, 4, hweight8(sseu->subslice_mask.hsw[0]) / 2)) {
GEM_BUG_ON(subslices & 1);
subslice_pg = false;
@@ -707,14 +777,29 @@ void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
int s;
- drm_printf(p, "slice total: %u, mask=%04x\n",
- hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
- for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
+ if (sseu->has_xehp_dss) {
+ drm_printf(p, "subslice total: %u\n",
+ intel_sseu_subslice_total(sseu));
+ drm_printf(p, "geometry dss mask=%*pb\n",
+ XEHP_BITMAP_BITS(sseu->geometry_subslice_mask),
+ sseu->geometry_subslice_mask.xehp);
+ drm_printf(p, "compute dss mask=%*pb\n",
+ XEHP_BITMAP_BITS(sseu->compute_subslice_mask),
+ sseu->compute_subslice_mask.xehp);
+ } else {
+ drm_printf(p, "slice total: %u, mask=%04x\n",
+ hweight8(sseu->slice_mask), sseu->slice_mask);
+ drm_printf(p, "subslice total: %u\n",
+ intel_sseu_subslice_total(sseu));
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ u8 ss_mask = sseu->subslice_mask.hsw[s];
+
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+ s, hweight8(ss_mask), ss_mask);
+ }
}
+
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
drm_printf(p, "has slice power gating: %s\n",
@@ -731,9 +816,10 @@ static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
int s, ss;
for (s = 0; s < sseu->max_slices; s++) {
+ u8 ss_mask = sseu->subslice_mask.hsw[s];
+
drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
+ s, hweight8(ss_mask), ss_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
@@ -747,16 +833,14 @@ static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
- u32 g_dss_mask = sseu_get_geometry_subslices(sseu);
- u32 c_dss_mask = intel_sseu_get_compute_subslices(sseu);
int dss;
for (dss = 0; dss < sseu->max_subslices; dss++) {
u16 enabled_eus = sseu_get_eus(sseu, 0, dss);
drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss,
- str_yes_no(g_dss_mask & BIT(dss)),
- str_yes_no(c_dss_mask & BIT(dss)),
+ str_yes_no(test_bit(dss, sseu->geometry_subslice_mask.xehp)),
+ str_yes_no(test_bit(dss, sseu->compute_subslice_mask.xehp)),
hweight16(enabled_eus), enabled_eus);
}
}
@@ -774,20 +858,44 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
}
}
-u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice)
+void intel_sseu_print_ss_info(const char *type,
+ const struct sseu_dev_info *sseu,
+ struct seq_file *m)
{
- u16 slice_mask = 0;
+ int s;
+
+ if (sseu->has_xehp_dss) {
+ seq_printf(m, " %s Geometry DSS: %u\n", type,
+ bitmap_weight(sseu->geometry_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->geometry_subslice_mask)));
+ seq_printf(m, " %s Compute DSS: %u\n", type,
+ bitmap_weight(sseu->compute_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->compute_subslice_mask)));
+ } else {
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, hweight8(sseu->subslice_mask.hsw[s]));
+ }
+}
+
+u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask,
+ int dss_per_slice)
+{
+ intel_sseu_ss_mask_t per_slice_mask = {};
+ unsigned long slice_mask = 0;
int i;
- WARN_ON(sizeof(dss_mask) * 8 / dss_per_slice > 8 * sizeof(slice_mask));
+ WARN_ON(DIV_ROUND_UP(XEHP_BITMAP_BITS(dss_mask), dss_per_slice) >
+ 8 * sizeof(slice_mask));
- for (i = 0; dss_mask; i++) {
- if (dss_mask & GENMASK(dss_per_slice - 1, 0))
+ bitmap_fill(per_slice_mask.xehp, dss_per_slice);
+ for (i = 0; !bitmap_empty(dss_mask.xehp, XEHP_BITMAP_BITS(dss_mask)); i++) {
+ if (bitmap_intersects(dss_mask.xehp, per_slice_mask.xehp, dss_per_slice))
slice_mask |= BIT(i);
- dss_mask >>= dss_per_slice;
+ bitmap_shift_right(dss_mask.xehp, dss_mask.xehp, dss_per_slice,
+ XEHP_BITMAP_BITS(dss_mask));
}
return slice_mask;
}
-
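
A stand-alone sketch of the uapi byte layout produced by intel_sseu_copy_eumask_to_user() above. The max_subslices and max_eus_per_subslice values are example numbers rather than any particular platform's limits.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int max_subslices = 32, max_eus_per_subslice = 16;
	int eu_stride = DIV_ROUND_UP(max_eus_per_subslice, BITS_PER_BYTE);
	int s = 0, ss = 3, i;
	uint16_t mask = 0x0f0f;		/* example EU mask for this DSS */
	uint8_t buf[32 * 2] = { 0 };	/* one slice worth of entries */
	int offset = s * max_subslices * eu_stride + ss * eu_stride;

	/* same least-significant-byte-first split as the kernel helper */
	for (i = 0; i < eu_stride; i++)
		buf[offset + i] = (mask >> (BITS_PER_BYTE * i)) & 0xff;

	printf("bytes %d..%d = %02x %02x\n", offset, offset + eu_stride - 1,
	       buf[offset], buf[offset + 1]);
	return 0;
}
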
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 5c078df4729c..aa87d3832d60 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -25,12 +25,16 @@ struct drm_printer;
/*
* Maximum number of subslices that can exist within a HSW-style slice. This
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
- * GEN_MAX_DSS value below).
+ * I915_MAX_SS_FUSE_BITS value below).
*/
#define GEN_MAX_SS_PER_HSW_SLICE 6
-/* Maximum number of DSS on newer platforms (Xe_HP and beyond). */
-#define GEN_MAX_DSS 32
+/*
+ * Maximum number of 32-bit registers used by hardware to express the
+ * enabled/disabled subslices.
+ */
+#define I915_MAX_SS_FUSE_REGS 2
+#define I915_MAX_SS_FUSE_BITS (I915_MAX_SS_FUSE_REGS * 32)
/* Maximum number of EUs that can exist within a subslice or DSS. */
#define GEN_MAX_EUS_PER_SS 16
@@ -38,7 +42,7 @@ struct drm_printer;
#define SSEU_MAX(a, b) ((a) > (b) ? (a) : (b))
/* The maximum number of bits needed to express each subslice/DSS independently */
-#define GEN_SS_MASK_SIZE SSEU_MAX(GEN_MAX_DSS, \
+#define GEN_SS_MASK_SIZE SSEU_MAX(I915_MAX_SS_FUSE_BITS, \
GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE)
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
@@ -49,15 +53,28 @@ struct drm_printer;
#define GEN_DSS_PER_CSLICE 8
#define GEN_DSS_PER_MSLICE 8
-#define GEN_MAX_GSLICES (GEN_MAX_DSS / GEN_DSS_PER_GSLICE)
-#define GEN_MAX_CSLICES (GEN_MAX_DSS / GEN_DSS_PER_CSLICE)
+#define GEN_MAX_GSLICES (I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_CSLICE)
+
+typedef union {
+ u8 hsw[GEN_MAX_HSW_SLICES];
+
+ /* Bitmap compatible with linux/bitmap.h; may exceed size of u64 */
+ unsigned long xehp[BITS_TO_LONGS(I915_MAX_SS_FUSE_BITS)];
+} intel_sseu_ss_mask_t;
+
+#define XEHP_BITMAP_BITS(mask) ((int)BITS_PER_TYPE(typeof(mask.xehp)))
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_SS_MASK_SIZE];
- u8 geometry_subslice_mask[GEN_SS_MASK_SIZE];
- u8 compute_subslice_mask[GEN_SS_MASK_SIZE];
- u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE];
+ intel_sseu_ss_mask_t subslice_mask;
+ intel_sseu_ss_mask_t geometry_subslice_mask;
+ intel_sseu_ss_mask_t compute_subslice_mask;
+ union {
+ u16 hsw[GEN_MAX_HSW_SLICES][GEN_MAX_SS_PER_HSW_SLICE];
+ u16 xehp[I915_MAX_SS_FUSE_BITS];
+ } eu_mask;
+
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -66,14 +83,16 @@ struct sseu_dev_info {
u8 has_slice_pg:1;
u8 has_subslice_pg:1;
u8 has_eu_pg:1;
+ /*
+ * For Xe_HP and beyond, the hardware no longer has traditional slices
+ * so we just report the entire DSS pool under a fake "slice 0."
+ */
+ u8 has_xehp_dss:1;
/* Topology fields */
u8 max_slices;
u8 max_subslices;
u8 max_eus_per_subslice;
-
- u8 ss_stride;
- u8 eu_stride;
};
/*
@@ -91,7 +110,7 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
{
struct intel_sseu value = {
.slice_mask = sseu->slice_mask,
- .subslice_mask = sseu->subslice_mask[0],
+ .subslice_mask = sseu->subslice_mask.hsw[0],
.min_eus_per_subslice = sseu->max_eus_per_subslice,
.max_eus_per_subslice = sseu->max_eus_per_subslice,
};
@@ -103,18 +122,28 @@ static inline bool
intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- u8 mask;
- int ss_idx = subslice / BITS_PER_BYTE;
-
if (slice >= sseu->max_slices ||
subslice >= sseu->max_subslices)
return false;
- GEM_BUG_ON(ss_idx >= sseu->ss_stride);
-
- mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
+ if (sseu->has_xehp_dss)
+ return test_bit(subslice, sseu->subslice_mask.xehp);
+ else
+ return sseu->subslice_mask.hsw[slice] & BIT(subslice);
+}
- return mask & BIT(subslice % BITS_PER_BYTE);
+/*
+ * Used to obtain the index of the first DSS. Can start searching from the
+ * beginning of a specific dss group (e.g., gslice, cslice, etc.) if
+ * groupsize and groupnum are non-zero.
+ */
+static inline unsigned int
+intel_sseu_find_first_xehp_dss(const struct sseu_dev_info *sseu, int groupsize,
+ int groupnum)
+{
+ return find_next_bit(sseu->subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask),
+ groupnum * groupsize);
}
void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
@@ -124,14 +153,10 @@ unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
-intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice);
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
-
-u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);
-
-void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u8 *subslice_mask, u32 ss_mask);
+intel_sseu_ss_mask_t
+intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);
void intel_sseu_info_init(struct intel_gt *gt);
@@ -143,6 +168,15 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p);
-u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice);
+u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask, int dss_per_slice);
+
+int intel_sseu_copy_eumask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu);
+int intel_sseu_copy_ssmask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu);
+
+void intel_sseu_print_ss_info(const char *type,
+ const struct sseu_dev_info *sseu,
+ struct seq_file *m);
#endif /* __INTEL_SSEU_H__ */
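
For context on intel_slicemask_from_xehp_dssmask() declared above, here is a stand-alone sketch of the same per-slice grouping using a plain u64 mask, much like the removed intel_slicemask_from_dssmask() helper did, whereas the new code operates on the intel_sseu_ss_mask_t bitmap. The mask and group size below are example values only.

#include <stdint.h>
#include <stdio.h>

static uint16_t slicemask_from_dssmask(uint64_t dss_mask, int dss_per_slice)
{
	uint64_t group = (dss_per_slice >= 64) ? ~0ULL :
			 ((1ULL << dss_per_slice) - 1);
	uint16_t slice_mask = 0;
	int i;

	/* any enabled DSS within a dss_per_slice-sized group marks that slice */
	for (i = 0; dss_mask; i++) {
		if (dss_mask & group)
			slice_mask |= 1u << i;
		dss_mask >>= dss_per_slice;
	}
	return slice_mask;
}

int main(void)
{
	/* DSS 1 and DSS 9 enabled, 4 DSS per slice -> slices 0 and 2 (0x5) */
	printf("slice mask = 0x%x\n",
	       slicemask_from_dssmask((1ULL << 1) | (1ULL << 9), 4));
	return 0;
}
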
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index 2d5d011e01db..c2ee5e1826b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -4,6 +4,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/bitmap.h>
#include <linux/string_helpers.h>
#include "i915_drv.h"
@@ -11,14 +12,6 @@
#include "intel_gt_regs.h"
#include "intel_sseu_debugfs.h"
-static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
- int slice, u8 *to_mask)
-{
- int offset = slice * sseu->ss_stride;
-
- memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
-}
-
static void cherryview_sseu_device_status(struct intel_gt *gt,
struct sseu_dev_info *sseu)
{
@@ -41,7 +34,7 @@ static void cherryview_sseu_device_status(struct intel_gt *gt,
continue;
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] |= BIT(ss);
+ sseu->subslice_mask.hsw[0] |= BIT(ss);
eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
@@ -92,7 +85,7 @@ static void gen11_sseu_device_status(struct intel_gt *gt,
continue;
sseu->slice_mask |= BIT(s);
- sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
@@ -147,21 +140,17 @@ static void gen9_sseu_device_status(struct intel_gt *gt,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(gt->i915))
- sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- u8 ss_idx = s * info->sseu.ss_stride +
- ss / BITS_PER_BYTE;
if (IS_GEN9_LP(gt->i915)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[ss_idx] |=
- BIT(ss % BITS_PER_BYTE);
+ sseu->subslice_mask.hsw[s] |= BIT(ss);
}
eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
@@ -188,8 +177,7 @@ static void bdw_sseu_device_status(struct intel_gt *gt,
if (sseu->slice_mask) {
sseu->eu_per_subslice = info->sseu.eu_per_subslice;
for (s = 0; s < fls(sseu->slice_mask); s++)
- sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
@@ -208,7 +196,6 @@ static void i915_print_sseu_info(struct seq_file *m,
const struct sseu_dev_info *sseu)
{
const char *type = is_available_info ? "Available" : "Enabled";
- int s;
seq_printf(m, " %s Slice Mask: %04x\n", type,
sseu->slice_mask);
@@ -216,10 +203,7 @@ static void i915_print_sseu_info(struct seq_file *m,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
intel_sseu_subslice_total(sseu));
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- seq_printf(m, " %s Slice%i subslices: %u\n", type,
- s, intel_sseu_subslices_per_slice(sseu, s));
- }
+ intel_sseu_print_ss_info(type, sseu, m);
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
seq_printf(m, " %s EU Per Subslice: %u\n", type,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index a05c4b99b3fb..3213c593a55f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -9,6 +9,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -776,7 +777,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_DG2(i915))
+ if (IS_PONTEVECCHIO(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
else if (IS_XEHPSDV(i915))
; /* noop; none at this time */
@@ -948,8 +951,8 @@ gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* on s/ss combo, the read should be done with read_subslice_reg.
*/
slice = ffs(sseu->slice_mask) - 1;
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = ffs(intel_sseu_get_subslices(sseu, slice));
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
+ subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
GEM_BUG_ON(!subslice);
subslice--;
@@ -1080,18 +1083,17 @@ static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
gt->default_steering.instanceid = subslice;
if (drm_debug_enabled(DRM_UT_DRIVER))
- intel_gt_report_steering(&p, gt, false);
+ intel_gt_mcr_report_steering(&p, gt, false);
}
static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &gt->info.sseu;
- unsigned int slice, subslice;
+ unsigned int subslice;
GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
- slice = 0;
/*
* Although a platform may have subslices, we need to always steer
@@ -1102,7 +1104,7 @@ icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
* one of the higher subslices, we run the risk of reading back 0's or
* random garbage.
*/
- subslice = __ffs(intel_sseu_get_subslices(sseu, slice));
+ subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
/*
* If the subslice we picked above also steers us to a valid L3 bank,
@@ -1112,7 +1114,7 @@ icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
if (gt->info.l3bank_mask & BIT(subslice))
gt->steering_table[L3BANK] = NULL;
- __add_mcr_wa(gt, wal, slice, subslice);
+ __add_mcr_wa(gt, wal, 0, subslice);
}
static void
@@ -1120,7 +1122,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned long slice, subslice = 0, slice_mask = 0;
- u64 dss_mask = 0;
u32 lncf_mask = 0;
int i;
@@ -1151,8 +1152,8 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
*/
/* Find the potential gslice candidates */
- dss_mask = intel_sseu_get_subslices(sseu, 0);
- slice_mask = intel_slicemask_from_dssmask(dss_mask, GEN_DSS_PER_GSLICE);
+ slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
+ GEN_DSS_PER_GSLICE);
/*
* Find the potential LNCF candidates. Either LNCF within a valid
@@ -1177,9 +1178,8 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
slice = __ffs(slice_mask);
- subslice = __ffs(dss_mask >> (slice * GEN_DSS_PER_GSLICE));
- WARN_ON(subslice > GEN_DSS_PER_GSLICE);
- WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0);
+ subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
+ GEN_DSS_PER_GSLICE;
__add_mcr_wa(gt, wal, slice, subslice);
@@ -1197,6 +1197,20 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ unsigned int dss;
+
+ /*
+ * Setup implicit steering for COMPUTE and DSS ranges to the first
+ * non-fused-off DSS. All other types of MCR registers will be
+ * explicitly steered.
+ */
+ dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
+ __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
+}
+
+static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1487,6 +1501,18 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
* performance guide section.
*/
wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
+
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+}
+
+static void
+pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ pvc_init_mcr(gt, wal);
+
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
}
static void
@@ -1494,7 +1520,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_DG2(i915))
+ if (IS_PONTEVECCHIO(i915))
+ pvc_gt_workarounds_init(gt, wal);
+ else if (IS_DG2(i915))
dg2_gt_workarounds_init(gt, wal);
else if (IS_XEHPSDV(i915))
xehpsdv_gt_workarounds_init(gt, wal);
@@ -1596,13 +1624,13 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
u32 val, old = 0;
/* open-coded rmw due to steering */
- old = wa->clr ? intel_gt_read_register_fw(gt, wa->reg) : 0;
+ old = wa->clr ? intel_gt_mcr_read_any_fw(gt, wa->reg) : 0;
val = (old & ~wa->clr) | wa->set;
if (val != old || !wa->clr)
intel_uncore_write_fw(uncore, wa->reg, val);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- wa_verify(wa, intel_gt_read_register_fw(gt, wa->reg),
+ wa_verify(wa, intel_gt_mcr_read_any_fw(gt, wa->reg),
wal->name, "application");
}
@@ -1633,7 +1661,7 @@ static bool wa_list_verify(struct intel_gt *gt,
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
ok &= wa_verify(wa,
- intel_gt_read_register_fw(gt, wa->reg),
+ intel_gt_mcr_read_any_fw(gt, wa->reg),
wal->name, from);
intel_uncore_forcewake_put__locked(uncore, fw);
@@ -1924,6 +1952,32 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
}
}
+static void blacklist_trtt(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ /*
+ * Prevent read/write access to [0x4400, 0x4600) which covers
+ * the TRTT range across all engines. Note that normally userspace
+ * cannot access the other engines' trtt control, but for simplicity
+ * we cover the entire range on each engine.
+ */
+ whitelist_reg_ext(w, _MMIO(0x4400),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64);
+ whitelist_reg_ext(w, _MMIO(0x4500),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64);
+}
+
+static void pvc_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+
+ /* Wa_16014440446:pvc */
+ blacklist_trtt(engine);
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1931,7 +1985,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, "whitelist", engine->name);
- if (IS_DG2(i915))
+ if (IS_PONTEVECCHIO(i915))
+ pvc_whitelist_build(engine);
+ else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
xehpsdv_whitelist_build(engine);
@@ -1994,27 +2050,44 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- u8 mocs;
+ u8 mocs_w, mocs_r;
/*
- * RING_CMD_CCTL are need to be programed to un-cached
- * for memory writes and reads outputted by Command
- * Streamers on Gen12 onward platforms.
+ * RING_CMD_CCTL specifies the default MOCS entry that will be used
+ * by the command streamer when executing commands that don't have
+ * a way to explicitly specify a MOCS setting. The default should
+ * usually reference whichever MOCS entry corresponds to uncached
+ * behavior, although use of a WB cached entry is recommended by the
+ * spec in certain circumstances on specific platforms.
*/
if (GRAPHICS_VER(engine->i915) >= 12) {
- mocs = engine->gt->mocs.uc_index;
+ mocs_r = engine->gt->mocs.uc_index;
+ mocs_w = engine->gt->mocs.uc_index;
+
+ if (HAS_L3_CCS_READ(engine->i915) &&
+ engine->class == COMPUTE_CLASS) {
+ mocs_r = engine->gt->mocs.wb_index;
+
+ /*
+ * Even on the few platforms where MOCS 0 is a
+ * legitimate table entry, it's never the correct
+ * setting to use here; we can assume the MOCS init
+ * just forgot to initialize wb_index.
+ */
+ drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
+ }
+
wa_masked_field_set(wal,
RING_CMD_CCTL(engine->mmio_base),
CMD_CCTL_MOCS_MASK,
- CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
+ CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
}
}
static bool needs_wa_1308578152(struct intel_engine_cs *engine)
{
- u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);
-
- return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
+ return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
+ GEN_DSS_PER_GSLICE;
}
static void
@@ -2023,9 +2096,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_DG2(i915)) {
- /* Wa_14015227452:dg2 */
- wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
-
/* Wa_1509235366:dg2 */
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
@@ -2036,12 +2106,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* performance guide section.
*/
wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
-
- /* Wa_18018781329:dg2 */
- wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
@@ -2160,6 +2224,16 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G10(i915)) {
+ /* Wa_22014600077:dg2 */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
+ 0 /* Wa_14012342262: write-only reg, so skip
+ verification */,
+ true);
+ }
+
if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
/*
@@ -2583,6 +2657,15 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
}
+static void
+ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
+ /* Wa_14014999345:pvc */
+ wa_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
+ }
+}
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2597,6 +2680,15 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
{
struct drm_i915_private *i915 = engine->i915;
+ if (IS_PONTEVECCHIO(i915)) {
+ /*
+ * The following is not actually a "workaround" but rather
+ * a recommended tuning setting documented in the bspec's
+ * performance guide section.
+ */
+ wa_write(wal, XEHPC_L3SCRUB, SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ }
+
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_masked_en(wal,
@@ -2629,9 +2721,21 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
GLOBAL_INVALIDATION_MODE);
}
- if (IS_DG2(i915)) {
- /* Wa_22014226127:dg2 */
+ if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
+ /* Wa_14015227452:dg2,pvc */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+
+ /* Wa_22014226127:dg2,pvc */
wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+
+ /* Wa_16015675438:dg2,pvc */
+ wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
+
+ /* Wa_18018781329:dg2,pvc */
+ wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
}
@@ -2651,7 +2755,9 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
general_render_compute_wa_init(engine, wal);
- if (engine->class == RENDER_CLASS)
+ if (engine->class == COMPUTE_CLASS)
+ ccs_engine_wa_init(engine, wal);
+ else if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 83ff4c2e57c5..6493265d5f64 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -976,6 +976,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
{
struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine, *other;
+ struct active_engine *threads;
enum intel_engine_id id, tmp;
struct hang h;
int err = 0;
@@ -996,8 +997,11 @@ static int __igt_reset_engines(struct intel_gt *gt,
h.ctx->sched.priority = 1024;
}
+ threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
for_each_engine(engine, gt, id) {
- struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
bool using_guc = intel_engine_uses_guc(engine);
@@ -1016,7 +1020,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
break;
}
- memset(threads, 0, sizeof(threads));
+ memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
@@ -1236,6 +1240,7 @@ unwind:
break;
}
}
+ kfree(threads);
if (intel_gt_is_wedged(gt))
err = -EIO;
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index 2cd184ab32b1..cfd736d88939 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -31,7 +31,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
val = gpu_freq;
- if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ if (snb_pcode_read(llc_to_gt(llc)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&val, NULL)) {
pr_err("Failed to read freq table[%d], range [%d, %d]\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6a69ac0184ad..cfb4708dd62e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -521,7 +521,7 @@ static void show_pcu_config(struct intel_rps *rps)
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
int ia_freq = gpu_freq;
- snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
pr_info("%5d %5d %5d\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 62cb4254a77a..4c840a2639dc 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -122,6 +122,12 @@ enum slpc_param_id {
SLPC_MAX_PARAM = 32,
};
+enum slpc_media_ratio_mode {
+ SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL = 0,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE = 1,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO = 2,
+};
+
enum slpc_event_id {
SLPC_EVENT_RESET = 0,
SLPC_EVENT_SHUTDOWN = 1,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2c4ad4a65089..2706a8c65090 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -310,8 +310,8 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2(gt->i915))
flags |= GUC_WA_DUAL_QUEUE;
- /* Wa_22011802037: graphics version 12 */
- if (GRAPHICS_VER(gt->i915) == 12)
+ /* Wa_22011802037: graphics version 11/12 */
+ if (IS_GRAPHICS_VER(gt->i915, 11, 12))
flags |= GUC_WA_PRE_PARSER;
/* Wa_16011777198:dg2 */
@@ -327,6 +327,10 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
flags |= GUC_WA_CONTEXT_ISOLATION;
+ /* Wa_16015675438 */
+ if (!RCS_MASK(gt))
+ flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
+
return flags;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 3f3373f68123..d0d99f178f2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -230,6 +230,14 @@ struct intel_guc {
* @shift: Right shift value for the gpm timestamp
*/
u32 shift;
+
+ /**
+ * @last_stat_jiffies: jiffies at last actual stats collection time
+ * We use this timestamp to ensure we don't oversample the
+ * stats because runtime power management events can trigger
+ * stats collection at much higher rates than required.
+ */
+ unsigned long last_stat_jiffies;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
@@ -443,7 +451,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
void intel_guc_submission_reset_prepare(struct intel_guc *guc);
-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 3eabf4cf8eec..ba7541f3ca61 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -7,6 +7,7 @@
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
@@ -313,7 +314,7 @@ static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
* tracking, it is easier to just program the default steering for all
* regs that don't need a non-default one.
*/
- intel_gt_get_valid_steering_for_reg(gt, reg, &group, &inst);
+ intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
entry.flags |= GUC_REGSET_STEERING(group, inst);
slot = __mmio_reg_add(regset, &entry);
@@ -457,7 +458,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
{
info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
- info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], 1);
+ info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index c4e25966d3e9..97a32e610c30 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -420,72 +420,6 @@ guc_capture_get_device_reglist(struct intel_guc *guc)
return default_lists;
}
-static const char *
-__stringify_owner(u32 owner)
-{
- switch (owner) {
- case GUC_CAPTURE_LIST_INDEX_PF:
- return "PF";
- case GUC_CAPTURE_LIST_INDEX_VF:
- return "VF";
- default:
- return "unknown";
- }
-
- return "";
-}
-
-static const char *
-__stringify_type(u32 type)
-{
- switch (type) {
- case GUC_CAPTURE_LIST_TYPE_GLOBAL:
- return "Global";
- case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
- return "Class";
- case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
- return "Instance";
- default:
- return "unknown";
- }
-
- return "";
-}
-
-static const char *
-__stringify_engclass(u32 class)
-{
- switch (class) {
- case GUC_RENDER_CLASS:
- return "Render";
- case GUC_VIDEO_CLASS:
- return "Video";
- case GUC_VIDEOENHANCE_CLASS:
- return "VideoEnhance";
- case GUC_BLITTER_CLASS:
- return "Blitter";
- case GUC_COMPUTE_CLASS:
- return "Compute";
- default:
- return "unknown";
- }
-
- return "";
-}
-
-static void
-guc_capture_warn_with_list_info(struct drm_i915_private *i915, char *msg,
- u32 owner, u32 type, u32 classid)
-{
- if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers.\n", msg,
- __stringify_owner(owner), __stringify_type(type));
- else
- drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers on %s-Engine\n", msg,
- __stringify_owner(owner), __stringify_type(type),
- __stringify_engclass(classid));
-}
-
static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
@@ -501,11 +435,8 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
return -ENODEV;
match = guc_capture_get_one_list(reglists, owner, type, classid);
- if (!match) {
- guc_capture_warn_with_list_info(i915, "Missing register list init", owner, type,
- classid);
+ if (!match)
return -ENODATA;
- }
for (i = 0; i < num_entries && i < match->num_regs; ++i) {
ptr[i].offset = match->list[i].reg.reg;
@@ -556,7 +487,6 @@ int
intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
@@ -570,11 +500,8 @@ intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 cl
}
num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
- if (!num_regs) {
- guc_capture_warn_with_list_info(i915, "Missing register list size",
- owner, type, classid);
+ if (!num_regs)
return -ENODATA;
- }
*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
(num_regs * sizeof(struct guc_mmio_reg)));
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 42cb7a9a6199..b3c9a9327f76 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -105,6 +105,7 @@
#define GUC_WA_PRE_PARSER BIT(14)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
+#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index 79c66b6b51a3..4781fccc2687 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -94,9 +94,9 @@ static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig
static bool has_table(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915))
+ if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
return true;
- if (IS_DG2(i915))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return true;
return false;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 78d2989fe917..02311ad90264 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -588,7 +588,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
/*
* We require SSE 4.1 for fast reads from the GuC log buffer and
* it should be present on the chipsets supporting GuC based
- * submisssions.
+ * submissions.
*/
if (!i915_has_memcpy_from_wc()) {
ret = -ENXIO;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index e00661fb0853..8f8dd05835c5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -49,7 +49,6 @@ static int guc_action_control_gucrc(struct intel_guc *guc, bool enable)
static int __guc_rc_control(struct intel_guc *guc, bool enable)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_device *drm = &guc_to_gt(guc)->i915->drm;
int ret;
if (!intel_uc_uses_guc_rc(&gt->uc))
@@ -60,8 +59,8 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
- drm_err(drm, "Failed to %s GuC RC (%pe)\n",
- str_enable_disable(enable), ERR_PTR(ret));
+ i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index ad570fa002a6..8dc063f087eb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -96,6 +96,7 @@
#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
#define GUC_IS_PRIVILEGED (1<<29)
+#define GSC_LOADS_HUC (1<<30)
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 1db833da42df..ec9c4ca0f615 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -98,6 +98,30 @@ static u32 slpc_get_state(struct intel_guc_slpc *slpc)
return data->header.global_state;
}
+static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
+ id,
+ value,
+ };
+ int ret;
+
+ ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+
+ GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+ return guc_action_slpc_set_param_nb(guc, id, value);
+}
+
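/*
 * Illustrative sketch (not part of this patch): slpc_set_param_nb() above
 * issues the H2G SET_PARAM without waiting for the GuC reply, mapping any
 * positive intel_guc_send_nb() return to -EPROTO. A hypothetical caller
 * mirroring slpc_force_min_freq() further down in this file:
 */
static int example_set_min_freq_nb(struct intel_guc_slpc *slpc, u32 freq_mhz)
{
	/* Returns 0 on success, a negative errno on failure. */
	return slpc_set_param_nb(slpc,
				 SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				 freq_mhz);
}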
static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
u32 request[] = {
@@ -208,12 +232,14 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- ret = slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- freq);
+ /* Non-blocking request will avoid stalls */
+ ret = slpc_set_param_nb(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
if (ret)
- i915_probe_error(i915, "Unable to force min freq to %u: %d",
- freq, ret);
+ drm_notice(&i915->drm,
+ "Failed to send set_param for min freq(%d): (%d)\n",
+ freq, ret);
}
return ret;
@@ -222,6 +248,7 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
static void slpc_boost_work(struct work_struct *work)
{
struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
+ int err;
/*
* Raise min freq to boost. It's possible that
@@ -231,8 +258,9 @@ static void slpc_boost_work(struct work_struct *work)
*/
mutex_lock(&slpc->lock);
if (atomic_read(&slpc->num_waiters)) {
- slpc_force_min_freq(slpc, slpc->boost_freq);
- slpc->num_boosts++;
+ err = slpc_force_min_freq(slpc, slpc->boost_freq);
+ if (!err)
+ slpc->num_boosts++;
}
mutex_unlock(&slpc->lock);
}
@@ -260,6 +288,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->boost_freq = 0;
atomic_set(&slpc->num_waiters, 0);
slpc->num_boosts = 0;
+ slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
mutex_init(&slpc->lock);
INIT_WORK(&slpc->boost_work, slpc_boost_work);
@@ -506,6 +535,22 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (!HAS_MEDIA_RATIO_MODE(i915))
+ return -ENODEV;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_MEDIA_FF_RATIO_MODE,
+ val);
+ return ret;
+}
+
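/*
 * Illustrative usage sketch (not part of this patch): a hypothetical caller,
 * e.g. a debugfs/sysfs handler not shown here, would pass one of the
 * slpc_media_ratio_mode values added in guc_actions_slpc_abi.h:
 */
static int example_force_media_ratio_1_to_1(struct intel_guc_slpc *slpc)
{
	return intel_guc_slpc_set_media_ratio_mode(slpc,
			SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE);
}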
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
u32 pm_intrmsk_mbz = 0;
@@ -654,6 +699,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return ret;
}
+ /* Set cached media freq ratio mode */
+ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 0caa8fee3c04..82a98f78f96c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -38,6 +38,7 @@ int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val);
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
+int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val);
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index bf5b9a563c09..73d208123528 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -29,6 +29,9 @@ struct intel_guc_slpc {
u32 min_freq_softlimit;
u32 max_freq_softlimit;
+ /* cached media ratio mode */
+ u32 media_ratio_mode;
+
/* Protects set/reset of boost freq
* and value of num_waiters
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 75291e9846c5..40f726c61e95 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1314,6 +1314,8 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
+ guc->timestamp.last_stat_jiffies = jiffies;
+
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -1386,6 +1388,17 @@ void intel_guc_busyness_park(struct intel_gt *gt)
return;
cancel_delayed_work(&guc->timestamp.work);
+
+ /*
+ * Before parking, we should sample engine busyness stats if we need to.
+ * We can skip it if we are less than half a ping from the last time we
+ * sampled the busyness stats.
+ */
+ if (guc->timestamp.last_stat_jiffies &&
+ !time_after(jiffies, guc->timestamp.last_stat_jiffies +
+ (guc->timestamp.ping_delay / 2)))
+ return;
+
__update_guc_busyness_stats(guc);
}
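/*
 * Illustrative sketch (not part of this patch): the park-time throttle added
 * above reduces to "sample again only if we have never sampled, or if more
 * than half a ping interval (in jiffies) has elapsed", i.e. roughly:
 */
static bool example_busyness_sample_due(unsigned long last_stat_jiffies,
					unsigned long ping_delay)
{
	return !last_stat_jiffies ||
	       time_after(jiffies, last_stat_jiffies + ping_delay / 2);
}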
@@ -1527,87 +1540,18 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
lrc_update_regs(ce, engine, head);
}
-static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
-{
- static const i915_reg_t _reg[I915_NUM_ENGINES] = {
- [RCS0] = MSG_IDLE_CS,
- [BCS0] = MSG_IDLE_BCS,
- [VCS0] = MSG_IDLE_VCS0,
- [VCS1] = MSG_IDLE_VCS1,
- [VCS2] = MSG_IDLE_VCS2,
- [VCS3] = MSG_IDLE_VCS3,
- [VCS4] = MSG_IDLE_VCS4,
- [VCS5] = MSG_IDLE_VCS5,
- [VCS6] = MSG_IDLE_VCS6,
- [VCS7] = MSG_IDLE_VCS7,
- [VECS0] = MSG_IDLE_VECS0,
- [VECS1] = MSG_IDLE_VECS1,
- [VECS2] = MSG_IDLE_VECS2,
- [VECS3] = MSG_IDLE_VECS3,
- [CCS0] = MSG_IDLE_CS,
- [CCS1] = MSG_IDLE_CS,
- [CCS2] = MSG_IDLE_CS,
- [CCS3] = MSG_IDLE_CS,
- };
- u32 val;
-
- if (!_reg[engine->id].reg)
- return 0;
-
- val = intel_uncore_read(engine->uncore, _reg[engine->id]);
-
- /* bits[29:25] & bits[13:9] >> shift */
- return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
-}
-
-static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
-{
- int ret;
-
- /* Ensure GPM receives fw up/down after CS is stopped */
- udelay(1);
-
- /* Wait for forcewake request to complete in GPM */
- ret = __intel_wait_for_register_fw(gt->uncore,
- GEN9_PWRGT_DOMAIN_STATUS,
- fw_mask, fw_mask, 5000, 0, NULL);
-
- /* Ensure CS receives fw ack from GPM */
- udelay(1);
-
- if (ret)
- GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
-}
-
-/*
- * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
- * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
- * pending status is indicated by bits[13:9] (masked by bits[ 29:25]) in the
- * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
- * are concerned only with the gt reset here, we use a logical OR of pending
- * forcewakeups from all reset domains and then wait for them to complete by
- * querying PWRGT_DOMAIN_STATUS.
- */
static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
{
- u32 fw_pending;
-
- if (GRAPHICS_VER(engine->i915) != 12)
+ if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
return;
- /*
- * Wa_22011802037
- * TODO: Occasionally trying to stop the cs times out, but does not
- * adversely affect functionality. The timeout is set as a config
- * parameter that defaults to 100ms. Assuming that this timeout is
- * sufficient for any pending MI_FORCEWAKEs to complete, ignore the
- * timeout returned here until it is root caused.
- */
intel_engine_stop_cs(engine);
- fw_pending = __cs_pending_mi_force_wakes(engine);
- if (fw_pending)
- __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+ /*
+ * Wa_22011802037:gen11/gen12: In addition to stopping the CS, we need
+ * to wait for any pending MI_FORCE_WAKEUP requests to complete.
+ */
+ intel_engine_wait_for_pending_mi_fw(engine);
}
static void guc_reset_nop(struct intel_engine_cs *engine)
@@ -1654,9 +1598,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
-static void __guc_reset_context(struct intel_context *ce, bool stalled)
+static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
{
- bool local_stalled;
+ bool guilty;
struct i915_request *rq;
unsigned long flags;
u32 head;
@@ -1684,7 +1628,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
if (!intel_context_is_pinned(ce))
goto next_context;
- local_stalled = false;
+ guilty = false;
rq = intel_context_find_active_request(ce);
if (!rq) {
head = ce->ring->tail;
@@ -1692,14 +1636,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
}
if (i915_request_started(rq))
- local_stalled = true;
+ guilty = stalled & ce->engine->mask;
GEM_BUG_ON(i915_active_is_idle(&ce->active));
head = intel_ring_wrap(ce->ring, rq->head);
- __i915_request_reset(rq, local_stalled && stalled);
+ __i915_request_reset(rq, guilty);
out_replay:
- guc_reset_state(ce, head, local_stalled && stalled);
+ guc_reset_state(ce, head, guilty);
next_context:
if (i != number_children)
ce = list_next_entry(ce, parallel.child_link);
@@ -1709,7 +1653,7 @@ next_context:
intel_context_put(parent);
}
-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{
struct intel_context *ce;
unsigned long index;
@@ -2394,6 +2338,26 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
return ret;
}
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+ /*
+ * this matches the mapping we do in map_i915_prio_to_guc_prio()
+ * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+ */
+ switch (prio) {
+ default:
+ MISSING_CASE(prio);
+ fallthrough;
+ case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+ return GEN12_CTX_PRIORITY_NORMAL;
+ case GUC_CLIENT_PRIORITY_NORMAL:
+ return GEN12_CTX_PRIORITY_LOW;
+ case GUC_CLIENT_PRIORITY_HIGH:
+ case GUC_CLIENT_PRIORITY_KMD_HIGH:
+ return GEN12_CTX_PRIORITY_HIGH;
+ }
+}
+
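/*
 * Illustrative note (not part of this patch): the GEN12_CTX_PRIORITY_* value
 * returned above is OR-ed into the low dword of the LRC address in
 * prepare_context_registration_info() below, which presumes the priority
 * bits occupy low bits that the page-aligned LRCA never uses.
 */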
static void prepare_context_registration_info(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
@@ -2420,6 +2384,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
*/
info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+ if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+ info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/*
@@ -2768,7 +2734,9 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
__guc_context_set_context_policies(guc, &policy, true);
}
-static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
+static void
+guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
+ unsigned int preempt_timeout_ms)
{
struct intel_guc *guc = ce_to_guc(ce);
struct intel_runtime_pm *runtime_pm =
@@ -2807,7 +2775,8 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
* gets kicked off the HW ASAP.
*/
with_intel_runtime_pm(runtime_pm, wakeref) {
- __guc_context_set_preemption_timeout(guc, guc_id, 1);
+ __guc_context_set_preemption_timeout(guc, guc_id,
+ preempt_timeout_ms);
__guc_context_sched_disable(guc, ce, guc_id);
}
} else {
@@ -2815,7 +2784,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_set_preemption_timeout(guc,
ce->guc_id.id,
- 1);
+ preempt_timeout_ms);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
}
@@ -3168,7 +3137,7 @@ static const struct intel_context_ops guc_context_ops = {
.unpin = guc_context_unpin,
.post_unpin = guc_context_post_unpin,
- .ban = guc_context_ban,
+ .revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
@@ -3417,7 +3386,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.unpin = guc_virtual_context_unpin,
.post_unpin = guc_context_post_unpin,
- .ban = guc_context_ban,
+ .revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
@@ -3506,7 +3475,7 @@ static const struct intel_context_ops virtual_parent_context_ops = {
.unpin = guc_parent_context_unpin,
.post_unpin = guc_context_post_unpin,
- .ban = guc_context_ban,
+ .revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
@@ -4206,7 +4175,7 @@ static void guc_context_replay(struct intel_context *ce)
{
struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
- __guc_reset_context(ce, true);
+ __guc_reset_context(ce, ce->engine->mask);
tasklet_hi_schedule(&sched_engine->tasklet);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 556829de9c17..3bb8838e325a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"
@@ -17,11 +18,15 @@
* capabilities by adding HuC specific commands to batch buffers.
*
* The kernel driver is only responsible for loading the HuC firmware and
- * triggering its security authentication, which is performed by the GuC. For
- * The GuC to correctly perform the authentication, the HuC binary must be
- * loaded before the GuC one. Loading the HuC is optional; however, not using
- * the HuC might negatively impact power usage and/or performance of media
- * workloads, depending on the use-cases.
+ * triggering its security authentication, which is performed by the GuC on
+ * older platforms and by the GSC on newer ones. For the GuC to correctly
+ * perform the authentication, the HuC binary must be loaded before the GuC one.
+ * Loading the HuC is optional; however, not using the HuC might negatively
+ * impact power usage and/or performance of media workloads, depending on the
+ * use-cases.
+ * HuC must be reloaded on events that cause the WOPCM to lose its contents
+ * (S3/S4, FLR); GuC-authenticated HuC must also be reloaded on GuC/GT reset,
+ * while GSC-managed HuC will survive that.
*
* See https://github.com/intel/media-driver for the latest details on HuC
* functionality.
@@ -54,11 +59,51 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
+#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
+static int check_huc_loading_mode(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ bool fw_needs_gsc = intel_huc_is_loaded_by_gsc(huc);
+ bool hw_uses_gsc = false;
+
+ /*
+ * The fuse for HuC load via GSC is only valid on platforms that have
+ * GuC deprivilege.
+ */
+ if (HAS_GUC_DEPRIVILEGE(gt->i915))
+ hw_uses_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
+ GSC_LOADS_HUC;
+
+ if (fw_needs_gsc != hw_uses_gsc) {
+ drm_err(&gt->i915->drm,
+ "mismatch between HuC FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc),
+ HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ return -ENOEXEC;
+ }
+
+ /* make sure we can access the GSC via the mei driver if we need it */
+ if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
+ fw_needs_gsc) {
+ drm_info(&gt->i915->drm,
+ "Can't load HuC due to missing MEI modules\n");
+ return -EIO;
+ }
+
+ drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+
+ return 0;
+}
+
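/*
 * Illustrative note (not part of this patch): check_huc_loading_mode() above
 * only accepts the two self-consistent combinations of the firmware blob's
 * expected load path and the GSC_LOADS_HUC fuse (readable only on platforms
 * with GuC deprivilege):
 *
 *	FW mode   HW fuse   result
 *	legacy    legacy    0 (DMA load, GuC authentication)
 *	GSC       GSC       0 (GSC loads and authenticates)
 *	legacy    GSC       -ENOEXEC (mismatch)
 *	GSC       legacy    -ENOEXEC (mismatch)
 */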
int intel_huc_init(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
+ err = check_huc_loading_mode(huc);
+ if (err)
+ goto out;
+
err = intel_uc_fw_init(&huc->fw);
if (err)
goto out;
@@ -68,7 +113,7 @@ int intel_huc_init(struct intel_huc *huc)
return 0;
out:
- i915_probe_error(i915, "failed with %d\n", err);
+ drm_info(&i915->drm, "HuC init failed with %d\n", err);
return err;
}
@@ -96,17 +141,20 @@ int intel_huc_auth(struct intel_huc *huc)
struct intel_guc *guc = &gt->uc.guc;
int ret;
- GEM_BUG_ON(intel_huc_is_authenticated(huc));
-
if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
+ /* GSC will do the auth */
+ if (intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
goto fail;
- ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
+ GEM_BUG_ON(intel_uc_fw_is_running(&huc->fw));
+
+ ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto fail;
@@ -133,6 +181,18 @@ fail:
return ret;
}
+static bool huc_is_authenticated(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+ u32 status = 0;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
+
+ return (status & huc->status.mask) == huc->status.value;
+}
+
/**
* intel_huc_check_status() - check HuC status
* @huc: intel_huc structure
@@ -150,10 +210,6 @@ fail:
*/
int intel_huc_check_status(struct intel_huc *huc)
{
- struct intel_gt *gt = huc_to_gt(huc);
- intel_wakeref_t wakeref;
- u32 status = 0;
-
switch (__intel_uc_fw_status(&huc->fw)) {
case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return -ENODEV;
@@ -167,10 +223,17 @@ int intel_huc_check_status(struct intel_huc *huc)
break;
}
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- status = intel_uncore_read(gt->uncore, huc->status.reg);
+ return huc_is_authenticated(huc);
+}
- return (status & huc->status.mask) == huc->status.value;
+void intel_huc_update_auth_status(struct intel_huc *huc)
+{
+ if (!intel_uc_fw_is_loadable(&huc->fw))
+ return;
+
+ if (huc_is_authenticated(huc))
+ intel_uc_fw_change_status(&huc->fw,
+ INTEL_UC_FIRMWARE_RUNNING);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 73ec670800f2..d7e25b6e879e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -27,6 +27,7 @@ int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
+void intel_huc_update_auth_status(struct intel_huc *huc);
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
@@ -50,9 +51,9 @@ static inline bool intel_huc_is_used(struct intel_huc *huc)
return intel_uc_fw_is_available(&huc->fw);
}
-static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
+static inline bool intel_huc_is_loaded_by_gsc(const struct intel_huc *huc)
{
- return intel_uc_fw_is_running(&huc->fw);
+ return huc->fw.loaded_via_gsc;
}
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index e5ef509c70e8..9d6ab1e01639 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,7 +8,7 @@
#include "i915_drv.h"
/**
- * intel_huc_fw_upload() - load HuC uCode to device
+ * intel_huc_fw_upload() - load HuC uCode to device via DMA transfer
* @huc: intel_huc structure
*
* Called from intel_uc_init_hw() during driver load, resume from sleep and
@@ -21,6 +21,9 @@
*/
int intel_huc_fw_upload(struct intel_huc *huc)
{
+ if (intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
/* HW doesn't look at destination address for HuC, so set it to 0 */
return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 8c9ef690ac9d..f2e7c82985ef 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -45,6 +45,10 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
+
+ /* XEHPSDV and PVC do not use HuC */
+ if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
+ i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -323,17 +327,10 @@ static int __uc_init(struct intel_uc *uc)
if (ret)
return ret;
- if (intel_uc_uses_huc(uc)) {
- ret = intel_huc_init(huc);
- if (ret)
- goto out_guc;
- }
+ if (intel_uc_uses_huc(uc))
+ intel_huc_init(huc);
return 0;
-
-out_guc:
- intel_guc_fini(guc);
- return ret;
}
static void __uc_fini(struct intel_uc *uc)
@@ -509,7 +506,16 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_log_capture;
- intel_huc_auth(huc);
+ /*
+ * GSC-loaded HuC is authenticated by the GSC, so we don't need to
+ * trigger the auth here. However, given that the HuC loaded this way
+ * survives GT reset, we still need to update our SW bookkeeping to make
+ * sure it reflects the correct HW status.
+ */
+ if (intel_huc_is_loaded_by_gsc(huc))
+ intel_huc_update_auth_status(huc);
+ else
+ intel_huc_auth(huc);
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
@@ -595,7 +601,7 @@ sanitize:
__uc_sanitize(uc);
}
-void intel_uc_reset(struct intel_uc *uc, bool stalled)
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
struct intel_guc *guc = &uc->guc;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 866b462821c0..a8f38c2c60e2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
-void intel_uc_reset(struct intel_uc *uc, bool stalled);
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
void intel_uc_reset_finish(struct intel_uc *uc);
void intel_uc_cancel_requests(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index d078f884b5e3..c06e83872c34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -156,7 +156,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
- static const struct uc_fw_platform_requirement *fw_blobs;
+ const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
u8 rev = INTEL_REVID(i915);
@@ -301,45 +301,31 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
}
}
-/**
- * intel_uc_fw_fetch - fetch uC firmware
- * @uc_fw: uC firmware
- *
- * Fetch uC firmware into GEM obj.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
+static int check_gsc_manifest(const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
- struct device *dev = i915->drm.dev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
+ u32 *dw = (u32 *)fw->data;
+ u32 version = dw[HUC_GSC_VERSION_DW];
- GEM_BUG_ON(!i915->wopcm.size);
- GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
-
- err = i915_inject_probe_error(i915, -ENXIO);
- if (err)
- goto fail;
+ uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
+ uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);
- __force_fw_fetch_failures(uc_fw, -EINVAL);
- __force_fw_fetch_failures(uc_fw, -ESTALE);
+ return 0;
+}
- err = request_firmware(&fw, uc_fw->path, dev);
- if (err)
- goto fail;
+static int check_ccs_header(struct drm_i915_private *i915,
+ const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
+{
+ struct uc_css_header *css;
+ size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
- err = -ENODATA;
- goto fail;
+ return -ENODATA;
}
css = (struct uc_css_header *)fw->data;
@@ -352,8 +338,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
"%s firmware %s: unexpected header size: %zu != %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
- err = -EPROTO;
- goto fail;
+ return -EPROTO;
}
/* uCode size must calculated from other sizes */
@@ -368,8 +353,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, size);
- err = -ENOEXEC;
- goto fail;
+ return -ENOEXEC;
}
/* Sanity check whether this fw is not larger than whole WOPCM memory */
@@ -378,8 +362,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
size, (size_t)i915->wopcm.size);
- err = -E2BIG;
- goto fail;
+ return -E2BIG;
}
/* Get version numbers from the CSS header */
@@ -388,6 +371,49 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
css->sw_version);
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ uc_fw->private_data_size = css->private_data_size;
+
+ return 0;
+}
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct device *dev = i915->drm.dev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ int err;
+
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
+
+ err = i915_inject_probe_error(i915, -ENXIO);
+ if (err)
+ goto fail;
+
+ __force_fw_fetch_failures(uc_fw, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, -ESTALE);
+
+ err = request_firmware(&fw, uc_fw->path, dev);
+ if (err)
+ goto fail;
+
+ if (uc_fw->loaded_via_gsc)
+ err = check_gsc_manifest(fw, uc_fw);
+ else
+ err = check_ccs_header(i915, fw, uc_fw);
+ if (err)
+ goto fail;
+
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
@@ -400,9 +426,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
}
}
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
- uc_fw->private_data_size = css->private_data_size;
-
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
if (!IS_ERR(obj))
@@ -470,7 +493,10 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
- ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ if (ggtt->vm.raw_insert_entries)
+ ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ else
+ ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 3229018877d3..4f169035f504 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -102,6 +102,8 @@ struct intel_uc_fw {
u32 ucode_size;
u32 private_data_size;
+
+ bool loaded_via_gsc;
};
#ifdef CONFIG_DRM_I915_DEBUG_GUC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index e41ffc7a7fbc..b05e0e35b734 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -39,6 +39,11 @@
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
* in fw. So driver will load a truncated firmware in this case.
+ *
+ * Starting from DG2, the HuC is loaded by the GSC instead of i915. The GSC
+ * firmware performs all the required integrity checks; we just need to check
+ * the version. Note that the header for GSC-managed blobs is different from the
+ * CSS used for dma-loaded firmwares.
*/
struct uc_css_header {
@@ -78,4 +83,8 @@ struct uc_css_header {
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
+#define HUC_GSC_VERSION_DW 44
+#define HUC_GSC_MAJOR_VER_MASK (0xFF << 0)
+#define HUC_GSC_MINOR_VER_MASK (0xFF << 16)
+
#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9eb75a2b400..0ba2a3455d99 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -428,7 +428,7 @@ struct cmd_info {
#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
- u16 rings;
+ intel_engine_mask_t rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
u16 devices;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9c5cc2800975..b4f69364f9a1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -51,7 +51,7 @@ static int preallocated_oos_pages = 8192;
static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
- struct kvm *kvm = vgpu->kvm;
+ struct kvm *kvm = vgpu->vfio_device.kvm;
int idx;
bool ret;
@@ -1185,7 +1185,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
if (!vgpu->attached)
return -EINVAL;
- pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
+ pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
return -EINVAL;
return PageTransHuge(pfn_to_page(pfn));
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 03ecffc2ba56..aee1a45da74b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -227,11 +227,7 @@ struct intel_vgpu {
struct mutex cache_lock;
struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
atomic_t released;
- struct vfio_group *vfio_group;
struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
@@ -732,7 +728,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
{
if (!vgpu->attached)
return -ESRCH;
- return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+ return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
/**
@@ -750,7 +746,7 @@ static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
{
if (!vgpu->attached)
return -ESRCH;
- return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+ return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0787ba5c301f..e2f6c56ab342 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -228,8 +228,6 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
}
}
-static void intel_vgpu_release_work(struct work_struct *work);
-
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
@@ -243,7 +241,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
+ ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -266,8 +264,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
cur_gfn, ret);
@@ -761,23 +759,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int intel_vgpu_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct intel_vgpu *vgpu =
- container_of(nb, struct intel_vgpu, group_notifier);
-
- /* the only action we care about */
- if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->kvm = data;
-
- if (!data)
- schedule_work(&vgpu->release_work);
- }
-
- return NOTIFY_OK;
-}
-
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
{
struct intel_vgpu *itr;
@@ -789,7 +770,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
if (!itr->attached)
continue;
- if (vgpu->kvm == itr->kvm) {
+ if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
ret = true;
goto out;
}
@@ -804,61 +785,44 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long events;
int ret;
- struct vfio_group *vfio_group;
vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
- &vgpu->iommu_notifier);
+ ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
goto out;
}
- events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
- &vgpu->group_notifier);
- if (ret != 0) {
- gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
- ret);
- goto undo_iommu;
- }
-
- vfio_group =
- vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev);
- if (IS_ERR_OR_NULL(vfio_group)) {
- ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
- gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
- goto undo_register;
- }
- vgpu->vfio_group = vfio_group;
-
ret = -EEXIST;
if (vgpu->attached)
- goto undo_group;
+ goto undo_iommu;
ret = -ESRCH;
- if (!vgpu->kvm || vgpu->kvm->mm != current->mm) {
+ if (!vgpu->vfio_device.kvm ||
+ vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- goto undo_group;
+ goto undo_iommu;
}
+ kvm_get_kvm(vgpu->vfio_device.kvm);
+
ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
- goto undo_group;
+ goto undo_iommu;
vgpu->attached = true;
- kvm_get_kvm(vgpu->kvm);
kvmgt_protect_table_init(vgpu);
gvt_cache_init(vgpu);
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
+ kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
@@ -868,17 +832,9 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
atomic_set(&vgpu->released, 0);
return 0;
-undo_group:
- vfio_group_put_external_user(vgpu->vfio_group);
- vgpu->vfio_group = NULL;
-
-undo_register:
- vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
- &vgpu->group_notifier);
-
undo_iommu:
- vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
+ vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
out:
return ret;
}
@@ -894,8 +850,9 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
}
}
-static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
@@ -907,41 +864,24 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
+ ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY,
- &vgpu->group_notifier);
- drm_WARN(&i915->drm, ret,
- "vfio_unregister_notifier for group failed: %d\n", ret);
-
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
- kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
- kvm_put_kvm(vgpu->kvm);
+ kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vfio_group_put_external_user(vgpu->vfio_group);
- vgpu->kvm = NULL;
vgpu->attached = false;
-}
-
-static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
-{
- __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev));
-}
-
-static void intel_vgpu_release_work(struct work_struct *work)
-{
- struct intel_vgpu *vgpu =
- container_of(work, struct intel_vgpu, release_work);
- __intel_vgpu_release(vgpu);
+ if (vgpu->vfio_device.kvm)
+ kvm_put_kvm(vgpu->vfio_device.kvm);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1690,7 +1630,6 @@ static int intel_vgpu_probe(struct mdev_device *mdev)
return PTR_ERR(vgpu);
}
- INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
&intel_vgpu_dev_ops);
@@ -1728,7 +1667,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->kvm;
+ struct kvm *kvm = info->vfio_device.kvm;
struct kvm_memory_slot *slot;
int idx;
@@ -1758,7 +1697,7 @@ out:
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->kvm;
+ struct kvm *kvm = info->vfio_device.kvm;
struct kvm_memory_slot *slot;
int idx;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 90b0ce5051af..deb8a8b76965 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -100,6 +100,9 @@
#include "intel_region_ttm.h"
#include "vlv_suspend.h"
+/* Intel Rapid Start Technology ACPI device name */
+static const char irst_name[] = "INT3392";
+
static const struct drm_driver i915_drm_driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
@@ -520,6 +523,22 @@ mask_err:
return ret;
}
+static int i915_pcode_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ int id, ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_pcode_init(gt->uncore);
+ if (ret) {
+ drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -530,6 +549,7 @@ mask_err:
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
int ret;
if (i915_inject_probe_failure(dev_priv))
@@ -629,7 +649,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_opregion_setup(dev_priv);
- ret = intel_pcode_init(dev_priv);
+ ret = i915_pcode_init(dev_priv);
if (ret)
goto err_msi;
@@ -641,6 +661,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_bw_init_hw(dev_priv);
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM and on s2idle cases.
+ */
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_disable(root_pdev);
+
return 0;
err_msi:
@@ -664,11 +693,16 @@ err_perf:
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
i915_perf_fini(dev_priv);
if (pdev->msi_enabled)
pci_disable_msi(pdev);
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_enable(root_pdev);
}
/**
@@ -813,8 +847,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct intel_device_info *match_info =
- (struct intel_device_info *)ent->driver_data;
struct drm_i915_private *i915;
int ret;
@@ -823,7 +855,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
+ if (!i915->params.nuclear_pageflip && DISPLAY_VER(i915) < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
ret = pci_enable_device(pdev);
@@ -1051,8 +1083,6 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(i915);
- i915_gem_suspend(i915);
-
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
@@ -1069,6 +1099,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dmc_ucode_suspend(i915);
+ i915_gem_suspend(i915);
+
/*
* The only requirement is to reboot with display DC states disabled,
* for now leaving all display power wells in the INIT power domain
@@ -1152,6 +1184,8 @@ static int i915_drm_suspend(struct drm_device *dev)
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ i915_gem_drain_freed_objects(dev_priv);
+
return 0;
}
@@ -1193,14 +1227,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
goto out;
}
- /*
- * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
- * This should be totally removed when we handle the pci states properly
- * on runtime PM and on s2idle cases.
- */
- if (suspend_to_idle(dev_priv))
- pci_d3cold_disable(pdev);
-
pci_disable_device(pdev);
/*
* During hibernation on some platforms the BIOS may try to access
@@ -1251,7 +1277,7 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- ret = intel_pcode_init(dev_priv);
+ ret = i915_pcode_init(dev_priv);
if (ret)
return ret;
@@ -1365,8 +1391,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
pci_set_master(pdev);
- pci_d3cold_enable(pdev);
-
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@@ -1425,6 +1449,8 @@ static int i915_pm_suspend(struct device *kdev)
return -ENODEV;
}
+ i915_ggtt_mark_pte_lost(i915, false);
+
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1477,6 +1503,14 @@ static int i915_pm_resume(struct device *kdev)
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ /*
+ * If IRST is enabled, or if we can't detect whether it's enabled,
+ * then we must assume we lost the GGTT page table entries, since
+ * they are not retained if IRST decided to enter S4.
+ */
+ if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1))
+ i915_ggtt_mark_pte_lost(i915, true);
+
return i915_drm_resume(&i915->drm);
}
@@ -1536,6 +1570,9 @@ static int i915_pm_restore_early(struct device *kdev)
static int i915_pm_restore(struct device *kdev)
{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ i915_ggtt_mark_pte_lost(i915, true);
return i915_pm_resume(kdev);
}
@@ -1543,13 +1580,12 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
+ drm_dbg(&dev_priv->drm, "Suspending device\n");
disable_rpm_wakeref_asserts(rpm);
@@ -1589,12 +1625,6 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
- /*
- * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
- * This should be totally removed when we handle the pci states properly
- * on runtime PM and on s2idle cases.
- */
- pci_d3cold_disable(pdev);
rpm->suspended = true;
/*
@@ -1625,7 +1655,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_enable(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
+ drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
}
@@ -1633,20 +1663,18 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
+ drm_dbg(&dev_priv->drm, "Resuming device\n");
drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
- pci_d3cold_enable(pdev);
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
@@ -1683,7 +1711,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_err(&dev_priv->drm,
"Runtime resume failed, disabling it (%d)\n", ret);
else
- drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
+ drm_dbg(&dev_priv->drm, "Device resumed\n");
return ret;
}
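
The i915_driver.c hunks above replace the suspend-time D3cold workaround with a probe/remove-scoped one: the PCIe root port is looked up once with pcie_find_root_port() and D3cold is blocked for as long as the driver stays bound. A minimal sketch of that pairing, assuming only a bound struct pci_dev; the example_* helpers are invented for illustration and use the same kernel APIs as the hunks above.

/* Illustrative sketch only, not the driver code: block D3cold at the PCIe
 * root port while a device is bound, and allow it again on remove. */
#include <linux/pci.h>

static void example_block_root_d3cold(struct pci_dev *pdev)
{
	struct pci_dev *root = pcie_find_root_port(pdev);

	if (root)
		pci_d3cold_disable(root);	/* keep the hierarchy out of D3cold */
}

static void example_allow_root_d3cold(struct pci_dev *pdev)
{
	struct pci_dev *root = pcie_find_root_port(pdev);

	if (root)
		pci_d3cold_enable(root);	/* balances the disable above */
}
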
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 18d38cb59923..b09d1d386574 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -116,8 +116,9 @@ show_client_class(struct seq_file *m,
total += busy_add(ctx, class);
rcu_read_unlock();
- seq_printf(m, "drm-engine-%s:\t%llu ns\n",
- uabi_class_names[class], total);
+ if (capacity)
+ seq_printf(m, "drm-engine-%s:\t%llu ns\n",
+ uabi_class_names[class], total);
if (capacity > 1)
seq_printf(m, "drm-engine-capacity-%s:\t%u\n",
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index f796c5e8e060..69496af996d9 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -11,7 +11,7 @@
#include <linux/spinlock.h>
#include <linux/xarray.h>
-#include "gt/intel_engine_types.h"
+#include <uapi/drm/i915_drm.h>
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 00d7eeae33bd..c22f29c3faa0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -37,7 +37,6 @@
#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_bios.h"
#include "display/intel_cdclk.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
@@ -194,12 +193,6 @@ struct drm_i915_display_funcs {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-enum drrs_type {
- DRRS_TYPE_NONE,
- DRRS_TYPE_STATIC,
- DRRS_TYPE_SEAMLESS,
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -308,76 +301,19 @@ struct intel_vbt_data {
/* bdb version */
u16 version;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
/* Feature bits */
unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
- unsigned int panel_type:4;
int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drm_panel_orientation orientation;
bool override_afc_startup;
u8 override_afc_startup_val;
- u8 seamless_drrs_min_refresh_rate;
- enum drrs_type drrs_type;
-
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
- int bpp;
- struct edp_power_seq pps;
- u8 drrs_msa_timing_delay;
- bool low_vswing;
- bool initialized;
- bool hobl;
- } edp;
-
- struct {
- bool enable;
- bool full_link;
- bool require_aux_wakeup;
- int idle_frames;
- int tp1_wakeup_time_us;
- int tp2_tp3_wakeup_time_us;
- int psr2_tp2_tp3_wakeup_time_us;
- } psr;
-
- struct {
- u16 pwm_freq_hz;
- u16 brightness_precision_bits;
- bool present;
- bool active_low_pwm;
- u8 min_brightness; /* min_brightness/255 of max */
- u8 controller; /* brightness controller number */
- enum intel_backlight_type type;
- } backlight;
-
- /* MIPI DSI */
- struct {
- u16 panel_id;
- struct mipi_config *config;
- struct mipi_pps_data *pps;
- u16 bl_ports;
- u16 cabc_ports;
- u8 seq_version;
- u32 size;
- u8 *data;
- const u8 *sequence[MIPI_SEQ_MAX];
- u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
- enum drm_panel_orientation orientation;
- } dsi;
-
int crt_ddc_pin;
struct list_head display_devices;
@@ -943,6 +879,7 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
+#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)
#define IS_DISPLAY_STEP(__i915, since, until) \
(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
@@ -956,6 +893,10 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
+#define IS_BASEDIE_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
+ INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))
+
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
@@ -1208,6 +1149,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_DG2(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
+#define IS_PVC_BD_STEP(__i915, since, until) \
+ (IS_PONTEVECCHIO(__i915) && \
+ IS_BASEDIE_STEP(__i915, since, until))
+
+#define IS_PVC_CT_STEP(__i915, since, until) \
+ (IS_PONTEVECCHIO(__i915) && \
+ IS_GRAPHICS_STEP(__i915, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
@@ -1223,6 +1172,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
})
#define RCS_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
+#define BCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
#define VDBOX_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
@@ -1230,6 +1181,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define CCS_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
+#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)
+
/*
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
* All later gens can run the final buffer from the ppgtt
@@ -1329,9 +1282,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
-#define HAS_MSLICES(dev_priv) \
- (INTEL_INFO(dev_priv)->has_mslices)
-
/*
* Set this flag, when platform requires 64K GTT page sizes or larger for
* device local memory access.
@@ -1370,6 +1320,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
+#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)
+
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
@@ -1388,7 +1340,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
- (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+ (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), \
+ !(dev_priv)->params.disable_display && \
+ !intel_opregion_headless_sku(dev_priv))
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
@@ -1401,6 +1355,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
+#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
+
+#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
+
/* i915_gem.c */
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
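
The new IS_BASEDIE_STEP()/IS_PVC_*_STEP() macros above keep the existing half-open convention: a stepping matches when since <= step < until. A standalone sketch of that range check with placeholder step values (the real STEP_* enum lives in the driver, not here).

/* Standalone sketch of the half-open stepping check used by the IS_*_STEP()
 * macros above; the enum values are placeholders. */
#include <stdio.h>

enum { STEP_A0, STEP_A1, STEP_B0, STEP_C0, STEP_FUTURE };

static int step_in_range(int step, int since, int until)
{
	return step >= since && step < until;
}

int main(void)
{
	printf("%d\n", step_in_range(STEP_B0, STEP_A0, STEP_C0));	/* 1: B0 is before C0 */
	printf("%d\n", step_in_range(STEP_C0, STEP_A0, STEP_C0));	/* 0: until is exclusive */
	return 0;
}
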
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index d0752e5553db..68d8d52bd541 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,7 +26,6 @@
#define __I915_GEM_H__
#include <linux/bug.h>
-#include <linux/interrupt.h>
#include <drm/drm_drv.h>
@@ -54,9 +53,6 @@ struct drm_i915_private;
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
-#define GEM_DEBUG_DECL(var) var
-#define GEM_DEBUG_EXEC(expr) expr
-#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
@@ -66,9 +62,6 @@ struct drm_i915_private;
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
-#define GEM_DEBUG_DECL(var)
-#define GEM_DEBUG_EXEC(expr) do { } while (0)
-#define GEM_DEBUG_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
@@ -91,36 +84,4 @@ struct drm_i915_private;
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
-static inline void tasklet_lock(struct tasklet_struct *t)
-{
- while (!tasklet_trylock(t))
- cpu_relax();
-}
-
-static inline bool tasklet_is_locked(const struct tasklet_struct *t)
-{
- return test_bit(TASKLET_STATE_RUN, &t->state);
-}
-
-static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
-{
- if (!atomic_fetch_inc(&t->count))
- tasklet_unlock_spin_wait(t);
-}
-
-static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
-{
- return !atomic_read(&t->count);
-}
-
-static inline bool __tasklet_enable(struct tasklet_struct *t)
-{
- return atomic_dec_and_test(&t->count);
-}
-
-static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
-{
- return test_bit(TASKLET_STATE_SCHED, &t->state);
-}
-
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index c12a0adefda5..6fd15b39570c 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -148,14 +148,21 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(i915);
break;
case I915_PARAM_SLICE_MASK:
+ /* Not supported from Xe_HP onward; use topology queries */
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ return -EINVAL;
+
value = sseu->slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
+ /* Not supported from Xe_HP onward; use topology queries */
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ return -EINVAL;
+
/* Only copy bits from the first slice */
- memcpy(&value, sseu->subslice_mask,
- min(sseu->ss_stride, (u8)sizeof(value)));
+ value = intel_sseu_get_hsw_subslices(sseu, 0);
if (!value)
return -ENODEV;
break;
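
With the i915_getparam.c change above, I915_PARAM_SLICE_MASK and I915_PARAM_SUBSLICE_MASK return -EINVAL on Xe_HP and newer, pushing userspace to the topology query uAPI. A hedged userspace sketch of handling that fallback, assuming libdrm's <drm/i915_drm.h> headers are available; read_slice_mask() is an invented helper, not taken from any project.

/* Userspace sketch: read the legacy slice mask and treat EINVAL as
 * "query DRM_I915_QUERY_TOPOLOGY_INFO instead". */
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int read_slice_mask(int fd, int *mask)
{
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_SLICE_MASK,
		.value = mask,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		return 0;
	if (errno == EINVAL)
		fprintf(stderr, "legacy SSEU param rejected, use the topology query\n");
	return -errno;
}
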
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0512c66fa4f3..f9b1969ed7ed 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -581,6 +581,15 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
}
+ if (GRAPHICS_VER(m->i915) >= 11) {
+ err_printf(m, " NOPID: 0x%08x\n", ee->nopid);
+ err_printf(m, " EXCC: 0x%08x\n", ee->excc);
+ err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
+ err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop);
+ err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
+ err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
+ err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
+ }
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -1095,8 +1104,12 @@ i915_vma_coredump_create(const struct intel_gt *gt,
for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
mutex_lock(&ggtt->error_mutex);
- ggtt->vm.insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ if (ggtt->vm.raw_insert_page)
+ ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
+ else
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
@@ -1224,6 +1237,16 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->ipehr = ENGINE_READ(engine, IPEHR);
}
+ if (GRAPHICS_VER(i915) >= 11) {
+ ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
+ ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
+ ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
+ ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
+ ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
+ ee->nopid = ENGINE_READ(engine, RING_NOPID);
+ ee->excc = ENGINE_READ(engine, RING_EXCC);
+ }
+
intel_engine_get_instdone(engine, &ee->instdone);
ee->instpm = ENGINE_READ(engine, RING_INSTPM);
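
The error-capture hunk above prefers an optional ggtt->vm.raw_insert_page hook and only falls back to insert_page when the backend does not provide it. A standalone, self-contained illustration of that optional-hook-with-fallback pattern; every name below is invented.

#include <stdio.h>

struct example_vm_ops {
	void (*insert_page)(unsigned long addr);	/* always present */
	void (*raw_insert_page)(unsigned long addr);	/* optional */
};

static void slow_insert(unsigned long addr) { printf("slow 0x%lx\n", addr); }
static void raw_insert(unsigned long addr)  { printf("raw  0x%lx\n", addr); }

static void example_insert(const struct example_vm_ops *ops, unsigned long addr)
{
	if (ops->raw_insert_page)
		ops->raw_insert_page(addr);	/* preferred path when provided */
	else
		ops->insert_page(addr);
}

int main(void)
{
	struct example_vm_ops a = { .insert_page = slow_insert };
	struct example_vm_ops b = { .insert_page = slow_insert, .raw_insert_page = raw_insert };

	example_insert(&a, 0x1000);	/* falls back */
	example_insert(&b, 0x2000);	/* uses the raw hook */
	return 0;
}
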
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index a611abacd9c2..55a143b92d10 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -84,6 +84,13 @@ struct intel_engine_coredump {
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
+ u32 nopid;
+ u32 excc;
+ u32 cmd_cctl;
+ u32 cscmdop;
+ u32 ctx_sr_ctl;
+ u32 dma_faddr_hi;
+ u32 dma_faddr_lo;
struct intel_instdone instdone;
/* GuC matched capture-lists info */
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index acf688b698c3..5edc8fbf1dff 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -38,43 +38,43 @@
.display.ver = (x)
#define I845_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
}
#define I9XX_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
}
#define IVB_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
}
#define HSW_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -82,44 +82,44 @@
}
#define CHV_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
#define I845_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
}
#define I9XX_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = CURSOR_B_OFFSET, \
}
#define CHV_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = CURSOR_B_OFFSET, \
[PIPE_C] = CHV_CURSOR_C_OFFSET, \
}
#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = IVB_CURSOR_B_OFFSET, \
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
#define TGL_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = IVB_CURSOR_B_OFFSET, \
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
@@ -127,30 +127,33 @@
}
#define I9XX_COLORS \
- .color = { .gamma_lut_size = 256 }
+ .display.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
- .color = { .gamma_lut_size = 129, \
+ .display.color = { .gamma_lut_size = 129, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define ILK_COLORS \
- .color = { .gamma_lut_size = 1024 }
+ .display.color = { .gamma_lut_size = 1024 }
#define IVB_COLORS \
- .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
+ .display.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
- .color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
- .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .display.color = { \
+ .degamma_lut_size = 65, .gamma_lut_size = 257, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define GLK_COLORS \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
- DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .display.color = { \
+ .degamma_lut_size = 33, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
#define ICL_COLORS \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
- DRM_COLOR_LUT_EQUAL_CHANNELS, \
- .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .display.color = { \
+ .degamma_lut_size = 33, .gamma_lut_size = 262145, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
/* Keep in gen based order, and chronological order within a gen */
@@ -171,6 +174,7 @@
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
+ .has_3d_pipeline = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.platform_engine_mask = BIT(RCS0), \
@@ -190,6 +194,7 @@
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
+ .has_3d_pipeline = 1, \
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
@@ -232,6 +237,7 @@ static const struct intel_device_info i865g_info = {
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.platform_engine_mask = BIT(RCS0), \
+ .has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 32, \
@@ -323,6 +329,7 @@ static const struct intel_device_info pnv_m_info = {
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.platform_engine_mask = BIT(RCS0), \
+ .has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 36, \
@@ -374,6 +381,7 @@ static const struct intel_device_info gm45_info = {
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
@@ -405,6 +413,7 @@ static const struct intel_device_info ilk_m_info = {
.display.has_hotplug = 1, \
.display.fbc_mask = BIT(INTEL_FBC_A), \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -456,6 +465,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.display.has_hotplug = 1, \
.display.fbc_mask = BIT(INTEL_FBC_A), \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -529,7 +539,7 @@ static const struct intel_device_info vlv_info = {
.has_snoop = true,
.has_coherent_ggtt = false,
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
- .display_mmio_offset = VLV_DISPLAY_BASE,
+ .display.mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
I965_COLORS,
@@ -627,7 +637,7 @@ static const struct intel_device_info chv_info = {
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
- .display_mmio_offset = VLV_DISPLAY_BASE,
+ .display.mmio_offset = VLV_DISPLAY_BASE,
CHV_PIPE_OFFSETS,
CHV_CURSOR_OFFSETS,
CHV_COLORS,
@@ -649,8 +659,8 @@ static const struct intel_device_info chv_info = {
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
- .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
- .dbuf.slice_mask = BIT(DBUF_S1)
+ .display.dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
+ .display.dbuf.slice_mask = BIT(DBUF_S1)
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -685,13 +695,14 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .dbuf.slice_mask = BIT(DBUF_S1), \
+ .display.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
+ .has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
@@ -722,14 +733,14 @@ static const struct intel_device_info skl_gt4_info = {
static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_BROXTON),
- .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
+ .display.dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
};
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
.display.ver = 10,
- .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
+ .display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
GLK_COLORS,
};
@@ -801,7 +812,7 @@ static const struct intel_device_info cml_gt2_info = {
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
@@ -809,7 +820,7 @@ static const struct intel_device_info cml_gt2_info = {
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -819,8 +830,8 @@ static const struct intel_device_info cml_gt2_info = {
}, \
GEN(11), \
ICL_COLORS, \
- .dbuf.size = 2048, \
- .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
+ .display.dbuf.size = 2048, \
+ .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
.display.has_dsc = 1, \
.has_coherent_ggtt = false, \
.has_logical_ring_elsq = 1
@@ -854,7 +865,7 @@ static const struct intel_device_info jsl_info = {
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
@@ -862,7 +873,7 @@ static const struct intel_device_info jsl_info = {
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -929,22 +940,15 @@ static const struct intel_device_info adl_s_info = {
.dma_mask_size = 39,
};
-#define XE_LPD_CURSOR_OFFSETS \
- .cursor_offsets = { \
- [PIPE_A] = CURSOR_A_OFFSET, \
- [PIPE_B] = IVB_CURSOR_B_OFFSET, \
- [PIPE_C] = IVB_CURSOR_C_OFFSET, \
- [PIPE_D] = TGL_CURSOR_D_OFFSET, \
- }
-
#define XE_LPD_FEATURES \
.display.abox_mask = GENMASK(1, 0), \
- .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
- DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .display.color = { \
+ .degamma_lut_size = 128, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
}, \
- .dbuf.size = 4096, \
- .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
+ .display.dbuf.size = 4096, \
+ .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ddi = 1, \
.display.has_dmc = 1, \
@@ -959,7 +963,7 @@ static const struct intel_device_info adl_s_info = {
.display.has_psr = 1, \
.display.ver = 13, \
.display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
@@ -967,7 +971,7 @@ static const struct intel_device_info adl_s_info = {
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -975,7 +979,7 @@ static const struct intel_device_info adl_s_info = {
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
- XE_LPD_CURSOR_OFFSETS
+ TGL_CURSOR_OFFSETS
static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
@@ -1005,6 +1009,7 @@ static const struct intel_device_info adl_p_info = {
.graphics.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
+ .has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.has_flat_ccs = 1, \
.has_global_mocs = 1, \
@@ -1012,7 +1017,7 @@ static const struct intel_device_info adl_p_info = {
.has_llc = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_elsq = 1, \
- .has_mslices = 1, \
+ .has_mslice_steering = 1, \
.has_rc6 = 1, \
.has_reset_engine = 1, \
.has_rps = 1, \
@@ -1033,6 +1038,7 @@ static const struct intel_device_info xehpsdv_info = {
.display = { },
.has_64k_pages = 1,
.needs_compact_pt = 1,
+ .has_media_ratio_mode = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
@@ -1054,6 +1060,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_guc_deprivilege = 1, \
.has_heci_pxp = 1, \
.needs_compact_pt = 1, \
+ .has_media_ratio_mode = 1, \
.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
@@ -1077,7 +1084,12 @@ static const struct intel_device_info ats_m_info = {
#define XE_HPC_FEATURES \
XE_HP_FEATURES, \
- .dma_mask_size = 52
+ .dma_mask_size = 52, \
+ .has_3d_pipeline = 0, \
+ .has_guc_deprivilege = 1, \
+ .has_l3_ccs_read = 1, \
+ .has_mslice_steering = 0, \
+ .has_one_eu_per_fuse_bit = 1
__maybe_unused
static const struct intel_device_info pvc_info = {
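
The i915_pci.c churn above is mechanical: display-related fields of the device info now live in a nested .display sub-struct, so the feature macros switch to nested designated initializers. A standalone example of the same idiom with invented placeholder types.

#include <stdio.h>

struct example_display_info {
	int pipe_offsets[3];
	int dbuf_size;
};

struct example_device_info {
	int graphics_ver;
	struct example_display_info display;
};

/* A feature macro can target the sub-struct directly, as the patch does. */
#define EXAMPLE_PIPE_OFFSETS \
	.display.pipe_offsets = { 0x70000, 0x71000, 0x72000 }

static const struct example_device_info example_info = {
	.graphics_ver = 12,
	EXAMPLE_PIPE_OFFSETS,
	.display.dbuf_size = 896 - 4,	/* same "minus bypass blocks" idiom as above */
};

int main(void)
{
	printf("pipe B at 0x%x, dbuf %d\n",
	       example_info.display.pipe_offsets[1], example_info.display.dbuf_size);
	return 0;
}
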
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 3e3b09588fd3..958b37123bf1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1047,7 +1047,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
GEM_BUG_ON(!pmu->base.event_init);
/* Select the first online CPU as a designated reader. */
- if (!cpumask_weight(&i915_pmu_cpumask))
+ if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 7584cec53d5d..0094f67c63f2 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,10 +31,12 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
static int fill_topology_info(const struct sseu_dev_info *sseu,
struct drm_i915_query_item *query_item,
- const u8 *subslice_mask)
+ intel_sseu_ss_mask_t subslice_mask)
{
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
@@ -43,8 +45,8 @@ static int fill_topology_info(const struct sseu_dev_info *sseu,
return -ENODEV;
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * sseu->ss_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
+ subslice_length = sseu->max_slices * ss_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
@@ -59,9 +61,9 @@ static int fill_topology_info(const struct sseu_dev_info *sseu,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = sseu->ss_stride;
+ topo.subslice_stride = ss_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = sseu->eu_stride;
+ topo.eu_stride = eu_stride;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -71,15 +73,15 @@ static int fill_topology_info(const struct sseu_dev_info *sseu,
&sseu->slice_mask, slice_length))
return -EFAULT;
- if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) + slice_length),
- subslice_mask, subslice_length))
+ if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) + slice_length),
+ sseu))
return -EFAULT;
- if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) +
- slice_length + subslice_length),
- sseu->eu_mask, eu_length))
+ if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) +
+ slice_length + subslice_length),
+ sseu))
return -EFAULT;
return total_length;
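
fill_topology_info() above now derives the subslice and EU strides locally (GEN_SSEU_STRIDE rounds a bit count up to whole bytes) and packs the slice mask, subslice masks and EU masks back to back. A standalone sketch of how a consumer would compute those offsets and test a subslice bit, following the layout the function writes; the EXAMPLE_* names are invented.

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_STRIDE(bits)	(((bits) + 7) / 8)	/* bytes per mask, like GEN_SSEU_STRIDE() */

static int subslice_present(const uint8_t *data, int subslice_offset,
			    int subslice_stride, int slice, int ss)
{
	const uint8_t *mask = data + subslice_offset + slice * subslice_stride;

	return (mask[ss / 8] >> (ss % 8)) & 1;
}

int main(void)
{
	int max_slices = 1, max_subslices = 32, max_eus = 16;
	int slice_len = 1;				/* sizeof(slice_mask) == 1 */
	int ss_stride = EXAMPLE_STRIDE(max_subslices);	/* 4 bytes */
	int eu_stride = EXAMPLE_STRIDE(max_eus);	/* 2 bytes */
	int ss_offset = slice_len;
	int eu_offset = slice_len + max_slices * ss_stride;

	printf("ss_offset=%d ss_stride=%d eu_offset=%d eu_stride=%d\n",
	       ss_offset, ss_stride, eu_offset, eu_stride);

	uint8_t blob[1 + 4 + 32 * 2] = { 0x1, 0x0f };	/* slice 0, subslices 0-3 */
	printf("subslice 2 present: %d\n",
	       subslice_present(blob, ss_offset, ss_stride, 0, 2));
	return 0;
}
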
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5a51bb9e1e..3168d7007e10 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,7 +115,7 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
-#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset)
+#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display.mmio_offset)
/*
* Given the first two numbers __a and __b of arbitrarily many evenly spaced
@@ -161,16 +161,15 @@
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
- INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
-#define _TRANS2(tran, reg) (INTEL_INFO(dev_priv)->trans_offsets[(tran)] - \
- INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
-#define _MMIO_TRANS2(tran, reg) _MMIO(_TRANS2(tran, reg))
-#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
- INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->display.pipe_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->display.pipe_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_TRANS2(tran, reg) _MMIO(INTEL_INFO(dev_priv)->display.trans_offsets[(tran)] - \
+ INTEL_INFO(dev_priv)->display.trans_offsets[TRANSCODER_A] + \
+ DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->display.cursor_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->display.cursor_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(dev_priv) + (reg))
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
@@ -976,6 +975,14 @@
#define GEN12_COMPUTE2_RING_BASE 0x1e000
#define GEN12_COMPUTE3_RING_BASE 0x26000
#define BLT_RING_BASE 0x22000
+#define XEHPC_BCS1_RING_BASE 0x3e0000
+#define XEHPC_BCS2_RING_BASE 0x3e2000
+#define XEHPC_BCS3_RING_BASE 0x3e4000
+#define XEHPC_BCS4_RING_BASE 0x3e6000
+#define XEHPC_BCS5_RING_BASE 0x3e8000
+#define XEHPC_BCS6_RING_BASE 0x3ea000
+#define XEHPC_BCS7_RING_BASE 0x3ec000
+#define XEHPC_BCS8_RING_BASE 0x3ee000
#define DG1_GSC_HECI1_BASE 0x00258000
#define DG1_GSC_HECI2_BASE 0x00259000
#define DG2_GSC_HECI1_BASE 0x00373000
@@ -1846,6 +1853,7 @@
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
+#define PVC_RP_STATE_CAP _MMIO(0x281014)
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
@@ -2162,7 +2170,7 @@
*/
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define EDP_PSR_CTL(tran) _MMIO(_TRANS2(tran, _SRD_CTL_A))
+#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -2208,11 +2216,11 @@
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_TRANS2(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO(_TRANS2(tran, _SRD_STATUS_A))
+#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -2239,13 +2247,13 @@
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO(_TRANS2(tran, _SRD_PERF_CNT_A))
+#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
/* PSR_MASK on SKL+ */
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO(_TRANS2(tran, _SRD_DEBUG_A))
+#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -2320,7 +2328,7 @@
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
-#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
+#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
@@ -4319,12 +4327,12 @@
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
-#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
-#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
-#define CURSIZE(pipe) _CURSOR2(pipe, _CURASIZE)
-#define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A)
-#define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE)
+#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
+#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
+#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
+#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
+#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
+#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE)
#define CURSOR_A_OFFSET 0x70080
#define CURSOR_B_OFFSET 0x700c0
@@ -4399,7 +4407,7 @@
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
-#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
+#define DSPGAMC(plane, i) _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -6689,6 +6697,9 @@
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
+#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
#define GEN6_PCODE_ERROR_MASK 0xFF
#define GEN6_PCODE_SUCCESS 0x0
#define GEN6_PCODE_ILLEGAL_CMD 0x1
@@ -6755,6 +6766,14 @@
#define DG1_UNCORE_GET_INIT_STATUS 0x0
#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */
+/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
+/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
+#define PCODE_MBOX_DOMAIN_NONE 0x0
+#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -6774,163 +6793,12 @@
(((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
#define GEN7_L3CDERRST1_ENABLE (1 << 7)
-/* Audio */
-#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
-
-#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR_MASK (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
-#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
-
-#define _IBX_HDMIW_HDMIEDID_A 0xE2050
-#define _IBX_HDMIW_HDMIEDID_B 0xE2150
-#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
- _IBX_HDMIW_HDMIEDID_B)
-#define _IBX_AUD_CNTL_ST_A 0xE20B4
-#define _IBX_AUD_CNTL_ST_B 0xE21B4
-#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
- _IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
-#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
-#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
-#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
-#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
-
-#define _CPT_HDMIW_HDMIEDID_A 0xE5050
-#define _CPT_HDMIW_HDMIEDID_B 0xE5150
-#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
-#define _CPT_AUD_CNTL_ST_A 0xE50B4
-#define _CPT_AUD_CNTL_ST_B 0xE51B4
-#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
-#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
-
-#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
-#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
-#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
-#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
-#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
-#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
-#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
-
/* These are the 4 32-bit write offset registers for each stream
* output buffer. It determines the offset from the
* 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
*/
#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
-#define _IBX_AUD_CONFIG_A 0xe2000
-#define _IBX_AUD_CONFIG_B 0xe2100
-#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
-#define _CPT_AUD_CONFIG_A 0xe5000
-#define _CPT_AUD_CONFIG_B 0xe5100
-#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
-#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
-#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
-#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
-
-#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
-#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
-#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
-#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
-#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
-#define AUD_CONFIG_N(n) \
- (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
- (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
-#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
-
-/* HSW Audio */
-#define _HSW_AUD_CONFIG_A 0x65000
-#define _HSW_AUD_CONFIG_B 0x65100
-#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
-
-#define _HSW_AUD_MISC_CTRL_A 0x65010
-#define _HSW_AUD_MISC_CTRL_B 0x65110
-#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
-
-#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
-#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
-#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
-#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
-#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
-#define AUD_CONFIG_M_MASK 0xfffff
-
-#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
-#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
-#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
-
-/* Audio Digital Converter */
-#define _HSW_AUD_DIG_CNVT_1 0x65080
-#define _HSW_AUD_DIG_CNVT_2 0x65180
-#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
-#define DIP_PORT_SEL_MASK 0x3
-
-#define _HSW_AUD_EDID_DATA_A 0x65050
-#define _HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
-
-#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
-#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
-#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
-#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
-#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
-#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
-
-#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
-#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
-#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
-#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
-
-#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
-#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
-
-#define AUD_FREQ_CNTRL _MMIO(0x65900)
-#define AUD_PIN_BUF_CTL _MMIO(0x48414)
-#define AUD_PIN_BUF_ENABLE REG_BIT(31)
-
-#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
-#define AUD_TS_CDCLK_M_EN REG_BIT(31)
-#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
-
-/* Display Audio Config Reg */
-#define AUD_CONFIG_BE _MMIO(0x65ef0)
-#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
-#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
-#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
-#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
-#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
-#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
-
-#define HBLANK_START_COUNT_8 0
-#define HBLANK_START_COUNT_16 1
-#define HBLANK_START_COUNT_32 2
-#define HBLANK_START_COUNT_64 3
-#define HBLANK_START_COUNT_96 4
-#define HBLANK_START_COUNT_128 5
-
/*
* HSW - ICL power wells
*
@@ -8476,23 +8344,6 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define XEHPSDV_TILE0_ADDR_RANGE _MMIO(0x4900)
-#define XEHPSDV_TILE_LMEM_RANGE_SHIFT 8
-
-#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
-#define XEHPSDV_CCS_BASE_SHIFT 8
-
-/* gamt regs */
-#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
-
-#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
-#define MMCD_PCLA (1 << 31)
-#define MMCD_HOTSPOT_EN (1 << 27)
-
#define _ICL_PHY_MISC_A 0x64C00
#define _ICL_PHY_MISC_B 0x64C04
#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
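
The reworked _MMIO_PIPE2()/_MMIO_TRANS2()/_MMIO_CURSOR2() macros above relocate a register known at its pipe-A/transcoder-A address using the per-device offset tables plus the display MMIO base. A standalone sketch of that arithmetic with made-up offsets.

#include <stdio.h>
#include <stdint.h>

static const uint32_t example_pipe_offsets[] = { 0x70000, 0x71000, 0x72000 };

static uint32_t example_pipe_reg(int pipe, uint32_t reg_a, uint32_t mmio_base)
{
	/* distance of this pipe from pipe A, then rebase onto the display MMIO base */
	return example_pipe_offsets[pipe] - example_pipe_offsets[0] +
	       mmio_base + reg_a;
}

int main(void)
{
	/* a hypothetical pipe-A register at 0x70180, with a 0x180000 display base */
	printf("pipe C: 0x%x\n", example_pipe_reg(2, 0x70180, 0x180000));
	return 0;
}
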
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 73d5195146b0..62fad16a55e8 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -60,7 +60,7 @@ static struct kmem_cache *slab_execute_cbs;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return dev_name(to_request(fence)->engine->i915->drm.dev);
+ return dev_name(to_request(fence)->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -134,17 +134,42 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->semaphore);
/*
- * Keep one request on each engine for reserved use under mempressure,
+ * Keep one request on each engine for reserved use under mempressure;
* do not use with virtual engines as this really is only needed for
* kernel contexts.
+ *
+ * We do not hold a reference to the engine here and so have to be
+ * very careful in what rq->engine we poke. The virtual engine is
+ * referenced via the rq->context and we released that ref during
+ * i915_request_retire(), ergo we must not dereference a virtual
+ * engine here. Not that we would want to, as the only consumer of
+ * the reserved engine->request_pool is the power management parking,
+ * which must-not-fail, and that is only run on the physical engines.
+ *
+ * Since the request must have been executed to have completed,
+ * we know that it will have been processed by the HW and will
+ * not be unsubmitted again, so rq->engine and rq->execution_mask
+ * at this point are stable. rq->execution_mask will be a single
+ * bit if the last and _only_ engine it could execute on was a
+ * physical engine, if it's multiple bits then it started on and
+ * could still be on a virtual engine. Thus if the mask is not a
+ * power-of-two we assume that rq->engine may still be a virtual
+ * engine and so a dangling invalid pointer that we cannot dereference
+ *
+ * For example, consider the flow of a bonded request through a virtual
+ * engine. The request is created with a wide engine mask (all engines
+ * that we might execute on). On processing the bond, the request mask
+ * is reduced to one or more engines. If the request is subsequently
+ * bound to a single engine, it will then be constrained to only
+ * execute on that engine and never returned to the virtual engine
+ * after timeslicing away, see __unwind_incomplete_requests(). Thus we
+ * know that if the rq->execution_mask is a single bit, rq->engine
+ * can be a physical engine with the exact corresponding mask.
*/
if (!intel_engine_is_virtual(rq->engine) &&
- !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
- intel_context_put(rq->context);
+ is_power_of_2(rq->execution_mask) &&
+ !cmpxchg(&rq->engine->request_pool, NULL, rq))
return;
- }
-
- intel_context_put(rq->context);
kmem_cache_free(slab_requests, rq);
}
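
The release path above keeps a request in engine->request_pool only when rq->execution_mask has a single bit set, i.e. the request provably ended up on one physical engine. A standalone illustration of that power-of-two test; the kernel's is_power_of_2() boils down to the same expression.

#include <stdio.h>
#include <stdbool.h>

static bool example_is_power_of_2(unsigned long mask)
{
	return mask != 0 && (mask & (mask - 1)) == 0;	/* exactly one bit set */
}

int main(void)
{
	printf("mask 0x04 -> %d (single physical engine)\n", example_is_power_of_2(0x04));
	printf("mask 0x0c -> %d (could still be a virtual engine)\n", example_is_power_of_2(0x0c));
	return 0;
}
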
@@ -611,7 +636,7 @@ bool __i915_request_submit(struct i915_request *request)
goto active;
}
- if (unlikely(intel_context_is_banned(request->context)))
+ if (unlikely(!intel_context_is_schedulable(request->context)))
i915_request_set_error_once(request, -EIO);
if (unlikely(fatal_error(request->fence.error)))
@@ -921,22 +946,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- /*
- * Hold a reference to the intel_context over life of an i915_request.
- * Without this an i915_request can exist after the context has been
- * destroyed (e.g. request retired, context closed, but user space holds
- * a reference to the request from an out fence). In the case of GuC
- * submission + virtual engine, the engine that the request references
- * is also destroyed which can trigger bad pointer dref in fence ops
- * (e.g. i915_fence_get_driver_name). We could likely change these
- * functions to avoid touching the engine but let's just be safe and
- * hold the intel_context reference. In execlist mode the request always
- * eventually points to a physical engine so this isn't an issue.
- */
- rq->context = intel_context_get(ce);
+ rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
+ rq->i915 = ce->engine->i915;
ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
@@ -1008,7 +1022,6 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
err_free:
- intel_context_put(ce);
kmem_cache_free(slab_requests, rq);
err_unreserve:
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 28b1f9db5487..47041ec68df8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -196,6 +196,8 @@ struct i915_request {
struct dma_fence fence;
spinlock_t lock;
+ struct drm_i915_private *i915;
+
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 0b9b86af6c7f..c229c91071d7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include "i915_scheduler_types.h"
+#include "i915_tasklet.h"
struct drm_printer;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 8521daba212a..1e2750210831 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
- ssize_t ret;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
@@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
const char *str = "No error state collected\n";
size_t len = strlen(str);
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
}
return ret;
@@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
+
+ kobject_put(dev_priv->sysfs_gt);
}
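
error_state_read() above now clamps the "no error state" fallback so an offset at or past the string length returns 0 instead of copying from beyond the source. A standalone sketch of that clamped partial-read logic; example_read_at() is an invented helper.

#include <stdio.h>
#include <string.h>

static size_t example_read_at(char *dst, size_t count, const char *src, size_t off)
{
	size_t len = strlen(src);
	size_t n = 0;

	if (off < len) {
		n = len - off;
		if (n > count)
			n = count;
		memcpy(dst, src + off, n);
	}
	return n;	/* 0 signals EOF to the caller */
}

int main(void)
{
	char buf[8];
	const char *msg = "No error state collected\n";

	printf("%zu\n", example_read_at(buf, sizeof(buf), msg, 0));	/* 8 */
	printf("%zu\n", example_read_at(buf, sizeof(buf), msg, 100));	/* 0, past EOF */
	return 0;
}
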
diff --git a/drivers/gpu/drm/i915/i915_tasklet.h b/drivers/gpu/drm/i915/i915_tasklet.h
new file mode 100644
index 000000000000..5d7069bdf2c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_tasklet.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_TASKLET_H__
+#define __I915_TASKLET_H__
+
+#include <linux/interrupt.h>
+
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+ while (!tasklet_trylock(t))
+ cpu_relax();
+}
+
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (!atomic_fetch_inc(&t->count))
+ tasklet_unlock_spin_wait(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+ return !atomic_read(&t->count);
+}
+
+static inline bool __tasklet_enable(struct tasklet_struct *t)
+{
+ return atomic_dec_and_test(&t->count);
+}
+
+static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_SCHED, &t->state);
+}
+
+#endif /* __I915_TASKLET_H__ */
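
The helpers collected into i915_tasklet.h above pair a counted disable with an enable that reports when the tasklet becomes runnable again. A hypothetical usage sketch, not taken from the driver, built only on the helpers in this header plus tasklet_hi_schedule().

/* Hypothetical usage sketch: pause a tasklet while touching shared state,
 * then re-arm it if this enable brought the count back to zero. */
#include <linux/interrupt.h>
#include "i915_tasklet.h"

static void example_with_tasklet_paused(struct tasklet_struct *t)
{
	__tasklet_disable_sync_once(t);	/* first disable also waits out a running instance */

	/* ... safe to poke state the tasklet would otherwise race with ... */

	if (__tasklet_enable(t))	/* true when the disable count drops back to zero */
		tasklet_hi_schedule(t);	/* kick it in case work arrived meanwhile */
}
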
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index ea7648e3aa0e..c10d68cdc3ca 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -115,39 +115,6 @@ bool i915_error_injected(void);
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
-static inline bool
-__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
-{
- size_t sz;
-
- if (check_mul_overflow(count, arr, &sz))
- return false;
-
- if (check_add_overflow(sz, base, &sz))
- return false;
-
- *size = sz;
- return true;
-}
-
-/**
- * check_struct_size() - Calculate size of structure with trailing array.
- * @p: Pointer to the structure.
- * @member: Name of the array member.
- * @n: Number of elements in the array.
- * @sz: Total size of structure and array
- *
- * Calculates size of memory needed for structure @p followed by an
- * array of @n @member elements, like struct_size() but reports
- * whether it overflowed, and the resultant size in @sz
- *
- * Return: false if the calculation overflowed.
- */
-#define check_struct_size(p, member, n, sz) \
- likely(__check_struct_size(sizeof(*(p)), \
- sizeof(*(p)->member) + __must_be_array((p)->member), \
- n, sz))
-
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
(typeof(ptr))(__v & -BIT(n)); \
@@ -184,8 +151,6 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
#define struct_member(T, member) (((T *)0)->member)
-#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
-
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
@@ -228,11 +193,6 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
-static inline u64 ptr_to_u64(const void *ptr)
-{
- return (uintptr_t)ptr;
-}
-
#define u64_to_ptr(T, x) ({ \
typecheck(u64, x); \
(T *)(uintptr_t)(x); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4f6db539571a..5d5828b9a242 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -23,6 +23,7 @@
*/
#include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
#include "display/intel_frontbuffer.h"
@@ -550,13 +551,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
return IOMEM_ERR_PTR(-EINVAL);
- if (!i915_gem_object_is_lmem(vma->obj)) {
- if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
- err = -ENODEV;
- goto err;
- }
- }
-
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
@@ -569,20 +563,33 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
* of pages, that way we can also drop the
* I915_BO_ALLOC_CONTIGUOUS when allocating the object.
*/
- if (i915_gem_object_is_lmem(vma->obj))
+ if (i915_gem_object_is_lmem(vma->obj)) {
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
- else
+ } else if (i915_vma_is_map_and_fenceable(vma)) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
+ } else {
+ ptr = (void __iomem *)
+ i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err;
+ }
+ ptr = page_pack_bits(ptr, 1);
+ }
+
if (ptr == NULL) {
err = -ENOMEM;
goto err;
}
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
- io_mapping_unmap(ptr);
+ if (page_unmask_bits(ptr))
+ __i915_gem_object_release_map(vma->obj);
+ else
+ io_mapping_unmap(ptr);
ptr = vma->iomap;
}
}
@@ -596,7 +603,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
i915_vma_set_ggtt_write(vma);
/* NB Access through the GTT requires the device to be awake. */
- return ptr;
+ return page_mask_bits(ptr);
err_unpin:
__i915_vma_unpin(vma);
@@ -614,6 +621,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
{
GEM_BUG_ON(vma->iomap == NULL);
+ /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
+
i915_vma_flush_writes(vma);
i915_vma_unpin_fence(vma);
@@ -1762,7 +1771,10 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
if (vma->iomap == NULL)
return;
- io_mapping_unmap(vma->iomap);
+ if (page_unmask_bits(vma->iomap))
+ __i915_gem_object_release_map(vma->obj);
+ else
+ io_mapping_unmap(vma->iomap);
vma->iomap = NULL;
}
@@ -1823,6 +1835,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
if (unlikely(err))
return err;
+ /*
+ * Reserve the fence slots early so that no allocation is needed after
+ * the workload has been prepared, when the fences are associated with
+ * the dma_resv.
+ */
+ if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
+ struct dma_fence *curr;
+ int idx;
+
+ dma_fence_array_for_each(curr, idx, fence)
+ ;
+ err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
+ if (unlikely(err))
+ return err;
+ }
+
if (flags & EXEC_OBJECT_WRITE) {
struct intel_frontbuffer *front;
@@ -1832,31 +1859,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
i915_active_add_request(&front->write, rq);
intel_frontbuffer_put(front);
}
+ }
- if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
- if (unlikely(err))
- return err;
- }
+ if (fence) {
+ struct dma_fence *curr;
+ enum dma_resv_usage usage;
+ int idx;
- if (fence) {
- dma_resv_add_fence(vma->obj->base.resv, fence,
- DMA_RESV_USAGE_WRITE);
+ obj->read_domains = 0;
+ if (flags & EXEC_OBJECT_WRITE) {
+ usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
- obj->read_domains = 0;
- }
- } else {
- if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
- if (unlikely(err))
- return err;
+ } else {
+ usage = DMA_RESV_USAGE_READ;
}
- if (fence) {
- dma_resv_add_fence(vma->obj->base.resv, fence,
- DMA_RESV_USAGE_READ);
- obj->write_domain = 0;
- }
+ dma_fence_array_for_each(curr, idx, fence)
+ dma_resv_add_fence(vma->obj->base.resv, curr, usage);
}
if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
@@ -1899,9 +1918,11 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
/* release the fence reg _after_ flushing */
i915_vma_revoke_fence(vma);
- __i915_vma_iounmap(vma);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
+
+ __i915_vma_iounmap(vma);
+
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
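
The reservation rework above relies on a small idiom: an empty-bodied dma_fence_array_for_each() walk leaves its index equal to the number of backing fences (one for a plain fence), and that count is what gets reserved before the fences are added individually. A standalone sketch of the same pattern, assuming the caller already holds the reservation lock (the helper name is hypothetical):

#include <linux/dma-fence-array.h>
#include <linux/dma-resv.h>

/* Reserve exactly as many fence slots as @fence decomposes into, then
 * add each backing fence with the given usage. Caller holds the resv lock. */
static int resv_add_composite_fence(struct dma_resv *resv,
                                    struct dma_fence *fence,
                                    enum dma_resv_usage usage)
{
        struct dma_fence *curr;
        int idx, err;

        dma_fence_array_for_each(curr, idx, fence)
                ; /* empty body: idx ends up as the fence count */

        err = dma_resv_reserve_fences(resv, idx);
        if (err)
                return err;

        dma_fence_array_for_each(curr, idx, fence)
                dma_resv_add_fence(resv, curr, usage);

        return 0;
}
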
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e7d2cf7d65c8..1c150cd7dceb 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -143,6 +143,7 @@ enum intel_ppgtt_type {
func(needs_compact_pt); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
+ func(has_3d_pipeline); \
func(has_4tile); \
func(has_flat_ccs); \
func(has_global_mocs); \
@@ -150,11 +151,14 @@ enum intel_ppgtt_type {
func(has_heci_pxp); \
func(has_heci_gscfi); \
func(has_guc_deprivilege); \
+ func(has_l3_ccs_read); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
- func(has_mslices); \
+ func(has_media_ratio_mode); \
+ func(has_mslice_steering); \
+ func(has_one_eu_per_fuse_bit); \
func(has_pooled_eu); \
func(has_pxp); \
func(has_rc6); \
@@ -210,8 +214,6 @@ struct intel_device_info {
u32 memory_regions; /* regions supported by the HW */
- u32 display_mmio_offset;
-
u8 gt; /* GT number, 0 if undefined */
#define DEFINE_FLAG(name) u8 name:1
@@ -227,27 +229,30 @@ struct intel_device_info {
u8 fbc_mask;
u8 abox_mask;
+ struct {
+ u16 size; /* in blocks */
+ u8 slice_mask;
+ } dbuf;
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
- } display;
- struct {
- u16 size; /* in blocks */
- u8 slice_mask;
- } dbuf;
-
- /* Register offsets for the various display pipes and transcoders */
- int pipe_offsets[I915_MAX_TRANSCODERS];
- int trans_offsets[I915_MAX_TRANSCODERS];
- int cursor_offsets[I915_MAX_PIPES];
-
- struct color_luts {
- u32 degamma_lut_size;
- u32 gamma_lut_size;
- u32 degamma_lut_tests;
- u32 gamma_lut_tests;
- } color;
+ /* Global register offset for the display engine */
+ u32 mmio_offset;
+
+ /* Register offsets for the various display pipes and transcoders */
+ u32 pipe_offsets[I915_MAX_TRANSCODERS];
+ u32 trans_offsets[I915_MAX_TRANSCODERS];
+ u32 cursor_offsets[I915_MAX_PIPES];
+
+ struct {
+ u32 degamma_lut_size;
+ u32 gamma_lut_size;
+ u32 degamma_lut_tests;
+ u32 gamma_lut_tests;
+ } color;
+ } display;
};
struct intel_runtime_info {
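
The func(...) lists above are X-macros: each entry expands to a u8 name:1 bitfield through DEFINE_FLAG, and the same list can be re-expanded elsewhere (for debug dumps, for example). A toy sketch of the pattern, with hypothetical names:

#include <linux/types.h>

/* Toy X-macro: one list, expanded into single-bit flags. */
#define TOY_FOR_EACH_FLAG(func) \
        func(has_foo); \
        func(has_bar);

struct toy_device_info {
#define DEFINE_FLAG(name) u8 name:1
        TOY_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
};
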
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 2b9e7833da96..437447119770 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -393,7 +393,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
u32 val = 0;
int ret;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 72dac1718f3e..157e166672d7 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,10 +3,12 @@
* Copyright © 2020 Intel Corporation
*/
+#include "display/intel_audio_regs.h"
#include "display/intel_dmc_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include "gvt/gvt.h"
+
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index e2b2bbdc0714..0fec25be146a 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -25,7 +25,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- /* PantherPoint is CPT compatible */
+ /* PPT is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
@@ -47,7 +47,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
+ /* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
@@ -55,7 +55,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
+ /* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
@@ -99,14 +99,14 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_COFFEELAKE(dev_priv) &&
!IS_COMETLAKE(dev_priv) &&
!IS_ROCKETLAKE(dev_priv));
- /* CometPoint is CNP Compatible */
+ /* CMP is CNP compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
drm_WARN_ON(&dev_priv->drm,
!IS_COFFEELAKE(dev_priv) &&
!IS_COMETLAKE(dev_priv));
- /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+ /* CMP-V is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
@@ -116,7 +116,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
- return PCH_MCC;
+ /* MCC is TGP compatible */
+ return PCH_TGP;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
@@ -127,7 +128,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
- return PCH_JSP;
+ /* JSP is ICP compatible */
+ return PCH_ICP;
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index b7a8cf409d48..7c8ce9781d1a 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -22,10 +22,8 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake PCH */
- PCH_JSP, /* Jasper Lake PCH */
- PCH_MCC, /* Mule Creek Canyon PCH */
- PCH_TGP, /* Tiger Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
@@ -68,8 +66,6 @@ enum intel_pch {
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
-#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
-#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index ac727546868e..a234d9b4ed14 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -52,14 +52,12 @@ static int gen7_check_mailbox_status(u32 mbox)
}
}
-static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
+static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
u32 *val, u32 *val1,
int fast_timeout_us, int slow_timeout_ms,
bool is_read)
{
- struct intel_uncore *uncore = &i915->uncore;
-
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&uncore->i915->sb_lock);
/*
* GEN6_PCODE_* are outside of the forcewake domain, we can use
@@ -88,22 +86,22 @@ static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
if (is_read && val1)
*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
- if (GRAPHICS_VER(i915) > 6)
+ if (GRAPHICS_VER(uncore->i915) > 6)
return gen7_check_mailbox_status(mbox);
else
return gen6_check_mailbox_status(mbox);
}
-int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
+int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
int err;
- mutex_lock(&i915->sb_lock);
- err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true);
- mutex_unlock(&i915->sb_lock);
+ mutex_lock(&uncore->i915->sb_lock);
+ err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
+ mutex_unlock(&uncore->i915->sb_lock);
if (err) {
- drm_dbg(&i915->drm,
+ drm_dbg(&uncore->i915->drm,
"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
mbox, __builtin_return_address(0), err);
}
@@ -111,18 +109,18 @@ int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
return err;
}
-int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
int err;
- mutex_lock(&i915->sb_lock);
- err = __snb_pcode_rw(i915, mbox, &val, NULL,
+ mutex_lock(&uncore->i915->sb_lock);
+ err = __snb_pcode_rw(uncore, mbox, &val, NULL,
fast_timeout_us, slow_timeout_ms, false);
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&uncore->i915->sb_lock);
if (err) {
- drm_dbg(&i915->drm,
+ drm_dbg(&uncore->i915->drm,
"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
val, mbox, __builtin_return_address(0), err);
}
@@ -130,18 +128,18 @@ int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
return err;
}
-static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status)
{
- *status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true);
+ *status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);
return (*status == 0) && ((request & reply_mask) == reply);
}
/**
* skl_pcode_request - send PCODE request until acknowledgment
- * @i915: device private
+ * @uncore: uncore
* @mbox: PCODE mailbox ID the request is targeted for
* @request: request ID
* @reply_mask: mask used to check for request acknowledgment
@@ -158,16 +156,16 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
* other error as reported by PCODE.
*/
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms)
{
u32 status;
int ret;
- mutex_lock(&i915->sb_lock);
+ mutex_lock(&uncore->i915->sb_lock);
#define COND \
- skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+ skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)
/*
* Prime the PCODE by doing a request first. Normally it guarantees
@@ -193,35 +191,58 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(&uncore->i915->drm,
"PCODE timeout, retrying with preemption disabled\n");
- drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
+ drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
preempt_enable();
out:
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&uncore->i915->sb_lock);
return status ? status : ret;
#undef COND
}
-int intel_pcode_init(struct drm_i915_private *i915)
+int intel_pcode_init(struct intel_uncore *uncore)
{
- int ret = 0;
+ if (!IS_DGFX(uncore->i915))
+ return 0;
+
+ return skl_pcode_request(uncore, DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+}
+
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
+{
+ intel_wakeref_t wakeref;
+ u32 mbox;
+ int err;
- if (!IS_DGFX(i915))
- return ret;
+ mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
- ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ err = snb_pcode_read(uncore, mbox, val, NULL);
- drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
+ return err;
+}
- if (ret)
- drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
+{
+ intel_wakeref_t wakeref;
+ u32 mbox;
+ int err;
- return ret;
+ mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ err = snb_pcode_write(uncore, mbox, val);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 0962a17fac48..8d2198e29422 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -8,17 +8,23 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_uncore;
-int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1);
-int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms);
-#define snb_pcode_write(i915, mbox, val) \
- snb_pcode_write_timeout(i915, mbox, val, 500, 0)
+#define snb_pcode_write(uncore, mbox, val) \
+ snb_pcode_write_timeout(uncore, mbox, val, 500, 0)
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
-int intel_pcode_init(struct drm_i915_private *i915);
+int intel_pcode_init(struct intel_uncore *uncore);
+
+/*
+ * Helpers for dGfx PCODE mailbox command formatting
+ */
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
#endif /* _INTEL_PCODE_H */
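
A minimal caller sketch for the reworked interface above (the wrapper function is hypothetical and the register-definition include is elided): the pcode helpers now take struct intel_uncore * rather than the drm_i915_private pointer, as in the intel_dram.c hunk earlier.

#include "i915_drv.h"
#include "intel_pcode.h"

/* Same mailbox command used by icl_pcode_read_mem_global_info() above
 * (macros come from the driver's register headers), issued through the
 * new uncore-based entry point. */
static int example_read_mem_global_info(struct drm_i915_private *i915, u32 *val)
{
        return snb_pcode_read(&i915->uncore,
                              ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
                              ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
                              val, NULL);
}
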
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ee0047fdc95d..f06babdb3a8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,6 +30,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
@@ -2862,7 +2863,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[8])
+ u16 wm[])
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2874,7 +2875,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+ ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
@@ -2893,7 +2894,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+ ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
drm_err(&dev_priv->drm,
@@ -3679,7 +3680,7 @@ intel_sagv_block_time(struct drm_i915_private *dev_priv)
u32 val = 0;
int ret;
- ret = snb_pcode_read(dev_priv,
+ ret = snb_pcode_read(&dev_priv->uncore,
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
@@ -3748,7 +3749,7 @@ static void skl_sagv_enable(struct drm_i915_private *dev_priv)
return;
drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -3781,7 +3782,7 @@ static void skl_sagv_disable(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
@@ -4100,8 +4101,8 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->dbuf.size /
- hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
+ return INTEL_INFO(dev_priv)->display.dbuf.size /
+ hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
}
static void
@@ -4120,7 +4121,7 @@ skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
+ WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
}
static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
@@ -4368,9 +4369,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
skl_ddb_entry_init_from_hw(ddb_y, val);
}
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -4950,7 +4951,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
return data_rate;
}
-const struct skl_wm_level *
+static const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
int level)
@@ -4963,7 +4964,7 @@ skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
return &wm->wm[level];
}
-const struct skl_wm_level *
+static const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id)
{
@@ -5915,8 +5916,8 @@ void skl_write_cursor_wm(struct intel_plane *plane,
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2)
+static bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
{
return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
@@ -6095,7 +6096,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->dbuf.slice_mask,
+ INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -6488,8 +6489,8 @@ static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out)
+static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -7166,6 +7167,126 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+
+ if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 11 &&
+ hw_enabled_slices != dev_priv->dbuf.enabled_slices)
+ drm_err(&dev_priv->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ dev_priv->dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -7513,10 +7634,9 @@ static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
- /* Wa_22010954014:dg2_g10 */
- if (IS_DG2_G10(i915))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
- SGSI_SIDECLK_DIS);
+ /* Wa_22010954014:dg2 */
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGSI_SIDECLK_DIS);
/*
* Wa_14010733611:dg2_g10
@@ -7527,6 +7647,17 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
SGR_DIS | SGGI_DIS);
}
+static void pvc_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ /* Wa_14012385139:pvc */
+ if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
+
+ /* Wa_22010954014:pvc */
+ if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -7943,6 +8074,7 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
+CG_FUNCS(pvc);
CG_FUNCS(dg2);
CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
@@ -7981,7 +8113,9 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_DG2(dev_priv))
+ if (IS_PONTEVECCHIO(dev_priv))
+ dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
+ else if (IS_DG2(dev_priv))
dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
else if (IS_XEHPSDV(dev_priv))
dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 50604cf7398c..945503ae493e 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -35,15 +35,12 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state);
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
void skl_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -51,13 +48,6 @@ bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- int level);
-const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id);
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx);
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 74e8e4680028..42b3133d8387 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -135,6 +135,8 @@ static const struct intel_step_info adlp_n_revids[] = {
[0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 },
};
+static void pvc_step_init(struct drm_i915_private *i915, int pci_revid);
+
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -142,7 +144,10 @@ void intel_step_init(struct drm_i915_private *i915)
int revid = INTEL_REVID(i915);
struct intel_step_info step = {};
- if (IS_DG2_G10(i915)) {
+ if (IS_PONTEVECCHIO(i915)) {
+ pvc_step_init(i915, revid);
+ return;
+ } else if (IS_DG2_G10(i915)) {
revids = dg2_g10_revid_step_tbl;
size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
} else if (IS_DG2_G11(i915)) {
@@ -235,6 +240,69 @@ void intel_step_init(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->step = step;
}
+#define PVC_BD_REVID GENMASK(5, 3)
+#define PVC_CT_REVID GENMASK(2, 0)
+
+static const int pvc_bd_subids[] = {
+ [0x0] = STEP_A0,
+ [0x3] = STEP_B0,
+ [0x4] = STEP_B1,
+ [0x5] = STEP_B3,
+};
+
+static const int pvc_ct_subids[] = {
+ [0x3] = STEP_A0,
+ [0x5] = STEP_B0,
+ [0x6] = STEP_B1,
+ [0x7] = STEP_C0,
+};
+
+static int
+pvc_step_lookup(struct drm_i915_private *i915, const char *type,
+ const int *table, int size, int subid)
+{
+ if (subid < size && table[subid] != STEP_NONE)
+ return table[subid];
+
+ drm_warn(&i915->drm, "Unknown %s id 0x%02x\n", type, subid);
+
+ /*
+ * As on other platforms, try to use the next higher ID if we land on a
+ * gap in the table.
+ */
+ while (subid < size && table[subid] == STEP_NONE)
+ subid++;
+
+ if (subid < size) {
+ drm_dbg(&i915->drm, "Using steppings for %s id 0x%02x\n",
+ type, subid);
+ return table[subid];
+ }
+
+ drm_dbg(&i915->drm, "Using future steppings\n");
+ return STEP_FUTURE;
+}
+
+/*
+ * PVC needs special handling: rather than looking up the whole revid
+ * in a table, we decode specific bitfields within the revid for the
+ * various components.
+ */
+static void pvc_step_init(struct drm_i915_private *i915, int pci_revid)
+{
+ int ct_subid, bd_subid;
+
+ bd_subid = FIELD_GET(PVC_BD_REVID, pci_revid);
+ ct_subid = FIELD_GET(PVC_CT_REVID, pci_revid);
+
+ RUNTIME_INFO(i915)->step.basedie_step =
+ pvc_step_lookup(i915, "Base Die", pvc_bd_subids,
+ ARRAY_SIZE(pvc_bd_subids), bd_subid);
+ RUNTIME_INFO(i915)->step.graphics_step =
+ pvc_step_lookup(i915, "Compute Tile", pvc_ct_subids,
+ ARRAY_SIZE(pvc_ct_subids), ct_subid);
+}
+
#define STEP_NAME_CASE(name) \
case STEP_##name: \
return #name;
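
To make the bitfield decode above concrete, a worked example with a hypothetical PCI revid of 0x2b (not a value taken from the patch):

/*
 * pci_revid = 0x2b = 0b101011
 *   FIELD_GET(PVC_BD_REVID, 0x2b): bits [5:3] = 0b101 = 0x5 -> pvc_bd_subids[0x5] = STEP_B3
 *   FIELD_GET(PVC_CT_REVID, 0x2b): bits [2:0] = 0b011 = 0x3 -> pvc_ct_subids[0x3] = STEP_A0
 */
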
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index d71a99bd5179..a6b12bfa9744 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -11,9 +11,10 @@
struct drm_i915_private;
struct intel_step_info {
- u8 graphics_step;
+ u8 graphics_step; /* Represents the compute tile on Xe_HPC */
u8 display_step;
u8 media_step;
+ u8 basedie_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
@@ -25,6 +26,7 @@ struct intel_step_info {
func(B0) \
func(B1) \
func(B2) \
+ func(B3) \
func(C0) \
func(C1) \
func(D0) \
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 83517a703eb6..a852c471d1b3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -938,36 +938,32 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
return entry->domains;
}
-#define GEN_FW_RANGE(s, e, d) \
- { .start = (s), .end = (e), .domains = (d) }
-
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
-static const struct intel_forcewake_range __vlv_fw_ranges[] = {
- GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
- GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
- GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
-};
-
-#define __fwtable_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
+/*
+ * Shadowed register tables describe special register ranges that i915 is
+ * allowed to write to without acquiring forcewake. If these registers' power
+ * wells are down, the hardware will save values written by i915 to a shadow
+ * copy and automatically transfer them into the real register the next time
+ * the power well is woken up. Shadowing only applies to writes; forcewake
+ * must still be acquired when reading from registers in these ranges.
+ *
+ * The documentation for shadowed registers is somewhat spotty on older
+ * platforms. However, a register missing from these lists is non-fatal; it
+ * just means we'll wake up the hardware for some register accesses that
+ * didn't really need it.
+ *
+ * The ranges listed in these tables must be sorted by offset.
+ *
+ * When adding new tables here, please also add them to
+ * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
+ * scanned for obvious mistakes or typos by the selftests.
+ */
-/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const struct i915_range gen8_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0xA008, .end = 0xA00C },
{ .start = 0x12030, .end = 0x12030 },
{ .start = 0x1a030, .end = 0x1a030 },
{ .start = 0x22030, .end = 0x22030 },
- /* TODO: Other registers are not yet used */
};
static const struct i915_range gen11_shadowed_regs[] = {
@@ -1080,6 +1076,45 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
+static const struct i915_range pvc_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2510, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0xA188, .end = 0xA188 },
+ { .start = 0xA278, .end = 0xA278 },
+ { .start = 0xA540, .end = 0xA56C },
+ { .start = 0xC4C8, .end = 0xC4C8 },
+ { .start = 0xC4E0, .end = 0xC4E0 },
+ { .start = 0xC600, .end = 0xC600 },
+ { .start = 0xC658, .end = 0xC658 },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
+ { .start = 0x1E0030, .end = 0x1E0030 },
+ { .start = 0x1E0510, .end = 0x1E0550 },
+ { .start = 0x1E4030, .end = 0x1E4030 },
+ { .start = 0x1E4510, .end = 0x1E4550 },
+ { .start = 0x1E8030, .end = 0x1E8030 },
+ { .start = 0x1E8510, .end = 0x1E8550 },
+ { .start = 0x1F0030, .end = 0x1F0030 },
+ { .start = 0x1F0510, .end = 0x1F0550 },
+ { .start = 0x1F4030, .end = 0x1F4030 },
+ { .start = 0x1F4510, .end = 0x1F4550 },
+ { .start = 0x1F8030, .end = 0x1F8030 },
+ { .start = 0x1F8510, .end = 0x1F8550 },
+};
+
static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
if (key < range->start)
@@ -1107,11 +1142,70 @@ gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
return FORCEWAKE_RENDER;
}
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(uncore, offset); \
+ __fwd; \
+})
+
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ const u32 __offset = (offset); \
+ if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
+ __fwd; \
+})
+
+#define GEN_FW_RANGE(s, e, d) \
+ { .start = (s), .end = (e), .domains = (d) }
+
+/*
+ * All platforms' forcewake tables below must be sorted by offset ranges.
+ * Furthermore, newly added forcewake tables should be "watertight" and have
+ * no gaps between ranges.
+ *
+ * When there are multiple consecutive ranges listed in the bspec with
+ * the same forcewake domain, it is customary to combine them into a single
+ * row in the tables below to keep the tables small and lookups fast.
+ * Likewise, reserved/unused ranges may be combined with the preceding and/or
+ * following ranges since the driver will never be making MMIO accesses in
+ * those ranges.
+ *
+ * For example, if the bspec were to list:
+ *
+ * ...
+ * 0x1000 - 0x1fff: GT
+ * 0x2000 - 0x2cff: GT
+ * 0x2d00 - 0x2fff: unused/reserved
+ * 0x3000 - 0xffff: GT
+ * ...
+ *
+ * these could all be represented by a single line in the code:
+ *
+ * GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
+ *
+ * When adding new forcewake tables here, please also add them to
+ * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
+ * scanned for obvious mistakes or typos by the selftests.
+ */
+
static const struct intel_forcewake_range __gen6_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
@@ -1131,16 +1225,6 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- const u32 __offset = (offset); \
- if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
- __fwd = find_fw_domain(uncore, __offset); \
- __fwd; \
-})
-
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
@@ -1176,7 +1260,6 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
@@ -1215,14 +1298,6 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};
-/*
- * *Must* be sorted by offset ranges! See intel_fw_table_check().
- *
- * Note that the spec lists several reserved/unused ranges that don't
- * actually contain any registers. In the table below we'll combine those
- * reserved ranges with either the preceding or following range to keep the
- * table small and lookups fast.
- */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0x1fff, 0), /*
0x0 - 0xaff: reserved
@@ -1327,8 +1402,6 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = {
/*
* Graphics IP version 12.55 brings a slight change to the 0xd800 range,
* switching it from the GT domain to the render domain.
- *
- * *Must* be sorted by offset ranges! See intel_fw_table_check().
*/
#define XEHP_FWRANGES(FW_RANGE_D800) \
GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
@@ -1490,6 +1563,103 @@ static const struct intel_forcewake_range __dg2_fw_ranges[] = {
XEHP_FWRANGES(FORCEWAKE_RENDER)
};
+static const struct intel_forcewake_range __pvc_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, 0),
+ GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xc00, 0xfff, 0),
+ GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
+ 0x4000 - 0x4aff: gt
+ 0x4b00 - 0x4fff: reserved
+ 0x5000 - 0x51ff: gt
+ 0x5200 - 0x52ff: reserved
+ 0x5300 - 0x53ff: gt
+ 0x5400 - 0x7fff: reserved
+ 0x8000 - 0x813f: gt */
+ GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8180, 0x81ff, 0),
+ GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
+ 0x8200 - 0x82ff: gt
+ 0x8300 - 0x84ff: reserved
+ 0x8500 - 0x887f: gt
+ 0x8880 - 0x8a7f: reserved
+ 0x8a80 - 0x8aff: gt
+ 0x8b00 - 0x8fff: reserved
+ 0x9000 - 0x947f: gt
+ 0x9480 - 0x94cf: reserved */
+ GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x9560, 0x967f, 0), /*
+ 0x9560 - 0x95ff: always on
+ 0x9600 - 0x967f: reserved */
+ GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
+ 0x9680 - 0x96ff: render
+ 0x9700 - 0x97ff: reserved */
+ GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
+ 0x9800 - 0xb4ff: gt
+ 0xb500 - 0xbfff: reserved
+ 0xc000 - 0xcfff: gt */
+ GEN_FW_RANGE(0xd000, 0xd3ff, 0),
+ GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
+ 0xdd00 - 0xddff: gt
+ 0xde00 - 0xde7f: reserved */
+ GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
+ 0xde80 - 0xdeff: render
+ 0xdf00 - 0xe1ff: reserved
+ 0xe200 - 0xe7ff: render
+ 0xe800 - 0xe8ff: reserved */
+ GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
+ 0xe900 - 0xe9ff: gt
+ 0xea00 - 0xebff: reserved
+ 0xec00 - 0xffff: gt
+ 0x10000 - 0x11fff: reserved */
+ GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
+ 0x12000 - 0x127ff: always on
+ 0x12800 - 0x12fff: reserved */
+ GEN_FW_RANGE(0x13000, 0x23fff, FORCEWAKE_GT), /*
+ 0x13000 - 0x135ff: gt
+ 0x13600 - 0x147ff: reserved
+ 0x14800 - 0x153ff: gt
+ 0x15400 - 0x19fff: reserved
+ 0x1a000 - 0x1ffff: gt
+ 0x20000 - 0x21fff: reserved
+ 0x22000 - 0x23fff: gt */
+ GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
+ 0x24000 - 0x2407f: always on
+ 0x24080 - 0x2417f: reserved */
+ GEN_FW_RANGE(0x24180, 0x3ffff, FORCEWAKE_GT), /*
+ 0x24180 - 0x241ff: gt
+ 0x24200 - 0x251ff: reserved
+ 0x25200 - 0x252ff: gt
+ 0x25300 - 0x25fff: reserved
+ 0x26000 - 0x27fff: gt
+ 0x28000 - 0x2ffff: reserved
+ 0x30000 - 0x3ffff: gt */
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
+ 0x1c0000 - 0x1c2bff: VD0
+ 0x1c2c00 - 0x1c2cff: reserved
+ 0x1c2d00 - 0x1c2dff: VD0
+ 0x1c2e00 - 0x1c3eff: reserved
+ 0x1c3f00 - 0x1c3fff: VD0 */
+ GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
+ 0x1c4000 - 0x1c6aff: VD1
+ 0x1c6b00 - 0x1c7eff: reserved
+ 0x1c7f00 - 0x1c7fff: VD1
+ 0x1c8000 - 0x1cffff: reserved */
+ GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x1d0000 - 0x1d2aff: VD2
+ 0x1d2b00 - 0x1d3eff: reserved
+ 0x1d3f00 - 0x1d3fff: VD2
+ 0x1d4000 - 0x23ffff: reserved */
+ GEN_FW_RANGE(0x240000, 0x3dffff, 0),
+ GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
+};
+
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
@@ -2125,7 +2295,11 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
@@ -2470,118 +2644,6 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return fw_domains;
}
-/**
- * uncore_rw_with_mcr_steering_fw - Access a register after programming
- * the MCR selector register.
- * @uncore: pointer to struct intel_uncore
- * @reg: register being accessed
- * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
- * @slice: slice number (ignored for multi-cast write)
- * @subslice: sub-slice number (ignored for multi-cast write)
- * @value: register value to be written (ignored for read)
- *
- * Return: 0 for write access. register value for read access.
- *
- * Caller needs to make sure the relevant forcewake wells are up.
- */
-static u32 uncore_rw_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg, u8 rw_flag,
- int slice, int subslice, u32 value)
-{
- u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
-
- lockdep_assert_held(&uncore->lock);
-
- if (GRAPHICS_VER(uncore->i915) >= 11) {
- mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
- mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
-
- /*
- * Wa_22013088509
- *
- * The setting of the multicast/unicast bit usually wouldn't
- * matter for read operations (which always return the value
- * from a single register instance regardless of how that bit
- * is set), but some platforms have a workaround requiring us
- * to remain in multicast mode for reads. There's no real
- * downside to this, so we'll just go ahead and do so on all
- * platforms; we'll only clear the multicast bit from the mask
- * when exlicitly doing a write operation.
- */
- if (rw_flag == FW_REG_WRITE)
- mcr_mask |= GEN11_MCR_MULTICAST;
- } else {
- mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
- mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
- }
-
- old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
-
- mcr &= ~mcr_mask;
- mcr |= mcr_ss;
- intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-
- if (rw_flag == FW_REG_READ)
- val = intel_uncore_read_fw(uncore, reg);
- else
- intel_uncore_write_fw(uncore, reg, value);
-
- mcr &= ~mcr_mask;
- mcr |= old_mcr & mcr_mask;
-
- intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-
- return val;
-}
-
-static u32 uncore_rw_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, u8 rw_flag,
- int slice, int subslice,
- u32 value)
-{
- enum forcewake_domains fw_domains;
- u32 val;
-
- fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
- rw_flag);
- fw_domains |= intel_uncore_forcewake_for_reg(uncore,
- GEN8_MCR_SELECTOR,
- FW_REG_READ | FW_REG_WRITE);
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
- val = uncore_rw_with_mcr_steering_fw(uncore, reg, rw_flag,
- slice, subslice, value);
-
- intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irq(&uncore->lock);
-
- return val;
-}
-
-u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice)
-{
- return uncore_rw_with_mcr_steering_fw(uncore, reg, FW_REG_READ,
- slice, subslice, 0);
-}
-
-u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice)
-{
- return uncore_rw_with_mcr_steering(uncore, reg, FW_REG_READ,
- slice, subslice, 0);
-}
-
-void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, u32 value,
- int slice, int subslice)
-{
- uncore_rw_with_mcr_steering(uncore, reg, FW_REG_WRITE,
- slice, subslice, value);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 52fe3d89dd2b..b1fa912a65e7 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -210,14 +210,6 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg,
- int slice, int subslice);
-u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice);
-void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, u32 value,
- int slice, int subslice);
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index c9da1015eb42..e888b5124a07 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -9,9 +9,10 @@
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
-#include "pxp/intel_pxp.h"
-#include "pxp/intel_pxp_irq.h"
#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_debugfs.h"
+#include "intel_pxp_irq.h"
static int pxp_info_show(struct seq_file *m, void *data)
{
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index cdd196783535..fda9bb79c049 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -69,6 +69,7 @@ static int intel_shadow_table_check(void)
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
+ { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
};
const struct i915_range *range;
unsigned int i, j;
@@ -115,6 +116,7 @@ int intel_uncore_mock_selftests(void)
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
{ __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
+ { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
};
int err, i;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index 24147ee7080e..1c70f70247f6 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index ac45d54acd4e..c29f343f33e5 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -5,7 +5,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 14a058a42854..41799011f73b 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
@@ -21,6 +22,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 2b1fdf2cbbce..6b34fac3f73a 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -18,6 +18,7 @@
#include <video/imx-ipu-v3.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9c8829f945b2..f7863d6dea80 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}
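
Note on the hunk above: `&ipu_crtc->plane[1]` is the address of an array slot and can never be NULL, so the old condition was always true; the fix tests the pointer stored in the slot, which is NULL when the second plane is absent. A minimal stand-alone C sketch of the difference, using a hypothetical structure shaped like the driver's:

#include <stdio.h>

struct plane { int id; };

struct crtc {
	struct plane *plane[2];	/* plane[1] may legitimately be NULL */
};

int main(void)
{
	struct plane p0 = { .id = 0 };
	struct crtc crtc = { .plane = { &p0, NULL } };

	/* Address of an array element: never NULL, so this branch is always taken. */
	if (&crtc.plane[1])
		printf("&crtc.plane[1] is always non-NULL\n");

	/* Value stored in the slot: the intended check. */
	if (!crtc.plane[1])
		printf("crtc.plane[1] is NULL, the second plane is absent\n");

	return 0;
}
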
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 36b32e8806e3..ea5f594955df 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -7,8 +7,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 63ba2ad84679..06723b2e9b84 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -6,6 +6,7 @@
*/
#include <linux/component.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
@@ -14,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 8eb0ad501a7b..eb8208bfe5ab 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
@@ -33,6 +34,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -69,6 +71,7 @@ struct jz_soc_info {
bool map_noncoherent;
bool use_extended_hwdesc;
bool plane_f0_not_working;
+ u32 max_burst;
unsigned int max_width, max_height;
const u32 *formats_f0, *formats_f1;
unsigned int num_formats_f0, num_formats_f1;
@@ -318,8 +321,9 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
regmap_write(priv->map, JZ_REG_LCD_REV, mode->htotal << 16);
}
- regmap_set_bits(priv->map, JZ_REG_LCD_CTRL,
- JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_16);
+ regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
+ JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_MASK,
+ JZ_LCD_CTRL_OFUP | priv->soc_info->max_burst);
/*
* IPU restart - specify how much time the LCDC will wait before
@@ -1518,6 +1522,7 @@ static const struct jz_soc_info jz4740_soc_info = {
.map_noncoherent = false,
.max_width = 800,
.max_height = 600,
+ .max_burst = JZ_LCD_CTRL_BURST_16,
.formats_f1 = jz4740_formats,
.num_formats_f1 = ARRAY_SIZE(jz4740_formats),
/* JZ4740 has only one plane */
@@ -1529,6 +1534,7 @@ static const struct jz_soc_info jz4725b_soc_info = {
.map_noncoherent = false,
.max_width = 800,
.max_height = 600,
+ .max_burst = JZ_LCD_CTRL_BURST_16,
.formats_f1 = jz4725b_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4725b_formats_f1),
.formats_f0 = jz4725b_formats_f0,
@@ -1541,6 +1547,7 @@ static const struct jz_soc_info jz4770_soc_info = {
.map_noncoherent = true,
.max_width = 1280,
.max_height = 720,
+ .max_burst = JZ_LCD_CTRL_BURST_64,
.formats_f1 = jz4770_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
.formats_f0 = jz4770_formats_f0,
@@ -1555,6 +1562,7 @@ static const struct jz_soc_info jz4780_soc_info = {
.plane_f0_not_working = true, /* REVISIT */
.max_width = 4096,
.max_height = 2048,
+ .max_burst = JZ_LCD_CTRL_BURST_64,
.formats_f1 = jz4770_formats_f1,
.num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
.formats_f0 = jz4770_formats_f0,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index cb1d09b62588..e5bd007ea93d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -106,6 +106,9 @@
#define JZ_LCD_CTRL_BURST_4 (0x0 << 28)
#define JZ_LCD_CTRL_BURST_8 (0x1 << 28)
#define JZ_LCD_CTRL_BURST_16 (0x2 << 28)
+#define JZ_LCD_CTRL_BURST_32 (0x3 << 28)
+#define JZ_LCD_CTRL_BURST_64 (0x4 << 28)
+#define JZ_LCD_CTRL_BURST_MASK (0x7 << 28)
#define JZ_LCD_CTRL_RGB555 BIT(27)
#define JZ_LCD_CTRL_OFUP BIT(26)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24)
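
The new BURST_32/BURST_64 encodings and BURST_MASK above back the switch from regmap_set_bits() to regmap_update_bits() in ingenic-drm-drv.c: the burst size is a 3-bit field at bits 30:28, so the previous value has to be masked out before the per-SoC maximum is written; OR-ing a smaller encoding over a larger one would leave stale bits set. A minimal stand-alone sketch of that masked update, reusing the bit layout from this header:

#include <stdio.h>
#include <stdint.h>

#define JZ_LCD_CTRL_BURST_16	(0x2u << 28)
#define JZ_LCD_CTRL_BURST_64	(0x4u << 28)
#define JZ_LCD_CTRL_BURST_MASK	(0x7u << 28)

/* Mirror of a masked register update (regmap_update_bits) on a plain value. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t ctrl = JZ_LCD_CTRL_BURST_64;	/* e.g. left over from the bootloader */

	/* Plain OR (set_bits) cannot clear bit 30: result is 0x6 << 28, a bogus encoding. */
	printf("set_bits:    %#010x\n", (unsigned int)(ctrl | JZ_LCD_CTRL_BURST_16));

	/* Masked update yields exactly BURST_16. */
	printf("update_bits: %#010x\n",
	       (unsigned int)update_bits(ctrl, JZ_LCD_CTRL_BURST_MASK,
					 JZ_LCD_CTRL_BURST_16));

	return 0;
}
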
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 2737fc521e15..32a50935aa6d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -24,6 +24,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index f6071882054c..cf7cf0b07541 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 2735b8eb3537..89d055a089a6 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -5,11 +5,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
new file mode 100644
index 000000000000..300b2be07385
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/Kconfig
@@ -0,0 +1,9 @@
+config DRM_LOGICVC
+ tristate "LogiCVC DRM"
+ depends on DRM
+ depends on OF || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ DRM display driver for the logiCVC programmable logic block from Xylon
diff --git a/drivers/gpu/drm/logicvc/Makefile b/drivers/gpu/drm/logicvc/Makefile
new file mode 100644
index 000000000000..6e4b01979d38
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/Makefile
@@ -0,0 +1,9 @@
+logicvc-drm-y += \
+ logicvc_crtc.o \
+ logicvc_drm.o \
+ logicvc_interface.o \
+ logicvc_layer.o \
+ logicvc_mode.o \
+ logicvc_of.o
+
+obj-$(CONFIG_DRM_LOGICVC) += logicvc-drm.o
diff --git a/drivers/gpu/drm/logicvc/logicvc_crtc.c b/drivers/gpu/drm/logicvc/logicvc_crtc.c
new file mode 100644
index 000000000000..c94bb9bb456b
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_crtc.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "logicvc_crtc.h"
+#include "logicvc_drm.h"
+#include "logicvc_interface.h"
+#include "logicvc_layer.h"
+#include "logicvc_regs.h"
+
+#define logicvc_crtc(c) \
+ container_of(c, struct logicvc_crtc, drm_crtc)
+
+static enum drm_mode_status
+logicvc_crtc_mode_valid(struct drm_crtc *drm_crtc,
+ const struct drm_display_mode *mode)
+{
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ return MODE_OK;
+}
+
+static void logicvc_crtc_atomic_begin(struct drm_crtc *drm_crtc,
+ struct drm_atomic_state *state)
+{
+ struct logicvc_crtc *crtc = logicvc_crtc(drm_crtc);
+ struct drm_crtc_state *old_state =
+ drm_atomic_get_old_crtc_state(state, drm_crtc);
+ struct drm_device *drm_dev = drm_crtc->dev;
+ unsigned long flags;
+
+ /*
+ * We need to grab the pending event here if vblank was already enabled
+ * since we won't get a call to atomic_enable to grab it.
+ */
+ if (drm_crtc->state->event && old_state->active) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ WARN_ON(drm_crtc_vblank_get(drm_crtc) != 0);
+
+ crtc->event = drm_crtc->state->event;
+ drm_crtc->state->event = NULL;
+
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+}
+
+static void logicvc_crtc_atomic_enable(struct drm_crtc *drm_crtc,
+ struct drm_atomic_state *state)
+{
+ struct logicvc_crtc *crtc = logicvc_crtc(drm_crtc);
+ struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
+ struct drm_crtc_state *old_state =
+ drm_atomic_get_old_crtc_state(state, drm_crtc);
+ struct drm_crtc_state *new_state =
+ drm_atomic_get_new_crtc_state(state, drm_crtc);
+ struct drm_display_mode *mode = &new_state->adjusted_mode;
+
+ struct drm_device *drm_dev = drm_crtc->dev;
+ unsigned int hact, hfp, hsl, hbp;
+ unsigned int vact, vfp, vsl, vbp;
+ unsigned long flags;
+ u32 ctrl;
+
+ /* Timings */
+
+ hact = mode->hdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsl = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vact = mode->vdisplay;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsl = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ regmap_write(logicvc->regmap, LOGICVC_HSYNC_FRONT_PORCH_REG, hfp - 1);
+ regmap_write(logicvc->regmap, LOGICVC_HSYNC_REG, hsl - 1);
+ regmap_write(logicvc->regmap, LOGICVC_HSYNC_BACK_PORCH_REG, hbp - 1);
+ regmap_write(logicvc->regmap, LOGICVC_HRES_REG, hact - 1);
+
+ regmap_write(logicvc->regmap, LOGICVC_VSYNC_FRONT_PORCH_REG, vfp - 1);
+ regmap_write(logicvc->regmap, LOGICVC_VSYNC_REG, vsl - 1);
+ regmap_write(logicvc->regmap, LOGICVC_VSYNC_BACK_PORCH_REG, vbp - 1);
+ regmap_write(logicvc->regmap, LOGICVC_VRES_REG, vact - 1);
+
+ /* Signals */
+
+ ctrl = LOGICVC_CTRL_HSYNC_ENABLE | LOGICVC_CTRL_VSYNC_ENABLE |
+ LOGICVC_CTRL_DE_ENABLE;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= LOGICVC_CTRL_HSYNC_INVERT;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= LOGICVC_CTRL_VSYNC_INVERT;
+
+ if (logicvc->interface) {
+ struct drm_connector *connector =
+ &logicvc->interface->drm_connector;
+ struct drm_display_info *display_info =
+ &connector->display_info;
+
+ if (display_info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ ctrl |= LOGICVC_CTRL_DE_INVERT;
+
+ if (display_info->bus_flags &
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ ctrl |= LOGICVC_CTRL_CLOCK_INVERT;
+ }
+
+ regmap_update_bits(logicvc->regmap, LOGICVC_CTRL_REG,
+ LOGICVC_CTRL_HSYNC_ENABLE |
+ LOGICVC_CTRL_HSYNC_INVERT |
+ LOGICVC_CTRL_VSYNC_ENABLE |
+ LOGICVC_CTRL_VSYNC_INVERT |
+ LOGICVC_CTRL_DE_ENABLE |
+ LOGICVC_CTRL_DE_INVERT |
+ LOGICVC_CTRL_PIXEL_INVERT |
+ LOGICVC_CTRL_CLOCK_INVERT, ctrl);
+
+ /* Generate internal state reset. */
+ regmap_write(logicvc->regmap, LOGICVC_DTYPE_REG, 0);
+
+ drm_crtc_vblank_on(drm_crtc);
+
+ /* Register our event after vblank is enabled. */
+ if (drm_crtc->state->event && !old_state->active) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ WARN_ON(drm_crtc_vblank_get(drm_crtc) != 0);
+
+ crtc->event = drm_crtc->state->event;
+ drm_crtc->state->event = NULL;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+}
+
+static void logicvc_crtc_atomic_disable(struct drm_crtc *drm_crtc,
+ struct drm_atomic_state *state)
+{
+ struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
+ struct drm_device *drm_dev = drm_crtc->dev;
+
+ drm_crtc_vblank_off(drm_crtc);
+
+ /* Disable and clear CRTC bits. */
+ regmap_update_bits(logicvc->regmap, LOGICVC_CTRL_REG,
+ LOGICVC_CTRL_HSYNC_ENABLE |
+ LOGICVC_CTRL_HSYNC_INVERT |
+ LOGICVC_CTRL_VSYNC_ENABLE |
+ LOGICVC_CTRL_VSYNC_INVERT |
+ LOGICVC_CTRL_DE_ENABLE |
+ LOGICVC_CTRL_DE_INVERT |
+ LOGICVC_CTRL_PIXEL_INVERT |
+ LOGICVC_CTRL_CLOCK_INVERT, 0);
+
+ /* Generate internal state reset. */
+ regmap_write(logicvc->regmap, LOGICVC_DTYPE_REG, 0);
+
+ /* Consume any leftover event since vblank is now disabled. */
+ if (drm_crtc->state->event && !drm_crtc->state->active) {
+ spin_lock_irq(&drm_dev->event_lock);
+
+ drm_crtc_send_vblank_event(drm_crtc, drm_crtc->state->event);
+ drm_crtc->state->event = NULL;
+ spin_unlock_irq(&drm_dev->event_lock);
+ }
+}
+
+static const struct drm_crtc_helper_funcs logicvc_crtc_helper_funcs = {
+ .mode_valid = logicvc_crtc_mode_valid,
+ .atomic_begin = logicvc_crtc_atomic_begin,
+ .atomic_enable = logicvc_crtc_atomic_enable,
+ .atomic_disable = logicvc_crtc_atomic_disable,
+};
+
+static int logicvc_crtc_enable_vblank(struct drm_crtc *drm_crtc)
+{
+ struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
+
+ /* Clear any pending V_SYNC interrupt. */
+ regmap_write_bits(logicvc->regmap, LOGICVC_INT_STAT_REG,
+ LOGICVC_INT_STAT_V_SYNC, LOGICVC_INT_STAT_V_SYNC);
+
+ /* Unmask V_SYNC interrupt. */
+ regmap_write_bits(logicvc->regmap, LOGICVC_INT_MASK_REG,
+ LOGICVC_INT_MASK_V_SYNC, 0);
+
+ return 0;
+}
+
+static void logicvc_crtc_disable_vblank(struct drm_crtc *drm_crtc)
+{
+ struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
+
+ /* Mask V_SYNC interrupt. */
+ regmap_write_bits(logicvc->regmap, LOGICVC_INT_MASK_REG,
+ LOGICVC_INT_MASK_V_SYNC, LOGICVC_INT_MASK_V_SYNC);
+}
+
+static const struct drm_crtc_funcs logicvc_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = logicvc_crtc_enable_vblank,
+ .disable_vblank = logicvc_crtc_disable_vblank,
+};
+
+void logicvc_crtc_vblank_handler(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct logicvc_crtc *crtc = logicvc->crtc;
+ unsigned long flags;
+
+ if (!crtc)
+ return;
+
+ drm_crtc_handle_vblank(&crtc->drm_crtc);
+
+ if (crtc->event) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ drm_crtc_send_vblank_event(&crtc->drm_crtc, crtc->event);
+ drm_crtc_vblank_put(&crtc->drm_crtc);
+ crtc->event = NULL;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+}
+
+int logicvc_crtc_init(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_crtc *crtc;
+ struct logicvc_layer *layer_primary;
+ int ret;
+
+ crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ layer_primary = logicvc_layer_get_primary(logicvc);
+ if (!layer_primary) {
+ drm_err(drm_dev, "Failed to get primary layer\n");
+ return -EINVAL;
+ }
+
+ ret = drm_crtc_init_with_planes(drm_dev, &crtc->drm_crtc,
+ &layer_primary->drm_plane, NULL,
+ &logicvc_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(&crtc->drm_crtc, &logicvc_crtc_helper_funcs);
+
+ crtc->drm_crtc.port = of_graph_get_port_by_id(of_node, 1);
+
+ logicvc->crtc = crtc;
+
+ return 0;
+}
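
logicvc_crtc_atomic_enable() above derives the front porch, sync length and back porch for each axis from the adjusted DRM mode and programs every timing register with the value minus one, which is how this driver defines the LogiCVC timing registers. A minimal stand-alone sketch of that decomposition, fed with the standard 640x480@60 (VGA) timings:

#include <stdio.h>

struct mode {
	unsigned int hdisplay, hsync_start, hsync_end, htotal;
	unsigned int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	/* Standard 640x480@60 (VGA) timings. */
	struct mode m = {
		.hdisplay = 640, .hsync_start = 656, .hsync_end = 752, .htotal = 800,
		.vdisplay = 480, .vsync_start = 490, .vsync_end = 492, .vtotal = 525,
	};
	unsigned int hfp = m.hsync_start - m.hdisplay;	/* horizontal front porch */
	unsigned int hsl = m.hsync_end - m.hsync_start;	/* horizontal sync length */
	unsigned int hbp = m.htotal - m.hsync_end;	/* horizontal back porch */
	unsigned int vfp = m.vsync_start - m.vdisplay;
	unsigned int vsl = m.vsync_end - m.vsync_start;
	unsigned int vbp = m.vtotal - m.vsync_end;

	/* Register values are programmed minus one, as in atomic_enable(). */
	printf("HFP=%u HSL=%u HBP=%u HRES=%u\n", hfp - 1, hsl - 1, hbp - 1, m.hdisplay - 1);
	printf("VFP=%u VSL=%u VBP=%u VRES=%u\n", vfp - 1, vsl - 1, vbp - 1, m.vdisplay - 1);
	return 0;
}
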
diff --git a/drivers/gpu/drm/logicvc/logicvc_crtc.h b/drivers/gpu/drm/logicvc/logicvc_crtc.h
new file mode 100644
index 000000000000..b122901f2936
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_crtc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _LOGICVC_CRTC_H_
+#define _LOGICVC_CRTC_H_
+
+struct drm_pending_vblank_event;
+struct logicvc_drm;
+
+struct logicvc_crtc {
+ struct drm_crtc drm_crtc;
+ struct drm_pending_vblank_event *event;
+};
+
+void logicvc_crtc_vblank_handler(struct logicvc_drm *logicvc);
+int logicvc_crtc_init(struct logicvc_drm *logicvc);
+
+#endif
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
new file mode 100644
index 000000000000..65a050176c33
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_print.h>
+
+#include "logicvc_crtc.h"
+#include "logicvc_drm.h"
+#include "logicvc_interface.h"
+#include "logicvc_mode.h"
+#include "logicvc_layer.h"
+#include "logicvc_of.h"
+#include "logicvc_regs.h"
+
+DEFINE_DRM_GEM_CMA_FOPS(logicvc_drm_fops);
+
+static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct logicvc_drm *logicvc = logicvc_drm(drm_dev);
+
+ /* Stride is always fixed to its configuration value. */
+ args->pitch = logicvc->config.row_stride * DIV_ROUND_UP(args->bpp, 8);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm_dev, args);
+}
+
+static struct drm_driver logicvc_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_ATOMIC,
+
+ .fops = &logicvc_drm_fops,
+ .name = "logicvc-drm",
+ .desc = "Xylon LogiCVC DRM driver",
+ .date = "20200403",
+ .major = 1,
+ .minor = 0,
+
+ DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_cma_dumb_create),
+};
+
+static struct regmap_config logicvc_drm_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "logicvc-drm",
+};
+
+static irqreturn_t logicvc_drm_irq_handler(int irq, void *data)
+{
+ struct logicvc_drm *logicvc = data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 stat = 0;
+
+ /* Get pending interrupt sources. */
+ regmap_read(logicvc->regmap, LOGICVC_INT_STAT_REG, &stat);
+
+ /* Clear all pending interrupt sources. */
+ regmap_write(logicvc->regmap, LOGICVC_INT_STAT_REG, stat);
+
+ if (stat & LOGICVC_INT_STAT_V_SYNC) {
+ logicvc_crtc_vblank_handler(logicvc);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_drm_config *config = &logicvc->config;
+ struct device_node *layers_node;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
+ &config->dithering);
+ logicvc_of_property_parse_bool(of_node,
+ LOGICVC_OF_PROPERTY_BACKGROUND_LAYER,
+ &config->background_layer);
+ logicvc_of_property_parse_bool(of_node,
+ LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE,
+ &config->layers_configurable);
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE,
+ &config->display_interface);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE,
+ &config->display_colorspace);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_DISPLAY_DEPTH,
+ &config->display_depth);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_ROW_STRIDE,
+ &config->row_stride);
+ if (ret)
+ return ret;
+
+ layers_node = of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "Missing non-optional layers node\n");
+ return -EINVAL;
+ }
+
+ config->layers_count = of_get_child_count(layers_node);
+ if (!config->layers_count) {
+ drm_err(drm_dev,
+ "Missing a non-optional layers children node\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int logicvc_clocks_prepare(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct device *dev = drm_dev->dev;
+
+ struct {
+ struct clk **clk;
+ char *name;
+ bool optional;
+ } clocks_map[] = {
+ {
+ .clk = &logicvc->vclk,
+ .name = "vclk",
+ .optional = false,
+ },
+ {
+ .clk = &logicvc->vclk2,
+ .name = "vclk2",
+ .optional = true,
+ },
+ {
+ .clk = &logicvc->lvdsclk,
+ .name = "lvdsclk",
+ .optional = true,
+ },
+ {
+ .clk = &logicvc->lvdsclkn,
+ .name = "lvdsclkn",
+ .optional = true,
+ },
+ };
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(clocks_map); i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, clocks_map[i].name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -ENOENT && clocks_map[i].optional)
+ continue;
+
+ drm_err(drm_dev, "Missing non-optional clock %s\n",
+ clocks_map[i].name);
+
+ ret = PTR_ERR(clk);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ drm_err(drm_dev,
+ "Failed to prepare and enable clock %s\n",
+ clocks_map[i].name);
+ goto error;
+ }
+
+ *clocks_map[i].clk = clk;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < ARRAY_SIZE(clocks_map); i++) {
+ if (!*clocks_map[i].clk)
+ continue;
+
+ clk_disable_unprepare(*clocks_map[i].clk);
+ *clocks_map[i].clk = NULL;
+ }
+
+ return ret;
+}
+
+static int logicvc_clocks_unprepare(struct logicvc_drm *logicvc)
+{
+ struct clk **clocks[] = {
+ &logicvc->vclk,
+ &logicvc->vclk2,
+ &logicvc->lvdsclk,
+ &logicvc->lvdsclkn,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(clocks); i++) {
+ if (!*clocks[i])
+ continue;
+
+ clk_disable_unprepare(*clocks[i]);
+ *clocks[i] = NULL;
+ }
+
+ return 0;
+}
+
+static const struct logicvc_drm_caps logicvc_drm_caps[] = {
+ {
+ .major = 3,
+ .layer_address = false,
+ },
+ {
+ .major = 4,
+ .layer_address = true,
+ },
+ {
+ .major = 5,
+ .layer_address = true,
+ },
+};
+
+static const struct logicvc_drm_caps *
+logicvc_drm_caps_match(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ const struct logicvc_drm_caps *caps = NULL;
+ unsigned int major, minor;
+ char level;
+ unsigned int i;
+ u32 version;
+
+ regmap_read(logicvc->regmap, LOGICVC_IP_VERSION_REG, &version);
+
+ major = FIELD_GET(LOGICVC_IP_VERSION_MAJOR_MASK, version);
+ minor = FIELD_GET(LOGICVC_IP_VERSION_MINOR_MASK, version);
+ level = FIELD_GET(LOGICVC_IP_VERSION_LEVEL_MASK, version) + 'a';
+
+ for (i = 0; i < ARRAY_SIZE(logicvc_drm_caps); i++) {
+ if (logicvc_drm_caps[i].major &&
+ logicvc_drm_caps[i].major != major)
+ continue;
+
+ if (logicvc_drm_caps[i].minor &&
+ logicvc_drm_caps[i].minor != minor)
+ continue;
+
+ if (logicvc_drm_caps[i].level &&
+ logicvc_drm_caps[i].level != level)
+ continue;
+
+ caps = &logicvc_drm_caps[i];
+ }
+
+ drm_info(drm_dev, "LogiCVC version %d.%02d.%c\n", major, minor, level);
+
+ return caps;
+}
+
+static int logicvc_drm_probe(struct platform_device *pdev)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ struct device_node *reserved_mem_node;
+ struct reserved_mem *reserved_mem = NULL;
+ const struct logicvc_drm_caps *caps;
+ struct logicvc_drm *logicvc;
+ struct device *dev = &pdev->dev;
+ struct drm_device *drm_dev;
+ struct regmap *regmap = NULL;
+ struct resource res;
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "Failed to init memory region\n");
+ goto error_early;
+ }
+
+ reserved_mem_node = of_parse_phandle(of_node, "memory-region", 0);
+ if (reserved_mem_node) {
+ reserved_mem = of_reserved_mem_lookup(reserved_mem_node);
+ of_node_put(reserved_mem_node);
+ }
+
+ /* Get regmap from parent if available. */
+ if (of_node->parent)
+ regmap = syscon_node_to_regmap(of_node->parent);
+
+ /* Register our own regmap otherwise. */
+ if (IS_ERR_OR_NULL(regmap)) {
+ ret = of_address_to_resource(of_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get resource from address\n");
+ goto error_reserved_mem;
+ }
+
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to map I/O base\n");
+ ret = PTR_ERR(base);
+ goto error_reserved_mem;
+ }
+
+ logicvc_drm_regmap_config.max_register = resource_size(&res) -
+ 4;
+
+ regmap = devm_regmap_init_mmio(dev, base,
+ &logicvc_drm_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to create regmap for I/O\n");
+ ret = PTR_ERR(regmap);
+ goto error_reserved_mem;
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ goto error_reserved_mem;
+ }
+
+ logicvc = devm_drm_dev_alloc(dev, &logicvc_drm_driver,
+ struct logicvc_drm, drm_dev);
+ if (IS_ERR(logicvc)) {
+ ret = PTR_ERR(logicvc);
+ goto error_reserved_mem;
+ }
+
+ platform_set_drvdata(pdev, logicvc);
+ drm_dev = &logicvc->drm_dev;
+
+ logicvc->regmap = regmap;
+ INIT_LIST_HEAD(&logicvc->layers_list);
+
+ caps = logicvc_drm_caps_match(logicvc);
+ if (!caps) {
+ ret = -EINVAL;
+ goto error_reserved_mem;
+ }
+
+ logicvc->caps = caps;
+
+ if (reserved_mem)
+ logicvc->reserved_mem_base = reserved_mem->base;
+
+ ret = logicvc_clocks_prepare(logicvc);
+ if (ret) {
+ drm_err(drm_dev, "Failed to prepare clocks\n");
+ goto error_reserved_mem;
+ }
+
+ ret = devm_request_irq(dev, irq, logicvc_drm_irq_handler, 0,
+ dev_name(dev), logicvc);
+ if (ret) {
+ drm_err(drm_dev, "Failed to request IRQ\n");
+ goto error_clocks;
+ }
+
+ ret = logicvc_drm_config_parse(logicvc);
+ if (ret && ret != -ENODEV) {
+ drm_err(drm_dev, "Failed to parse config\n");
+ goto error_clocks;
+ }
+
+ ret = drmm_mode_config_init(drm_dev);
+ if (ret) {
+ drm_err(drm_dev, "Failed to init mode config\n");
+ goto error_clocks;
+ }
+
+ ret = logicvc_layers_init(logicvc);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize layers\n");
+ goto error_clocks;
+ }
+
+ ret = logicvc_crtc_init(logicvc);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize CRTC\n");
+ goto error_clocks;
+ }
+
+ logicvc_layers_attach_crtc(logicvc);
+
+ ret = logicvc_interface_init(logicvc);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ drm_err(drm_dev, "Failed to initialize interface\n");
+
+ goto error_clocks;
+ }
+
+ logicvc_interface_attach_crtc(logicvc);
+
+ ret = logicvc_mode_init(logicvc);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize KMS\n");
+ goto error_clocks;
+ }
+
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret) {
+ drm_err(drm_dev, "Failed to register DRM device\n");
+ goto error_mode;
+ }
+
+ drm_fbdev_generic_setup(drm_dev, drm_dev->mode_config.preferred_depth);
+
+ return 0;
+
+error_mode:
+ logicvc_mode_fini(logicvc);
+
+error_clocks:
+ logicvc_clocks_unprepare(logicvc);
+
+error_reserved_mem:
+ of_reserved_mem_device_release(dev);
+
+error_early:
+ return ret;
+}
+
+static int logicvc_drm_remove(struct platform_device *pdev)
+{
+ struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+
+ drm_dev_unregister(drm_dev);
+ drm_atomic_helper_shutdown(drm_dev);
+
+ logicvc_mode_fini(logicvc);
+
+ logicvc_clocks_unprepare(logicvc);
+
+ of_reserved_mem_device_release(dev);
+
+ return 0;
+}
+
+static const struct of_device_id logicvc_drm_of_table[] = {
+ { .compatible = "xylon,logicvc-3.02.a-display" },
+ { .compatible = "xylon,logicvc-4.01.a-display" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
+
+static struct platform_driver logicvc_drm_platform_driver = {
+ .probe = logicvc_drm_probe,
+ .remove = logicvc_drm_remove,
+ .driver = {
+ .name = "logicvc-drm",
+ .of_match_table = logicvc_drm_of_table,
+ },
+};
+
+module_platform_driver(logicvc_drm_platform_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("Xylon LogiCVC DRM driver");
+MODULE_LICENSE("GPL");
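
logicvc_drm_gem_cma_dumb_create() above overrides the pitch that the generic dumb-buffer path would derive from the requested width: the hardware scans out with a fixed row stride parsed in logicvc_drm_config_parse(), so the pitch is always row_stride * bytes-per-pixel. A minimal stand-alone sketch of that computation, with a hypothetical row stride and format standing in for the configured values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int row_stride = 2048;	/* hypothetical row-stride configuration value */
	unsigned int bpp = 32;		/* depth requested by the dumb-create ioctl */
	unsigned int width = 1920;	/* requested width, ignored for the pitch */

	unsigned int pitch = row_stride * DIV_ROUND_UP(bpp, 8);

	printf("width %u -> pitch %u bytes (a generic helper would pick %u)\n",
	       width, pitch, width * DIV_ROUND_UP(bpp, 8));
	return 0;
}
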
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.h b/drivers/gpu/drm/logicvc/logicvc_drm.h
new file mode 100644
index 000000000000..e0f4787c69f9
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _LOGICVC_DRM_H_
+#define _LOGICVC_DRM_H_
+
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <drm/drm_device.h>
+
+#define LOGICVC_DISPLAY_INTERFACE_RGB 0
+#define LOGICVC_DISPLAY_INTERFACE_ITU656 1
+#define LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS 2
+#define LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA 3
+#define LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS 4
+#define LOGICVC_DISPLAY_INTERFACE_DVI 5
+
+#define LOGICVC_DISPLAY_COLORSPACE_RGB 0
+#define LOGICVC_DISPLAY_COLORSPACE_YUV422 1
+#define LOGICVC_DISPLAY_COLORSPACE_YUV444 2
+
+#define logicvc_drm(d) \
+ container_of(d, struct logicvc_drm, drm_dev)
+
+struct logicvc_crtc;
+struct logicvc_interface;
+
+struct logicvc_drm_config {
+ u32 display_interface;
+ u32 display_colorspace;
+ u32 display_depth;
+ u32 row_stride;
+ bool dithering;
+ bool background_layer;
+ bool layers_configurable;
+ u32 layers_count;
+};
+
+struct logicvc_drm_caps {
+ unsigned int major;
+ unsigned int minor;
+ char level;
+ bool layer_address;
+};
+
+struct logicvc_drm {
+ const struct logicvc_drm_caps *caps;
+ struct logicvc_drm_config config;
+
+ struct drm_device drm_dev;
+ phys_addr_t reserved_mem_base;
+ struct regmap *regmap;
+
+ struct clk *vclk;
+ struct clk *vclk2;
+ struct clk *lvdsclk;
+ struct clk *lvdsclkn;
+
+ struct list_head layers_list;
+ struct logicvc_crtc *crtc;
+ struct logicvc_interface *interface;
+};
+
+#endif
diff --git a/drivers/gpu/drm/logicvc/logicvc_interface.c b/drivers/gpu/drm/logicvc/logicvc_interface.c
new file mode 100644
index 000000000000..c73592f6c406
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_interface.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "logicvc_crtc.h"
+#include "logicvc_drm.h"
+#include "logicvc_interface.h"
+#include "logicvc_regs.h"
+
+#define logicvc_interface_from_drm_encoder(c) \
+ container_of(c, struct logicvc_interface, drm_encoder)
+#define logicvc_interface_from_drm_connector(c) \
+ container_of(c, struct logicvc_interface, drm_connector)
+
+static void logicvc_encoder_enable(struct drm_encoder *drm_encoder)
+{
+ struct logicvc_drm *logicvc = logicvc_drm(drm_encoder->dev);
+ struct logicvc_interface *interface =
+ logicvc_interface_from_drm_encoder(drm_encoder);
+
+ regmap_update_bits(logicvc->regmap, LOGICVC_POWER_CTRL_REG,
+ LOGICVC_POWER_CTRL_VIDEO_ENABLE,
+ LOGICVC_POWER_CTRL_VIDEO_ENABLE);
+
+ if (interface->drm_panel) {
+ drm_panel_prepare(interface->drm_panel);
+ drm_panel_enable(interface->drm_panel);
+ }
+}
+
+static void logicvc_encoder_disable(struct drm_encoder *drm_encoder)
+{
+ struct logicvc_interface *interface =
+ logicvc_interface_from_drm_encoder(drm_encoder);
+
+ if (interface->drm_panel) {
+ drm_panel_disable(interface->drm_panel);
+ drm_panel_unprepare(interface->drm_panel);
+ }
+}
+
+static const struct drm_encoder_helper_funcs logicvc_encoder_helper_funcs = {
+ .enable = logicvc_encoder_enable,
+ .disable = logicvc_encoder_disable,
+};
+
+static const struct drm_encoder_funcs logicvc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int logicvc_connector_get_modes(struct drm_connector *drm_connector)
+{
+ struct logicvc_interface *interface =
+ logicvc_interface_from_drm_connector(drm_connector);
+
+ if (interface->drm_panel)
+ return drm_panel_get_modes(interface->drm_panel, drm_connector);
+
+ WARN_ONCE(1, "Retrieving modes from a native connector is not implemented.");
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs logicvc_connector_helper_funcs = {
+ .get_modes = logicvc_connector_get_modes,
+};
+
+static const struct drm_connector_funcs logicvc_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int logicvc_interface_encoder_type(struct logicvc_drm *logicvc)
+{
+ switch (logicvc->config.display_interface) {
+ case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS:
+ case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA:
+ case LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS:
+ return DRM_MODE_ENCODER_LVDS;
+ case LOGICVC_DISPLAY_INTERFACE_DVI:
+ return DRM_MODE_ENCODER_TMDS;
+ case LOGICVC_DISPLAY_INTERFACE_RGB:
+ return DRM_MODE_ENCODER_DPI;
+ default:
+ return DRM_MODE_ENCODER_NONE;
+ }
+}
+
+static int logicvc_interface_connector_type(struct logicvc_drm *logicvc)
+{
+ switch (logicvc->config.display_interface) {
+ case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS:
+ case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA:
+ case LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ case LOGICVC_DISPLAY_INTERFACE_DVI:
+ return DRM_MODE_CONNECTOR_DVID;
+ case LOGICVC_DISPLAY_INTERFACE_RGB:
+ return DRM_MODE_CONNECTOR_DPI;
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static bool logicvc_interface_native_connector(struct logicvc_drm *logicvc)
+{
+ switch (logicvc->config.display_interface) {
+ case LOGICVC_DISPLAY_INTERFACE_DVI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void logicvc_interface_attach_crtc(struct logicvc_drm *logicvc)
+{
+ uint32_t possible_crtcs = drm_crtc_mask(&logicvc->crtc->drm_crtc);
+
+ logicvc->interface->drm_encoder.possible_crtcs = possible_crtcs;
+}
+
+int logicvc_interface_init(struct logicvc_drm *logicvc)
+{
+ struct logicvc_interface *interface;
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ int encoder_type = logicvc_interface_encoder_type(logicvc);
+ int connector_type = logicvc_interface_connector_type(logicvc);
+ bool native_connector = logicvc_interface_native_connector(logicvc);
+ int ret;
+
+ interface = devm_kzalloc(dev, sizeof(*interface), GFP_KERNEL);
+ if (!interface) {
+ ret = -ENOMEM;
+ goto error_early;
+ }
+
+ ret = drm_of_find_panel_or_bridge(of_node, 0, 0, &interface->drm_panel,
+ &interface->drm_bridge);
+ if (ret == -EPROBE_DEFER)
+ goto error_early;
+
+ ret = drm_encoder_init(drm_dev, &interface->drm_encoder,
+ &logicvc_encoder_funcs, encoder_type, NULL);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize encoder\n");
+ goto error_early;
+ }
+
+ drm_encoder_helper_add(&interface->drm_encoder,
+ &logicvc_encoder_helper_funcs);
+
+ if (native_connector || interface->drm_panel) {
+ ret = drm_connector_init(drm_dev, &interface->drm_connector,
+ &logicvc_connector_funcs,
+ connector_type);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize connector\n");
+ goto error_encoder;
+ }
+
+ drm_connector_helper_add(&interface->drm_connector,
+ &logicvc_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(&interface->drm_connector,
+ &interface->drm_encoder);
+ if (ret) {
+ drm_err(drm_dev,
+ "Failed to attach connector to encoder\n");
+ goto error_encoder;
+ }
+ }
+
+ if (interface->drm_bridge) {
+ ret = drm_bridge_attach(&interface->drm_encoder,
+ interface->drm_bridge, NULL, 0);
+ if (ret) {
+ drm_err(drm_dev,
+ "Failed to attach bridge to encoder\n");
+ goto error_encoder;
+ }
+ }
+
+ logicvc->interface = interface;
+
+ return 0;
+
+error_encoder:
+ drm_encoder_cleanup(&interface->drm_encoder);
+
+error_early:
+ return ret;
+}
diff --git a/drivers/gpu/drm/logicvc/logicvc_interface.h b/drivers/gpu/drm/logicvc/logicvc_interface.h
new file mode 100644
index 000000000000..fd709fad54f9
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_interface.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _LOGICVC_INTERFACE_H_
+#define _LOGICVC_INTERFACE_H_
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_panel.h>
+
+struct logicvc_drm;
+
+struct logicvc_interface {
+ struct drm_encoder drm_encoder;
+ struct drm_connector drm_connector;
+
+ struct drm_panel *drm_panel;
+ struct drm_bridge *drm_bridge;
+};
+
+void logicvc_interface_attach_crtc(struct logicvc_drm *logicvc);
+int logicvc_interface_init(struct logicvc_drm *logicvc);
+
+#endif
diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c
new file mode 100644
index 000000000000..441e3cfce4cf
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_layer.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+
+#include "logicvc_crtc.h"
+#include "logicvc_drm.h"
+#include "logicvc_layer.h"
+#include "logicvc_of.h"
+#include "logicvc_regs.h"
+
+#define logicvc_layer(p) \
+ container_of(p, struct logicvc_layer, drm_plane)
+
+static uint32_t logicvc_layer_formats_rgb16[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_INVALID,
+};
+
+static uint32_t logicvc_layer_formats_rgb24[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_INVALID,
+};
+
+/*
+ * What we call depth in this driver only counts color components, not alpha.
+ * This allows us to stay compatible with the LogiCVC bitstream definitions.
+ */
+static uint32_t logicvc_layer_formats_rgb24_alpha[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_INVALID,
+};
+
+static struct logicvc_layer_formats logicvc_layer_formats[] = {
+ {
+ .colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
+ .depth = 16,
+ .formats = logicvc_layer_formats_rgb16,
+ },
+ {
+ .colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
+ .depth = 24,
+ .formats = logicvc_layer_formats_rgb24,
+ },
+ {
+ .colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
+ .depth = 24,
+ .alpha = true,
+ .formats = logicvc_layer_formats_rgb24_alpha,
+ },
+ { }
+};
+
+static bool logicvc_layer_format_inverted(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm_dev = drm_plane->dev;
+ struct logicvc_layer *layer = logicvc_layer(drm_plane);
+ struct logicvc_drm *logicvc = logicvc_drm(drm_dev);
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, drm_plane);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ bool can_position;
+ int ret;
+
+ if (!new_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(new_state->state,
+ new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (new_state->crtc_x < 0 || new_state->crtc_y < 0) {
+ drm_err(drm_dev,
+ "Negative on-CRTC positions are not supported.\n");
+ return -EINVAL;
+ }
+
+ if (!logicvc->caps->layer_address) {
+ ret = logicvc_layer_buffer_find_setup(logicvc, layer, new_state,
+ NULL);
+ if (ret) {
+ drm_err(drm_dev, "No viable setup for buffer found.\n");
+ return ret;
+ }
+ }
+
+ min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = DRM_PLANE_HELPER_NO_SCALING;
+
+ can_position = (drm_plane->type == DRM_PLANE_TYPE_OVERLAY &&
+ layer->index != (logicvc->config.layers_count - 1) &&
+ logicvc->config.layers_configurable);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ min_scale, max_scale,
+ can_position, true);
+ if (ret) {
+ drm_err(drm_dev, "Invalid plane state\n\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void logicvc_plane_atomic_update(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct logicvc_layer *layer = logicvc_layer(drm_plane);
+ struct logicvc_drm *logicvc = logicvc_drm(drm_plane->dev);
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, drm_plane);
+ struct drm_crtc *drm_crtc = &logicvc->crtc->drm_crtc;
+ struct drm_display_mode *mode = &drm_crtc->state->adjusted_mode;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct logicvc_layer_buffer_setup setup = {};
+ u32 index = layer->index;
+ u32 reg;
+
+ /* Layer dimensions */
+
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_WIDTH_REG(index),
+ new_state->crtc_w - 1);
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_HEIGHT_REG(index),
+ new_state->crtc_h - 1);
+
+ if (logicvc->caps->layer_address) {
+ phys_addr_t fb_addr = drm_fb_cma_get_gem_addr(fb, new_state, 0);
+
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_ADDRESS_REG(index),
+ fb_addr);
+ } else {
+ /* Rely on offsets to configure the address. */
+
+ logicvc_layer_buffer_find_setup(logicvc, layer, new_state,
+ &setup);
+
+ /* Layer memory offsets */
+
+ regmap_write(logicvc->regmap, LOGICVC_BUFFER_SEL_REG,
+ LOGICVC_BUFFER_SEL_VALUE(index, setup.buffer_sel));
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_HOFFSET_REG(index),
+ setup.hoffset);
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_VOFFSET_REG(index),
+ setup.voffset);
+ }
+
+ /* Layer position */
+
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_HPOSITION_REG(index),
+ mode->hdisplay - 1 - new_state->crtc_x);
+
+ /* Vertical position must be set last to sync layer register changes. */
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_VPOSITION_REG(index),
+ mode->vdisplay - 1 - new_state->crtc_y);
+
+ /* Layer alpha */
+
+ if (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_LAYER) {
+ u32 alpha_bits;
+ u32 alpha_max;
+ u32 alpha;
+
+ switch (layer->config.depth) {
+ case 8:
+ alpha_bits = 3;
+ break;
+ case 16:
+ if (layer->config.colorspace ==
+ LOGICVC_LAYER_COLORSPACE_YUV)
+ alpha_bits = 8;
+ else
+ alpha_bits = 6;
+ break;
+ default:
+ alpha_bits = 8;
+ break;
+ }
+
+ alpha_max = BIT(alpha_bits) - 1;
+ alpha = new_state->alpha * alpha_max / DRM_BLEND_ALPHA_OPAQUE;
+
+ drm_dbg_kms(drm_dev, "Setting layer %d alpha to %d/%d\n", index,
+ alpha, alpha_max);
+
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_ALPHA_REG(index),
+ alpha);
+ }
+
+ /* Layer control */
+
+ reg = LOGICVC_LAYER_CTRL_ENABLE;
+
+ if (logicvc_layer_format_inverted(fb->format->format))
+ reg |= LOGICVC_LAYER_CTRL_PIXEL_FORMAT_INVERT;
+
+ reg |= LOGICVC_LAYER_CTRL_COLOR_KEY_DISABLE;
+
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_CTRL_REG(index), reg);
+}
+
+static void logicvc_plane_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct logicvc_layer *layer = logicvc_layer(drm_plane);
+ struct logicvc_drm *logicvc = logicvc_drm(drm_plane->dev);
+ u32 index = layer->index;
+
+ regmap_write(logicvc->regmap, LOGICVC_LAYER_CTRL_REG(index), 0);
+}
+
+static struct drm_plane_helper_funcs logicvc_plane_helper_funcs = {
+ .atomic_check = logicvc_plane_atomic_check,
+ .atomic_update = logicvc_plane_atomic_update,
+ .atomic_disable = logicvc_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs logicvc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
+ struct logicvc_layer *layer,
+ struct drm_plane_state *state,
+ struct logicvc_layer_buffer_setup *setup)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct drm_framebuffer *fb = state->fb;
+ /* All the supported formats have a single data plane. */
+ u32 layer_bytespp = fb->format->cpp[0];
+ u32 layer_stride = layer_bytespp * logicvc->config.row_stride;
+ u32 base_offset = layer->config.base_offset * layer_stride;
+ u32 buffer_offset = layer->config.buffer_offset * layer_stride;
+ u8 buffer_sel = 0;
+ u16 voffset = 0;
+ u16 hoffset = 0;
+ phys_addr_t fb_addr;
+ u32 fb_offset;
+ u32 gap;
+
+ if (!logicvc->reserved_mem_base) {
+ drm_err(drm_dev, "No reserved memory base was registered!\n");
+ return -ENOMEM;
+ }
+
+ fb_addr = drm_fb_cma_get_gem_addr(fb, state, 0);
+ if (fb_addr < logicvc->reserved_mem_base) {
+ drm_err(drm_dev,
+ "Framebuffer memory below reserved memory base!\n");
+ return -EINVAL;
+ }
+
+ fb_offset = (u32) (fb_addr - logicvc->reserved_mem_base);
+
+ if (fb_offset < base_offset) {
+ drm_err(drm_dev,
+ "Framebuffer offset below layer base offset!\n");
+ return -EINVAL;
+ }
+
+ gap = fb_offset - base_offset;
+
+ /* Use the possible video buffers selection. */
+ if (gap && buffer_offset) {
+ buffer_sel = gap / buffer_offset;
+ if (buffer_sel > LOGICVC_BUFFER_SEL_MAX)
+ buffer_sel = LOGICVC_BUFFER_SEL_MAX;
+
+ gap -= buffer_sel * buffer_offset;
+ }
+
+ /* Use the vertical offset. */
+ if (gap && layer_stride && logicvc->config.layers_configurable) {
+ voffset = gap / layer_stride;
+ if (voffset > LOGICVC_LAYER_VOFFSET_MAX)
+ voffset = LOGICVC_LAYER_VOFFSET_MAX;
+
+ gap -= voffset * layer_stride;
+ }
+
+ /* Use the horizontal offset. */
+ if (gap && layer_bytespp && logicvc->config.layers_configurable) {
+ hoffset = gap / layer_bytespp;
+ if (hoffset > LOGICVC_DIMENSIONS_MAX)
+ hoffset = LOGICVC_DIMENSIONS_MAX;
+
+ gap -= hoffset * layer_bytespp;
+ }
+
+ if (gap) {
+ drm_err(drm_dev,
+ "Unable to find layer %d buffer setup for 0x%x byte gap\n",
+ layer->index, fb_offset - base_offset);
+ return -EINVAL;
+ }
+
+ drm_dbg_kms(drm_dev, "Found layer %d buffer setup for 0x%x byte gap:\n",
+ layer->index, fb_offset - base_offset);
+
+ drm_dbg_kms(drm_dev, "- buffer_sel = 0x%x chunks of 0x%x bytes\n",
+ buffer_sel, buffer_offset);
+ drm_dbg_kms(drm_dev, "- voffset = 0x%x chunks of 0x%x bytes\n", voffset,
+ layer_stride);
+ drm_dbg_kms(drm_dev, "- hoffset = 0x%x chunks of 0x%x bytes\n", hoffset,
+ layer_bytespp);
+
+ if (setup) {
+ setup->buffer_sel = buffer_sel;
+ setup->voffset = voffset;
+ setup->hoffset = hoffset;
+ }
+
+ return 0;
+}
+
+static struct logicvc_layer_formats *logicvc_layer_formats_lookup(struct logicvc_layer *layer)
+{
+ bool alpha;
+ unsigned int i = 0;
+
+ alpha = (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_PIXEL);
+
+ while (logicvc_layer_formats[i].formats) {
+ if (logicvc_layer_formats[i].colorspace == layer->config.colorspace &&
+ logicvc_layer_formats[i].depth == layer->config.depth &&
+ logicvc_layer_formats[i].alpha == alpha)
+ return &logicvc_layer_formats[i];
+
+ i++;
+ }
+
+ return NULL;
+}
+
+static unsigned int logicvc_layer_formats_count(struct logicvc_layer_formats *formats)
+{
+ unsigned int count = 0;
+
+ while (formats->formats[count] != DRM_FORMAT_INVALID)
+ count++;
+
+ return count;
+}
+
+static int logicvc_layer_config_parse(struct logicvc_drm *logicvc,
+ struct logicvc_layer *layer)
+{
+ struct device_node *of_node = layer->of_node;
+ struct logicvc_layer_config *config = &layer->config;
+ int ret;
+
+ logicvc_of_property_parse_bool(of_node,
+ LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
+ &config->primary);
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
+ &config->colorspace);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_LAYER_DEPTH,
+ &config->depth);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE,
+ &config->alpha_mode);
+ if (ret)
+ return ret;
+
+ /*
+ * Memory offset is only relevant without layer address configuration.
+ */
+ if (logicvc->caps->layer_address)
+ return 0;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET,
+ &config->base_offset);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_parse_u32(of_node,
+ LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET,
+ &config->buffer_offset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct logicvc_layer *logicvc_layer_get_from_index(struct logicvc_drm *logicvc,
+ u32 index)
+{
+ struct logicvc_layer *layer;
+
+ list_for_each_entry(layer, &logicvc->layers_list, list)
+ if (layer->index == index)
+ return layer;
+
+ return NULL;
+}
+
+struct logicvc_layer *logicvc_layer_get_from_type(struct logicvc_drm *logicvc,
+ enum drm_plane_type type)
+{
+ struct logicvc_layer *layer;
+
+ list_for_each_entry(layer, &logicvc->layers_list, list)
+ if (layer->drm_plane.type == type)
+ return layer;
+
+ return NULL;
+}
+
+struct logicvc_layer *logicvc_layer_get_primary(struct logicvc_drm *logicvc)
+{
+ return logicvc_layer_get_from_type(logicvc, DRM_PLANE_TYPE_PRIMARY);
+}
+
+static int logicvc_layer_init(struct logicvc_drm *logicvc,
+ struct device_node *of_node, u32 index)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct device *dev = drm_dev->dev;
+ struct logicvc_layer *layer = NULL;
+ struct logicvc_layer_formats *formats;
+ unsigned int formats_count;
+ enum drm_plane_type type;
+ unsigned int zpos;
+ int ret;
+
+ layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ layer->of_node = of_node;
+ layer->index = index;
+
+ ret = logicvc_layer_config_parse(logicvc, layer);
+ if (ret) {
+ drm_err(drm_dev, "Failed to parse config for layer #%d\n",
+ index);
+ goto error;
+ }
+
+ formats = logicvc_layer_formats_lookup(layer);
+ if (!formats) {
+ drm_err(drm_dev, "Failed to lookup formats for layer #%d\n",
+ index);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ formats_count = logicvc_layer_formats_count(formats);
+
+ /* The final layer can be configured as a background layer. */
+ if (logicvc->config.background_layer &&
+ index == (logicvc->config.layers_count - 1)) {
+ /*
+ * A zero value for black is only valid for RGB, not for YUV,
+ * so this will need to take the format into account for YUV.
+ */
+ u32 background = 0;
+
+ drm_dbg_kms(drm_dev, "Using layer #%d as background layer\n",
+ index);
+
+ regmap_write(logicvc->regmap, LOGICVC_BACKGROUND_COLOR_REG,
+ background);
+
+ devm_kfree(dev, layer);
+
+ return 0;
+ }
+
+ if (layer->config.primary)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ ret = drm_universal_plane_init(drm_dev, &layer->drm_plane, 0,
+ &logicvc_plane_funcs, formats->formats,
+ formats_count, NULL, type, NULL);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize layer plane\n");
+ return ret;
+ }
+
+ drm_plane_helper_add(&layer->drm_plane, &logicvc_plane_helper_funcs);
+
+ zpos = logicvc->config.layers_count - index - 1;
+ drm_dbg_kms(drm_dev, "Giving layer #%d zpos %d\n", index, zpos);
+
+ if (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_LAYER)
+ drm_plane_create_alpha_property(&layer->drm_plane);
+
+ drm_plane_create_zpos_immutable_property(&layer->drm_plane, zpos);
+
+ drm_dbg_kms(drm_dev, "Registering layer #%d\n", index);
+
+ layer->formats = formats;
+
+ list_add_tail(&layer->list, &logicvc->layers_list);
+
+ return 0;
+
+error:
+ if (layer)
+ devm_kfree(dev, layer);
+
+ return ret;
+}
+
+static void logicvc_layer_fini(struct logicvc_drm *logicvc,
+ struct logicvc_layer *layer)
+{
+ struct device *dev = logicvc->drm_dev.dev;
+
+ list_del(&layer->list);
+ devm_kfree(dev, layer);
+}
+
+void logicvc_layers_attach_crtc(struct logicvc_drm *logicvc)
+{
+ uint32_t possible_crtcs = drm_crtc_mask(&logicvc->crtc->drm_crtc);
+ struct logicvc_layer *layer;
+
+ list_for_each_entry(layer, &logicvc->layers_list, list) {
+ if (layer->drm_plane.type != DRM_PLANE_TYPE_OVERLAY)
+ continue;
+
+ layer->drm_plane.possible_crtcs = possible_crtcs;
+ }
+}
+
+int logicvc_layers_init(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct device *dev = drm_dev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct device_node *layer_node = NULL;
+ struct device_node *layers_node;
+ struct logicvc_layer *layer;
+ struct logicvc_layer *next;
+ int ret = 0;
+
+ layers_node = of_get_child_by_name(of_node, "layers");
+ if (!layers_node) {
+ drm_err(drm_dev, "No layers node found in the description\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ for_each_child_of_node(layers_node, layer_node) {
+ u32 index = 0;
+
+ if (!logicvc_of_node_is_layer(layer_node))
+ continue;
+
+ ret = of_property_read_u32(layer_node, "reg", &index);
+ if (ret)
+ continue;
+
+ layer = logicvc_layer_get_from_index(logicvc, index);
+ if (layer) {
+ drm_err(drm_dev, "Duplicated entry for layer #%d\n",
+ index);
+ continue;
+ }
+
+ ret = logicvc_layer_init(logicvc, layer_node, index);
+ if (ret) {
+ of_node_put(layers_node);
+ goto error;
+ }
+ }
+
+ of_node_put(layers_node);
+
+ return 0;
+
+error:
+ list_for_each_entry_safe(layer, next, &logicvc->layers_list, list)
+ logicvc_layer_fini(logicvc, layer);
+
+ return ret;
+}
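
logicvc_layer_buffer_find_setup() above expresses the gap between a framebuffer address and its layer's base offset as a buffer-select index (chunks of buffer_offset rows), then a vertical offset in whole rows, then a horizontal offset in pixels, and rejects any non-zero remainder. A minimal stand-alone sketch of that greedy decomposition, with hypothetical stride and offset values standing in for the ones parsed from the device tree (the driver additionally clamps each term to its register maximum, omitted here):

#include <stdio.h>

int main(void)
{
	/* Hypothetical layer configuration (normally parsed from the DT). */
	unsigned int bytespp = 4;			/* XRGB8888 */
	unsigned int stride = 4 * 1024;			/* bytes per hardware row */
	unsigned int buffer_offset = 768 * stride;	/* bytes between video buffers */

	/* Gap between the framebuffer and the layer base offset. */
	unsigned int gap = 2 * buffer_offset + 10 * stride + 16 * bytespp;

	unsigned int buffer_sel = gap / buffer_offset;	/* whole video buffers */
	gap -= buffer_sel * buffer_offset;

	unsigned int voffset = gap / stride;		/* whole rows */
	gap -= voffset * stride;

	unsigned int hoffset = gap / bytespp;		/* whole pixels */
	gap -= hoffset * bytespp;

	/* A non-zero remainder means the framebuffer cannot be addressed. */
	printf("buffer_sel=%u voffset=%u hoffset=%u remainder=%u\n",
	       buffer_sel, voffset, hoffset, gap);
	return 0;
}
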
diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.h b/drivers/gpu/drm/logicvc/logicvc_layer.h
new file mode 100644
index 000000000000..4a4b02e9b819
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_layer.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _LOGICVC_LAYER_H_
+#define _LOGICVC_LAYER_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <drm/drm_plane.h>
+
+#define LOGICVC_LAYER_COLORSPACE_RGB 0
+#define LOGICVC_LAYER_COLORSPACE_YUV 1
+
+#define LOGICVC_LAYER_ALPHA_LAYER 0
+#define LOGICVC_LAYER_ALPHA_PIXEL 1
+
+struct logicvc_layer_buffer_setup {
+ u8 buffer_sel;
+ u16 voffset;
+ u16 hoffset;
+};
+
+struct logicvc_layer_config {
+ u32 colorspace;
+ u32 depth;
+ u32 alpha_mode;
+ u32 base_offset;
+ u32 buffer_offset;
+ bool primary;
+};
+
+struct logicvc_layer_formats {
+ u32 colorspace;
+ u32 depth;
+ bool alpha;
+ uint32_t *formats;
+};
+
+struct logicvc_layer {
+ struct logicvc_layer_config config;
+ struct logicvc_layer_formats *formats;
+ struct device_node *of_node;
+
+ struct drm_plane drm_plane;
+ struct list_head list;
+ u32 index;
+};
+
+int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
+ struct logicvc_layer *layer,
+ struct drm_plane_state *state,
+ struct logicvc_layer_buffer_setup *setup);
+struct logicvc_layer *logicvc_layer_get_from_index(struct logicvc_drm *logicvc,
+ u32 index);
+struct logicvc_layer *logicvc_layer_get_from_type(struct logicvc_drm *logicvc,
+ enum drm_plane_type type);
+struct logicvc_layer *logicvc_layer_get_primary(struct logicvc_drm *logicvc);
+void logicvc_layers_attach_crtc(struct logicvc_drm *logicvc);
+int logicvc_layers_init(struct logicvc_drm *logicvc);
+
+#endif
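
For layers using LOGICVC_LAYER_ALPHA_LAYER, logicvc_plane_atomic_update() in logicvc_layer.c rescales the 16-bit DRM plane alpha (DRM_BLEND_ALPHA_OPAQUE) onto the hardware alpha register, whose width is 3, 6 or 8 bits depending on depth and colorspace. A minimal stand-alone sketch of that rescaling, assuming a 6-bit alpha field:

#include <stdio.h>

#define DRM_BLEND_ALPHA_OPAQUE	0xffff	/* 16-bit DRM plane alpha, fully opaque */

int main(void)
{
	unsigned int alpha_bits = 6;		/* e.g. a 16 bpp RGB layer */
	unsigned int alpha_max = (1u << alpha_bits) - 1;
	unsigned int state_alpha = 0x8000;	/* ~50% from the "alpha" plane property */

	unsigned int alpha = state_alpha * alpha_max / DRM_BLEND_ALPHA_OPAQUE;

	printf("hardware alpha %u/%u\n", alpha, alpha_max);	/* prints 31/63 */
	return 0;
}
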
diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.c b/drivers/gpu/drm/logicvc/logicvc_mode.c
new file mode 100644
index 000000000000..11940704f644
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_mode.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "logicvc_drm.h"
+#include "logicvc_interface.h"
+#include "logicvc_layer.h"
+#include "logicvc_mode.h"
+
+static const struct drm_mode_config_funcs logicvc_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int logicvc_mode_init(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+ struct drm_mode_config *mode_config = &drm_dev->mode_config;
+ struct logicvc_layer *layer_primary;
+ uint32_t preferred_depth;
+ int ret;
+
+ ret = drm_vblank_init(drm_dev, mode_config->num_crtc);
+ if (ret) {
+ drm_err(drm_dev, "Failed to initialize vblank\n");
+ return ret;
+ }
+
+ layer_primary = logicvc_layer_get_primary(logicvc);
+ if (!layer_primary) {
+ drm_err(drm_dev, "Failed to get primary layer\n");
+ return -EINVAL;
+ }
+
+ preferred_depth = layer_primary->formats->depth;
+
+ /* DRM counts alpha in depth, our driver doesn't. */
+ if (layer_primary->formats->alpha)
+ preferred_depth += 8;
+
+ mode_config->min_width = 64;
+ mode_config->max_width = 2048;
+ mode_config->min_height = 1;
+ mode_config->max_height = 2048;
+ mode_config->preferred_depth = preferred_depth;
+ mode_config->funcs = &logicvc_mode_config_funcs;
+
+ drm_mode_config_reset(drm_dev);
+
+ drm_kms_helper_poll_init(drm_dev);
+
+ return 0;
+}
+
+void logicvc_mode_fini(struct logicvc_drm *logicvc)
+{
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+
+ drm_kms_helper_poll_fini(drm_dev);
+}
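
A quick worked note on the preferred_depth computation above (illustrative, not part of the patch): for a primary layer whose formats entry reports depth = 24 with alpha = true, logicvc_mode_init() advertises preferred_depth = 24 + 8 = 32, since DRM counts the alpha bits in the depth while the driver's formats table does not.
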
diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.h b/drivers/gpu/drm/logicvc/logicvc_mode.h
new file mode 100644
index 000000000000..fee538ab1b96
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_mode.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _LOGICVC_MODE_H_
+#define _LOGICVC_MODE_H_
+
+struct logicvc_drm;
+
+int logicvc_mode_init(struct logicvc_drm *logicvc);
+void logicvc_mode_fini(struct logicvc_drm *logicvc);
+
+#endif
diff --git a/drivers/gpu/drm/logicvc/logicvc_of.c b/drivers/gpu/drm/logicvc/logicvc_of.c
new file mode 100644
index 000000000000..e0687730e039
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_of.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <drm/drm_print.h>
+
+#include "logicvc_drm.h"
+#include "logicvc_layer.h"
+#include "logicvc_of.h"
+
+static struct logicvc_of_property_sv logicvc_of_display_interface_sv[] = {
+ { "lvds-4bits", LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS },
+ { "lvds-3bits", LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS },
+ { },
+};
+
+static struct logicvc_of_property_sv logicvc_of_display_colorspace_sv[] = {
+ { "rgb", LOGICVC_DISPLAY_COLORSPACE_RGB },
+ { "yuv422", LOGICVC_DISPLAY_COLORSPACE_YUV422 },
+ { "yuv444", LOGICVC_DISPLAY_COLORSPACE_YUV444 },
+ { },
+};
+
+static struct logicvc_of_property_sv logicvc_of_layer_colorspace_sv[] = {
+ { "rgb", LOGICVC_LAYER_COLORSPACE_RGB },
+ { "yuv", LOGICVC_LAYER_COLORSPACE_YUV },
+ { },
+};
+
+static struct logicvc_of_property_sv logicvc_of_layer_alpha_mode_sv[] = {
+ { "layer", LOGICVC_LAYER_ALPHA_LAYER },
+ { "pixel", LOGICVC_LAYER_ALPHA_PIXEL },
+ { },
+};
+
+static struct logicvc_of_property logicvc_of_properties[] = {
+ [LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE] = {
+ .name = "xylon,display-interface",
+ .sv = logicvc_of_display_interface_sv,
+ .range = {
+ LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS,
+ LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS,
+ },
+ },
+ [LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE] = {
+ .name = "xylon,display-colorspace",
+ .sv = logicvc_of_display_colorspace_sv,
+ .range = {
+ LOGICVC_DISPLAY_COLORSPACE_RGB,
+ LOGICVC_DISPLAY_COLORSPACE_YUV444,
+ },
+ },
+ [LOGICVC_OF_PROPERTY_DISPLAY_DEPTH] = {
+ .name = "xylon,display-depth",
+ .range = { 8, 24 },
+ },
+ [LOGICVC_OF_PROPERTY_ROW_STRIDE] = {
+ .name = "xylon,row-stride",
+ },
+ [LOGICVC_OF_PROPERTY_DITHERING] = {
+ .name = "xylon,dithering",
+ .optional = true,
+ },
+ [LOGICVC_OF_PROPERTY_BACKGROUND_LAYER] = {
+ .name = "xylon,background-layer",
+ .optional = true,
+ },
+ [LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE] = {
+ .name = "xylon,layers-configurable",
+ .optional = true,
+ },
+ [LOGICVC_OF_PROPERTY_LAYERS_COUNT] = {
+ .name = "xylon,layers-count",
+ },
+ [LOGICVC_OF_PROPERTY_LAYER_DEPTH] = {
+ .name = "xylon,layer-depth",
+ .range = { 8, 24 },
+ },
+ [LOGICVC_OF_PROPERTY_LAYER_COLORSPACE] = {
+ .name = "xylon,layer-colorspace",
+ .sv = logicvc_of_layer_colorspace_sv,
+ .range = {
+ LOGICVC_LAYER_COLORSPACE_RGB,
+ LOGICVC_LAYER_COLORSPACE_RGB,
+ },
+ },
+ [LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE] = {
+ .name = "xylon,layer-alpha-mode",
+ .sv = logicvc_of_layer_alpha_mode_sv,
+ .range = {
+ LOGICVC_LAYER_ALPHA_LAYER,
+ LOGICVC_LAYER_ALPHA_PIXEL,
+ },
+ },
+ [LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET] = {
+ .name = "xylon,layer-base-offset",
+ },
+ [LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET] = {
+ .name = "xylon,layer-buffer-offset",
+ },
+ [LOGICVC_OF_PROPERTY_LAYER_PRIMARY] = {
+ .name = "xylon,layer-primary",
+ .optional = true,
+ },
+};
+
+static int logicvc_of_property_sv_value(struct logicvc_of_property_sv *sv,
+ const char *string, u32 *value)
+{
+ unsigned int i = 0;
+
+ while (sv[i].string) {
+ if (!strcmp(sv[i].string, string)) {
+ *value = sv[i].value;
+ return 0;
+ }
+
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+int logicvc_of_property_parse_u32(struct device_node *of_node,
+ unsigned int index, u32 *target)
+{
+ struct logicvc_of_property *property;
+ const char *string;
+ u32 value;
+ int ret;
+
+ if (index >= LOGICVC_OF_PROPERTY_MAXIMUM)
+ return -EINVAL;
+
+ property = &logicvc_of_properties[index];
+
+ if (!property->optional &&
+ !of_property_read_bool(of_node, property->name))
+ return -ENODEV;
+
+ if (property->sv) {
+ ret = of_property_read_string(of_node, property->name, &string);
+ if (ret)
+ return ret;
+
+ ret = logicvc_of_property_sv_value(property->sv, string,
+ &value);
+ if (ret)
+ return ret;
+ } else {
+ ret = of_property_read_u32(of_node, property->name, &value);
+ if (ret)
+ return ret;
+ }
+
+ if (property->range[0] || property->range[1])
+ if (value < property->range[0] || value > property->range[1])
+ return -ERANGE;
+
+ *target = value;
+
+ return 0;
+}
+
+void logicvc_of_property_parse_bool(struct device_node *of_node,
+ unsigned int index, bool *target)
+{
+ struct logicvc_of_property *property;
+
+ if (index >= LOGICVC_OF_PROPERTY_MAXIMUM) {
+ /* Fallback. */
+ *target = false;
+ return;
+ }
+
+ property = &logicvc_of_properties[index];
+ *target = of_property_read_bool(of_node, property->name);
+}
+
+bool logicvc_of_node_is_layer(struct device_node *of_node)
+{
+ return !of_node_cmp(of_node->name, "layer");
+}
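
A minimal usage sketch of the helpers above (illustrative only, not part of the patch; the function name is made up and the logicvc_of.h / logicvc_layer.h headers are assumed to be included), showing how a layer node would typically be parsed:

static int logicvc_layer_example_parse(struct device_node *layer_node,
				       struct logicvc_layer_config *config)
{
	int ret;

	/* Plain u32 property, validated against the 8..24 range. */
	ret = logicvc_of_property_parse_u32(layer_node,
					    LOGICVC_OF_PROPERTY_LAYER_DEPTH,
					    &config->depth);
	if (ret)
		return ret;

	/* String property, mapped to a value through the sv table ("rgb"/"yuv"). */
	ret = logicvc_of_property_parse_u32(layer_node,
					    LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
					    &config->colorspace);
	if (ret)
		return ret;

	/* Boolean property: present means true, absent means false. */
	logicvc_of_property_parse_bool(layer_node,
				       LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
				       &config->primary);

	return 0;
}

A non-optional property that is missing makes logicvc_of_property_parse_u32() return -ENODEV, which a caller like this can propagate or use to fall back to defaults.
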
diff --git a/drivers/gpu/drm/logicvc/logicvc_of.h b/drivers/gpu/drm/logicvc/logicvc_of.h
new file mode 100644
index 000000000000..46036e461be9
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_of.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _LOGICVC_OF_H_
+#define _LOGICVC_OF_H_
+
+enum logicvc_of_property_index {
+ LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE = 0,
+ LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE,
+ LOGICVC_OF_PROPERTY_DISPLAY_DEPTH,
+ LOGICVC_OF_PROPERTY_ROW_STRIDE,
+ LOGICVC_OF_PROPERTY_DITHERING,
+ LOGICVC_OF_PROPERTY_BACKGROUND_LAYER,
+ LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE,
+ LOGICVC_OF_PROPERTY_LAYERS_COUNT,
+ LOGICVC_OF_PROPERTY_LAYER_DEPTH,
+ LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
+ LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE,
+ LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET,
+ LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET,
+ LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
+ LOGICVC_OF_PROPERTY_MAXIMUM,
+};
+
+struct logicvc_of_property_sv {
+ const char *string;
+ u32 value;
+};
+
+struct logicvc_of_property {
+ char *name;
+ bool optional;
+ struct logicvc_of_property_sv *sv;
+ u32 range[2];
+};
+
+int logicvc_of_property_parse_u32(struct device_node *of_node,
+ unsigned int index, u32 *target);
+void logicvc_of_property_parse_bool(struct device_node *of_node,
+ unsigned int index, bool *target);
+bool logicvc_of_node_is_layer(struct device_node *of_node);
+
+#endif
diff --git a/drivers/gpu/drm/logicvc/logicvc_regs.h b/drivers/gpu/drm/logicvc/logicvc_regs.h
new file mode 100644
index 000000000000..4aae27e9ba2b
--- /dev/null
+++ b/drivers/gpu/drm/logicvc/logicvc_regs.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ *
+ * Copyright (C) 2014 Xylon d.o.o.
+ * Author: Davor Joja <davor.joja@logicbricks.com>
+ */
+
+#ifndef _LOGICVC_REGS_H_
+#define _LOGICVC_REGS_H_
+
+#define LOGICVC_DIMENSIONS_MAX (BIT(16) - 1)
+
+#define LOGICVC_HSYNC_FRONT_PORCH_REG 0x00
+#define LOGICVC_HSYNC_REG 0x08
+#define LOGICVC_HSYNC_BACK_PORCH_REG 0x10
+#define LOGICVC_HRES_REG 0x18
+#define LOGICVC_VSYNC_FRONT_PORCH_REG 0x20
+#define LOGICVC_VSYNC_REG 0x28
+#define LOGICVC_VSYNC_BACK_PORCH_REG 0x30
+#define LOGICVC_VRES_REG 0x38
+
+#define LOGICVC_CTRL_REG 0x40
+#define LOGICVC_CTRL_CLOCK_INVERT BIT(8)
+#define LOGICVC_CTRL_PIXEL_INVERT BIT(7)
+#define LOGICVC_CTRL_DE_INVERT BIT(5)
+#define LOGICVC_CTRL_DE_ENABLE BIT(4)
+#define LOGICVC_CTRL_VSYNC_INVERT BIT(3)
+#define LOGICVC_CTRL_VSYNC_ENABLE BIT(2)
+#define LOGICVC_CTRL_HSYNC_INVERT BIT(1)
+#define LOGICVC_CTRL_HSYNC_ENABLE BIT(0)
+
+#define LOGICVC_DTYPE_REG 0x48
+#define LOGICVC_BACKGROUND_COLOR_REG 0x50
+
+#define LOGICVC_BUFFER_SEL_REG 0x58
+#define LOGICVC_BUFFER_SEL_VALUE(i, v) \
+ (BIT(10 + (i)) | ((v) << (2 * (i))))
+#define LOGICVC_BUFFER_SEL_MAX 2
+
+#define LOGICVC_DOUBLE_CLUT_REG 0x60
+
+#define LOGICVC_INT_STAT_REG 0x68
+#define LOGICVC_INT_STAT_V_SYNC BIT(5)
+
+#define LOGICVC_INT_MASK_REG 0x70
+#define LOGICVC_INT_MASK_V_SYNC BIT(5)
+
+#define LOGICVC_POWER_CTRL_REG 0x78
+#define LOGICVC_POWER_CTRL_BACKLIGHT_ENABLE BIT(0)
+#define LOGICVC_POWER_CTRL_VDD_ENABLE BIT(1)
+#define LOGICVC_POWER_CTRL_VEE_ENABLE BIT(2)
+#define LOGICVC_POWER_CTRL_VIDEO_ENABLE BIT(3)
+
+#define LOGICVC_IP_VERSION_REG 0xf8
+#define LOGICVC_IP_VERSION_MAJOR_MASK GENMASK(16, 11)
+#define LOGICVC_IP_VERSION_MINOR_MASK GENMASK(10, 5)
+#define LOGICVC_IP_VERSION_LEVEL_MASK GENMASK(4, 0)
+
+#define LOGICVC_LAYER_ADDRESS_REG(i) (0x100 + (i) * 0x80)
+#define LOGICVC_LAYER_HOFFSET_REG(i) (0x100 + (i) * 0x80)
+
+#define LOGICVC_LAYER_VOFFSET_REG(i) (0x108 + (i) * 0x80)
+#define LOGICVC_LAYER_VOFFSET_MAX 4095
+
+#define LOGICVC_LAYER_HPOSITION_REG(i) (0x110 + (i) * 0x80)
+#define LOGICVC_LAYER_VPOSITION_REG(i) (0x118 + (i) * 0x80)
+#define LOGICVC_LAYER_WIDTH_REG(i) (0x120 + (i) * 0x80)
+#define LOGICVC_LAYER_HEIGHT_REG(i) (0x128 + (i) * 0x80)
+#define LOGICVC_LAYER_ALPHA_REG(i) (0x130 + (i) * 0x80)
+
+#define LOGICVC_LAYER_CTRL_REG(i) (0x138 + (i) * 0x80)
+#define LOGICVC_LAYER_CTRL_ENABLE BIT(0)
+#define LOGICVC_LAYER_CTRL_COLOR_KEY_DISABLE BIT(1)
+#define LOGICVC_LAYER_CTRL_PIXEL_FORMAT_INVERT BIT(4)
+
+#define LOGICVC_LAYER_COLOR_KEY_REG(i) (0x140 + (i) * 0x80)
+
+#endif
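
For orientation, a few worked expansions of the per-layer macros above (each layer occupies an 0x80-byte register window; the index and value are picked arbitrarily):

	LOGICVC_LAYER_ADDRESS_REG(2)   = 0x100 + 2 * 0x80 = 0x200
	LOGICVC_LAYER_CTRL_REG(2)      = 0x138 + 2 * 0x80 = 0x238
	LOGICVC_BUFFER_SEL_VALUE(2, 1) = BIT(10 + 2) | (1 << (2 * 2)) = 0x1010
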
diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c
index 038821d2ef80..3056ac566473 100644
--- a/drivers/gpu/drm/mcde/mcde_clk_div.c
+++ b/drivers/gpu/drm/mcde/mcde_clk_div.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include "mcde_drm.h"
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index ce12a36e2db4..4df477540d07 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -13,6 +13,7 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 5651734ce977..9f9ac8699310 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1111,6 +1111,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
bridge = of_drm_find_bridge(child);
if (!bridge) {
dev_err(dev, "failed to find bridge\n");
+ of_node_put(child);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 29098d7c8307..6e604a933ed0 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -4,6 +4,7 @@ mediatek-drm-y := mtk_disp_aal.o \
mtk_disp_ccorr.o \
mtk_disp_color.o \
mtk_disp_gamma.o \
+ mtk_disp_merge.o \
mtk_disp_ovl.o \
mtk_disp_rdma.o \
mtk_drm_crtc.o \
@@ -12,7 +13,8 @@ mediatek-drm-y := mtk_disp_aal.o \
mtk_drm_gem.o \
mtk_drm_plane.o \
mtk_dsi.o \
- mtk_dpi.o
+ mtk_dpi.o \
+ mtk_mdp_rdma.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 763be99e8d33..33e61a136bbc 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -8,6 +8,7 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_plane.h"
+#include "mtk_mdp_rdma.h"
int mtk_aal_clk_enable(struct device *dev);
void mtk_aal_clk_disable(struct device *dev);
@@ -55,6 +56,19 @@ void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state, bool
void mtk_gamma_start(struct device *dev);
void mtk_gamma_stop(struct device *dev);
+int mtk_merge_clk_enable(struct device *dev);
+void mtk_merge_clk_disable(struct device *dev);
+void mtk_merge_config(struct device *dev, unsigned int width,
+ unsigned int height, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_merge_start(struct device *dev);
+void mtk_merge_stop(struct device *dev);
+void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w,
+ unsigned int h, unsigned int vrefresh, unsigned int bpc,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt);
+void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt);
+
void mtk_ovl_bgclr_in_on(struct device *dev);
void mtk_ovl_bgclr_in_off(struct device *dev);
void mtk_ovl_bypass_shadow(struct device *dev);
@@ -102,4 +116,10 @@ void mtk_rdma_unregister_vblank_cb(struct device *dev);
void mtk_rdma_enable_vblank(struct device *dev);
void mtk_rdma_disable_vblank(struct device *dev);
+int mtk_mdp_rdma_clk_enable(struct device *dev);
+void mtk_mdp_rdma_clk_disable(struct device *dev);
+void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt);
+void mtk_mdp_rdma_stop(struct device *dev, struct cmdq_pkt *cmdq_pkt);
+void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg,
+ struct cmdq_pkt *cmdq_pkt);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
new file mode 100644
index 000000000000..6428b6203ffe
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_disp_drv.h"
+
+#define DISP_REG_MERGE_CTRL 0x000
+#define MERGE_EN 1
+#define DISP_REG_MERGE_CFG_0 0x010
+#define DISP_REG_MERGE_CFG_1 0x014
+#define DISP_REG_MERGE_CFG_4 0x020
+#define DISP_REG_MERGE_CFG_10 0x038
+/* no swap */
+#define SWAP_MODE 0
+#define FLD_SWAP_MODE GENMASK(4, 0)
+#define DISP_REG_MERGE_CFG_12 0x040
+#define CFG_10_10_1PI_2PO_BUF_MODE 6
+#define CFG_10_10_2PI_2PO_BUF_MODE 8
+#define CFG_11_10_1PI_2PO_MERGE 18
+#define FLD_CFG_MERGE_MODE GENMASK(4, 0)
+#define DISP_REG_MERGE_CFG_24 0x070
+#define DISP_REG_MERGE_CFG_25 0x074
+#define DISP_REG_MERGE_CFG_26 0x078
+#define DISP_REG_MERGE_CFG_27 0x07c
+#define DISP_REG_MERGE_CFG_36 0x0a0
+#define ULTRA_EN BIT(0)
+#define PREULTRA_EN BIT(4)
+#define DISP_REG_MERGE_CFG_37 0x0a4
+/* 0: Off, 1: SRAM0, 2: SRAM1, 3: SRAM0 + SRAM1 */
+#define BUFFER_MODE 3
+#define FLD_BUFFER_MODE GENMASK(1, 0)
+/*
+ * For the ultra and preultra settings, the 6 us ~ 9 us window is an
+ * empirical value and the maximum mmsys clock frequency is 594 MHz.
+ */
+#define DISP_REG_MERGE_CFG_40 0x0b0
+/* 6 us, 594M pixel/sec */
+#define ULTRA_TH_LOW (6 * 594)
+/* 8 us, 594M pixel/sec */
+#define ULTRA_TH_HIGH (8 * 594)
+#define FLD_ULTRA_TH_LOW GENMASK(15, 0)
+#define FLD_ULTRA_TH_HIGH GENMASK(31, 16)
+#define DISP_REG_MERGE_CFG_41 0x0b4
+/* 8 us, 594M pixel/sec */
+#define PREULTRA_TH_LOW (8 * 594)
+/* 9 us, 594M pixel/sec */
+#define PREULTRA_TH_HIGH (9 * 594)
+#define FLD_PREULTRA_TH_LOW GENMASK(15, 0)
+#define FLD_PREULTRA_TH_HIGH GENMASK(31, 16)
+
+#define DISP_REG_MERGE_MUTE_0 0xf00
+
+struct mtk_disp_merge {
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk *async_clk;
+ struct cmdq_client_reg cmdq_reg;
+ bool fifo_en;
+ bool mute_support;
+ struct reset_control *reset_ctl;
+};
+
+void mtk_merge_start(struct device *dev)
+{
+ mtk_merge_start_cmdq(dev, NULL);
+}
+
+void mtk_merge_stop(struct device *dev)
+{
+ mtk_merge_stop_cmdq(dev, NULL);
+}
+
+void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_merge *priv = dev_get_drvdata(dev);
+
+ if (priv->mute_support)
+ mtk_ddp_write(cmdq_pkt, 0x0, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_MUTE_0);
+
+ mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CTRL);
+}
+
+void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_merge *priv = dev_get_drvdata(dev);
+
+ if (priv->mute_support)
+ mtk_ddp_write(cmdq_pkt, 0x1, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_MUTE_0);
+
+ mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CTRL);
+
+ if (priv->async_clk)
+ reset_control_reset(priv->reset_ctl);
+}
+
+static void mtk_merge_fifo_setting(struct mtk_disp_merge *priv,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_ddp_write(cmdq_pkt, ULTRA_EN | PREULTRA_EN,
+ &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_36);
+
+ mtk_ddp_write_mask(cmdq_pkt, BUFFER_MODE,
+ &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_37,
+ FLD_BUFFER_MODE);
+
+ mtk_ddp_write_mask(cmdq_pkt, ULTRA_TH_LOW | ULTRA_TH_HIGH << 16,
+ &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_40,
+ FLD_ULTRA_TH_LOW | FLD_ULTRA_TH_HIGH);
+
+ mtk_ddp_write_mask(cmdq_pkt, PREULTRA_TH_LOW | PREULTRA_TH_HIGH << 16,
+ &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_41,
+ FLD_PREULTRA_TH_LOW | FLD_PREULTRA_TH_HIGH);
+}
+
+void mtk_merge_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_merge_advance_config(dev, w, 0, h, vrefresh, bpc, cmdq_pkt);
+}
+
+void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w,
+ unsigned int h, unsigned int vrefresh, unsigned int bpc,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_merge *priv = dev_get_drvdata(dev);
+ unsigned int mode = CFG_10_10_1PI_2PO_BUF_MODE;
+
+ if (!h || !l_w) {
+ dev_err(dev, "%s: input width(%d) or height(%d) is invalid\n", __func__, l_w, h);
+ return;
+ }
+
+ if (priv->fifo_en) {
+ mtk_merge_fifo_setting(priv, cmdq_pkt);
+ mode = CFG_10_10_2PI_2PO_BUF_MODE;
+ }
+
+ if (r_w)
+ mode = CFG_11_10_1PI_2PO_MERGE;
+
+ mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_0);
+ mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_1);
+ mtk_ddp_write(cmdq_pkt, h << 16 | (l_w + r_w), &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_4);
+ /*
+ * DISP_REG_MERGE_CFG_24 is the merge SRAM0 width/height and
+ * DISP_REG_MERGE_CFG_25 is the merge SRAM1 width/height.
+ * If r_w > 0, the merge operates in merge mode: input0 and input1 are
+ * merged together, with input0 going to SRAM0 and input1 to SRAM1.
+ * If r_w = 0, the merge operates in buffer mode: the input goes through
+ * SRAM0 and then SRAM1, so both SRAMs are set to the same size.
+ */
+ mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_24);
+ if (r_w)
+ mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_25);
+ else
+ mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_25);
+
+ /*
+ * DISP_REG_MERGE_CFG_26 and DISP_REG_MERGE_CFG_27 are only used in LR merge
+ * and only take effect when the merge is set to merge mode.
+ */
+ mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_26);
+ mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_27);
+
+ mtk_ddp_write_mask(cmdq_pkt, SWAP_MODE, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_10, FLD_SWAP_MODE);
+ mtk_ddp_write_mask(cmdq_pkt, mode, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CFG_12, FLD_CFG_MERGE_MODE);
+}
+
+int mtk_merge_clk_enable(struct device *dev)
+{
+ int ret = 0;
+ struct mtk_disp_merge *priv = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "merge clk prepare enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->async_clk);
+ if (ret) {
+ /* should clean up the state of priv->clk */
+ clk_disable_unprepare(priv->clk);
+
+ dev_err(dev, "async clk prepare enable failed\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+void mtk_merge_clk_disable(struct device *dev)
+{
+ struct mtk_disp_merge *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->async_clk);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int mtk_disp_merge_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_disp_merge_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_disp_merge_component_ops = {
+ .bind = mtk_disp_merge_bind,
+ .unbind = mtk_disp_merge_unbind,
+};
+
+static int mtk_disp_merge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct mtk_disp_merge *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap merge\n");
+ return PTR_ERR(priv->regs);
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get merge clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ priv->async_clk = devm_clk_get_optional(dev, "merge_async");
+ if (IS_ERR(priv->async_clk)) {
+ dev_err(dev, "failed to get merge async clock\n");
+ return PTR_ERR(priv->async_clk);
+ }
+
+ if (priv->async_clk) {
+ priv->reset_ctl = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset_ctl))
+ return PTR_ERR(priv->reset_ctl);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ priv->fifo_en = of_property_read_bool(dev->of_node,
+ "mediatek,merge-fifo-en");
+
+ priv->mute_support = of_property_read_bool(dev->of_node,
+ "mediatek,merge-mute");
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_merge_component_ops);
+ if (ret != 0)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_merge_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_merge_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_disp_merge_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8195-disp-merge", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);
+
+struct platform_driver mtk_disp_merge_driver = {
+ .probe = mtk_disp_merge_probe,
+ .remove = mtk_disp_merge_remove,
+ .driver = {
+ .name = "mediatek-disp-merge",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_merge_driver_dt_match,
+ },
+};
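
Two short notes worked out from the definitions above (illustrative, not additional patch content). The ultra/preultra thresholds are simply the time window multiplied by the maximum pixel rate:

	ULTRA_TH_LOW     = 6 us * 594 Mpixel/s = 3564 pixels
	ULTRA_TH_HIGH    = 8 us * 594 Mpixel/s = 4752 pixels
	PREULTRA_TH_LOW  = 8 us * 594 Mpixel/s = 4752 pixels
	PREULTRA_TH_HIGH = 9 us * 594 Mpixel/s = 5346 pixels

And the operating mode follows the right-hand width: a hypothetical call such as mtk_merge_advance_config(dev, 1920, 1920, 2160, 60, 8, NULL) selects LR merge mode (CFG_11_10_1PI_2PO_MERGE) for two 1920x2160 halves, while mtk_merge_config(dev, 3840, 2160, 60, 8, NULL) passes r_w = 0 and keeps the block in buffer mode; a NULL cmdq_pkt means direct register writes, as in mtk_merge_start().
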
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 70ab22964f3b..002b0f6cae1a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -3,7 +3,9 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
#include <linux/component.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 1be4caf9ff96..0ec2e4049e07 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <drm/drm_fourcc.h>
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
@@ -368,8 +370,8 @@ static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
.fifo_size = 5 * SZ_1K,
};
-static const struct mtk_disp_rdma_data mt8192_rdma_driver_data = {
- .fifo_size = 5 * SZ_1K,
+static const struct mtk_disp_rdma_data mt8195_rdma_driver_data = {
+ .fifo_size = 1920,
};
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
@@ -379,8 +381,8 @@ static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
.data = &mt8173_rdma_driver_data},
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = &mt8183_rdma_driver_data},
- { .compatible = "mediatek,mt8192-disp-rdma",
- .data = &mt8192_rdma_driver_data},
+ { .compatible = "mediatek,mt8195-disp-rdma",
+ .data = &mt8195_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index e61cd67b978f..630a4e301ef6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -8,6 +8,7 @@
#include <linux/component.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -22,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
@@ -55,12 +57,7 @@ enum mtk_dpi_out_channel_swap {
enum mtk_dpi_out_color_format {
MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_RGB_FULL,
- MTK_DPI_COLOR_FORMAT_YCBCR_444,
- MTK_DPI_COLOR_FORMAT_YCBCR_422,
- MTK_DPI_COLOR_FORMAT_XV_YCC,
- MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL,
- MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL
+ MTK_DPI_COLOR_FORMAT_YCBCR_422
};
struct mtk_dpi {
@@ -118,6 +115,27 @@ struct mtk_dpi_yc_limit {
u16 c_bottom;
};
+/**
+ * struct mtk_dpi_conf - Configuration of mediatek dpi.
+ * @cal_factor: Callback function to calculate factor value.
+ * @reg_h_fre_con: Register address of frequency control.
+ * @max_clock_khz: Max clock frequency supported by this SoC, in kHz.
+ * @edge_sel_en: Enable edge selection.
+ * @output_fmts: Array of supported output formats.
+ * @num_output_fmts: Quantity of supported output formats.
+ * @is_ck_de_pol: Support CK/DE polarity.
+ * @swap_input_support: Support input swap function.
+ * @support_direct_pin: IP supports direct connection to dpi panels.
+ * @input_2pixel: Set when the dp_intf takes 2 pixels per round on its input,
+ * so that the 2-pixel input mode gets enabled.
+ * @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH
+ * (no shift).
+ * @hvsize_mask: Mask of HSIZE and VSIZE (no shift).
+ * @channel_swap_shift: Shift value of channel swap.
+ * @yuv422_en_bit: Enable bit of yuv422.
+ * @csc_enable_bit: Enable bit of CSC.
+ * @pixels_per_iter: Quantity of transferred pixels per iteration.
+ */
struct mtk_dpi_conf {
unsigned int (*cal_factor)(int clock);
u32 reg_h_fre_con;
@@ -125,6 +143,16 @@ struct mtk_dpi_conf {
bool edge_sel_en;
const u32 *output_fmts;
u32 num_output_fmts;
+ bool is_ck_de_pol;
+ bool swap_input_support;
+ bool support_direct_pin;
+ bool input_2pixel;
+ u32 dimension_mask;
+ u32 hvsize_mask;
+ u32 channel_swap_shift;
+ u32 yuv422_en_bit;
+ u32 csc_enable_bit;
+ u32 pixels_per_iter;
};
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
@@ -153,12 +181,12 @@ static void mtk_dpi_disable(struct mtk_dpi *dpi)
static void mtk_dpi_config_hsync(struct mtk_dpi *dpi,
struct mtk_dpi_sync_param *sync)
{
- mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH,
- sync->sync_width << HPW, HPW_MASK);
- mtk_dpi_mask(dpi, DPI_TGEN_HPORCH,
- sync->back_porch << HBP, HBP_MASK);
+ mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH, sync->sync_width << HPW,
+ dpi->conf->dimension_mask << HPW);
+ mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->back_porch << HBP,
+ dpi->conf->dimension_mask << HBP);
mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->front_porch << HFP,
- HFP_MASK);
+ dpi->conf->dimension_mask << HFP);
}
static void mtk_dpi_config_vsync(struct mtk_dpi *dpi,
@@ -166,17 +194,17 @@ static void mtk_dpi_config_vsync(struct mtk_dpi *dpi,
u32 width_addr, u32 porch_addr)
{
mtk_dpi_mask(dpi, width_addr,
- sync->sync_width << VSYNC_WIDTH_SHIFT,
- VSYNC_WIDTH_MASK);
- mtk_dpi_mask(dpi, width_addr,
sync->shift_half_line << VSYNC_HALF_LINE_SHIFT,
VSYNC_HALF_LINE_MASK);
+ mtk_dpi_mask(dpi, width_addr,
+ sync->sync_width << VSYNC_WIDTH_SHIFT,
+ dpi->conf->dimension_mask << VSYNC_WIDTH_SHIFT);
mtk_dpi_mask(dpi, porch_addr,
sync->back_porch << VSYNC_BACK_PORCH_SHIFT,
- VSYNC_BACK_PORCH_MASK);
+ dpi->conf->dimension_mask << VSYNC_BACK_PORCH_SHIFT);
mtk_dpi_mask(dpi, porch_addr,
sync->front_porch << VSYNC_FRONT_PORCH_SHIFT,
- VSYNC_FRONT_PORCH_MASK);
+ dpi->conf->dimension_mask << VSYNC_FRONT_PORCH_SHIFT);
}
static void mtk_dpi_config_vsync_lodd(struct mtk_dpi *dpi,
@@ -210,13 +238,20 @@ static void mtk_dpi_config_pol(struct mtk_dpi *dpi,
struct mtk_dpi_polarities *dpi_pol)
{
unsigned int pol;
+ unsigned int mask;
- pol = (dpi_pol->ck_pol == MTK_DPI_POLARITY_RISING ? 0 : CK_POL) |
- (dpi_pol->de_pol == MTK_DPI_POLARITY_RISING ? 0 : DE_POL) |
- (dpi_pol->hsync_pol == MTK_DPI_POLARITY_RISING ? 0 : HSYNC_POL) |
+ mask = HSYNC_POL | VSYNC_POL;
+ pol = (dpi_pol->hsync_pol == MTK_DPI_POLARITY_RISING ? 0 : HSYNC_POL) |
(dpi_pol->vsync_pol == MTK_DPI_POLARITY_RISING ? 0 : VSYNC_POL);
- mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, pol,
- CK_POL | DE_POL | HSYNC_POL | VSYNC_POL);
+ if (dpi->conf->is_ck_de_pol) {
+ mask |= CK_POL | DE_POL;
+ pol |= (dpi_pol->ck_pol == MTK_DPI_POLARITY_RISING ?
+ 0 : CK_POL) |
+ (dpi_pol->de_pol == MTK_DPI_POLARITY_RISING ?
+ 0 : DE_POL);
+ }
+
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, pol, mask);
}
static void mtk_dpi_config_3d(struct mtk_dpi *dpi, bool en_3d)
@@ -231,20 +266,36 @@ static void mtk_dpi_config_interface(struct mtk_dpi *dpi, bool inter)
static void mtk_dpi_config_fb_size(struct mtk_dpi *dpi, u32 width, u32 height)
{
- mtk_dpi_mask(dpi, DPI_SIZE, width << HSIZE, HSIZE_MASK);
- mtk_dpi_mask(dpi, DPI_SIZE, height << VSIZE, VSIZE_MASK);
+ mtk_dpi_mask(dpi, DPI_SIZE, width << HSIZE,
+ dpi->conf->hvsize_mask << HSIZE);
+ mtk_dpi_mask(dpi, DPI_SIZE, height << VSIZE,
+ dpi->conf->hvsize_mask << VSIZE);
}
-static void mtk_dpi_config_channel_limit(struct mtk_dpi *dpi,
- struct mtk_dpi_yc_limit *limit)
+static void mtk_dpi_config_channel_limit(struct mtk_dpi *dpi)
{
- mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit->y_bottom << Y_LIMINT_BOT,
+ struct mtk_dpi_yc_limit limit;
+
+ if (drm_default_rgb_quant_range(&dpi->mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED) {
+ limit.y_bottom = 0x10;
+ limit.y_top = 0xfe0;
+ limit.c_bottom = 0x10;
+ limit.c_top = 0xfe0;
+ } else {
+ limit.y_bottom = 0;
+ limit.y_top = 0xfff;
+ limit.c_bottom = 0;
+ limit.c_top = 0xfff;
+ }
+
+ mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit.y_bottom << Y_LIMINT_BOT,
Y_LIMINT_BOT_MASK);
- mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit->y_top << Y_LIMINT_TOP,
+ mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit.y_top << Y_LIMINT_TOP,
Y_LIMINT_TOP_MASK);
- mtk_dpi_mask(dpi, DPI_C_LIMIT, limit->c_bottom << C_LIMIT_BOT,
+ mtk_dpi_mask(dpi, DPI_C_LIMIT, limit.c_bottom << C_LIMIT_BOT,
C_LIMIT_BOT_MASK);
- mtk_dpi_mask(dpi, DPI_C_LIMIT, limit->c_top << C_LIMIT_TOP,
+ mtk_dpi_mask(dpi, DPI_C_LIMIT, limit.c_top << C_LIMIT_TOP,
C_LIMIT_TOP_MASK);
}
@@ -332,17 +383,21 @@ static void mtk_dpi_config_channel_swap(struct mtk_dpi *dpi,
break;
}
- mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << CH_SWAP, CH_SWAP_MASK);
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING,
+ val << dpi->conf->channel_swap_shift,
+ CH_SWAP_MASK << dpi->conf->channel_swap_shift);
}
static void mtk_dpi_config_yuv422_enable(struct mtk_dpi *dpi, bool enable)
{
- mtk_dpi_mask(dpi, DPI_CON, enable ? YUV422_EN : 0, YUV422_EN);
+ mtk_dpi_mask(dpi, DPI_CON, enable ? dpi->conf->yuv422_en_bit : 0,
+ dpi->conf->yuv422_en_bit);
}
static void mtk_dpi_config_csc_enable(struct mtk_dpi *dpi, bool enable)
{
- mtk_dpi_mask(dpi, DPI_CON, enable ? CSC_ENABLE : 0, CSC_ENABLE);
+ mtk_dpi_mask(dpi, DPI_CON, enable ? dpi->conf->csc_enable_bit : 0,
+ dpi->conf->csc_enable_bit);
}
static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
@@ -364,23 +419,24 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_444) ||
- (format == MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL)) {
- mtk_dpi_config_yuv422_enable(dpi, false);
- mtk_dpi_config_csc_enable(dpi, true);
- mtk_dpi_config_swap_input(dpi, false);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_BGR);
- } else if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_422) ||
- (format == MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL)) {
+ mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+
+ if (format == MTK_DPI_COLOR_FORMAT_YCBCR_422) {
mtk_dpi_config_yuv422_enable(dpi, true);
mtk_dpi_config_csc_enable(dpi, true);
- mtk_dpi_config_swap_input(dpi, true);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+
+ /*
+ * If the active width (hdisplay) is 720 or smaller, use RGB_TO_BT601
+ * to convert to yuv422. Otherwise, use RGB_TO_JPEG.
+ */
+ mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ?
+ MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG,
+ INT_MATRIX_SEL_MASK);
} else {
mtk_dpi_config_yuv422_enable(dpi, false);
mtk_dpi_config_csc_enable(dpi, false);
- mtk_dpi_config_swap_input(dpi, false);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ if (dpi->conf->swap_input_support)
+ mtk_dpi_config_swap_input(dpi, false);
}
}
@@ -436,7 +492,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
if (dpi->pinctrl && dpi->pins_dpi)
pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
- mtk_dpi_enable(dpi);
return 0;
err_pixel:
@@ -449,7 +504,6 @@ err_refcount:
static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
struct drm_display_mode *mode)
{
- struct mtk_dpi_yc_limit limit;
struct mtk_dpi_polarities dpi_pol;
struct mtk_dpi_sync_param hsync;
struct mtk_dpi_sync_param vsync_lodd = { 0 };
@@ -471,7 +525,14 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
clk_set_rate(dpi->tvd_clk, pll_rate);
pll_rate = clk_get_rate(dpi->tvd_clk);
+ /*
+ * Depending on the IP version, we may output a different number of
+ * pixels for each iteration: divide the clock by this number and
+ * adjust the display porches accordingly.
+ */
vm.pixelclock = pll_rate / factor;
+ vm.pixelclock /= dpi->conf->pixels_per_iter;
+
if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
(dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE))
clk_set_rate(dpi->pixel_clk, vm.pixelclock * 2);
@@ -484,20 +545,22 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n",
pll_rate, vm.pixelclock);
- limit.c_bottom = 0x0010;
- limit.c_top = 0x0FE0;
- limit.y_bottom = 0x0010;
- limit.y_top = 0x0FE0;
-
dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
dpi_pol.hsync_pol = vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ?
MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
dpi_pol.vsync_pol = vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ?
MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
- hsync.sync_width = vm.hsync_len;
- hsync.back_porch = vm.hback_porch;
- hsync.front_porch = vm.hfront_porch;
+
+ /*
+ * Depending on the IP version, we may output a different amount of
+ * Depending on the IP version, we may output a different number of
+ * adjust the display porches accordingly.
+ */
+ hsync.sync_width = vm.hsync_len / dpi->conf->pixels_per_iter;
+ hsync.back_porch = vm.hback_porch / dpi->conf->pixels_per_iter;
+ hsync.front_porch = vm.hfront_porch / dpi->conf->pixels_per_iter;
+
hsync.shift_half_line = false;
vsync_lodd.sync_width = vm.vsync_len;
vsync_lodd.back_porch = vm.vback_porch;
@@ -536,14 +599,20 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
else
mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive);
- mtk_dpi_config_channel_limit(dpi, &limit);
+ mtk_dpi_config_channel_limit(dpi);
mtk_dpi_config_bit_num(dpi, dpi->bit_num);
mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
- mtk_dpi_config_yc_map(dpi, dpi->yc_map);
mtk_dpi_config_color_format(dpi, dpi->color_format);
- mtk_dpi_config_2n_h_fre(dpi);
- mtk_dpi_dual_edge(dpi);
- mtk_dpi_config_disable_edge(dpi);
+ if (dpi->conf->support_direct_pin) {
+ mtk_dpi_config_yc_map(dpi, dpi->yc_map);
+ mtk_dpi_config_2n_h_fre(dpi);
+ mtk_dpi_dual_edge(dpi);
+ mtk_dpi_config_disable_edge(dpi);
+ }
+ if (dpi->conf->input_2pixel) {
+ mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN,
+ DPINTF_INPUT_2P_EN);
+ }
mtk_dpi_sw_reset(dpi, false);
return 0;
@@ -622,7 +691,10 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
- dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+ if (out_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
+ dpi->color_format = MTK_DPI_COLOR_FORMAT_YCBCR_422;
+ else
+ dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
return 0;
}
@@ -658,6 +730,7 @@ static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
mtk_dpi_power_on(dpi);
mtk_dpi_set_display_mode(dpi, &dpi->mode);
+ mtk_dpi_enable(dpi);
}
static enum drm_mode_status
@@ -781,6 +854,16 @@ static unsigned int mt8183_calculate_factor(int clock)
return 2;
}
+static unsigned int mt8195_dpintf_calculate_factor(int clock)
+{
+ if (clock < 70000)
+ return 4;
+ else if (clock < 200000)
+ return 2;
+ else
+ return 1;
+}
+
static const u32 mt8173_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
};
@@ -790,12 +873,26 @@ static const u32 mt8183_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_2X12_BE,
};
+static const u32 mt8195_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+};
+
static const struct mtk_dpi_conf mt8173_conf = {
.cal_factor = mt8173_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 300000,
.output_fmts = mt8173_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt2701_conf = {
@@ -805,6 +902,15 @@ static const struct mtk_dpi_conf mt2701_conf = {
.max_clock_khz = 150000,
.output_fmts = mt8173_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8183_conf = {
@@ -813,6 +919,15 @@ static const struct mtk_dpi_conf mt8183_conf = {
.max_clock_khz = 100000,
.output_fmts = mt8183_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
};
static const struct mtk_dpi_conf mt8192_conf = {
@@ -821,6 +936,29 @@ static const struct mtk_dpi_conf mt8192_conf = {
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
+};
+
+static const struct mtk_dpi_conf mt8195_dpintf_conf = {
+ .cal_factor = mt8195_dpintf_calculate_factor,
+ .max_clock_khz = 600000,
+ .output_fmts = mt8195_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+ .pixels_per_iter = 4,
+ .input_2pixel = true,
+ .dimension_mask = DPINTF_HPW_MASK,
+ .hvsize_mask = DPINTF_HSIZE_MASK,
+ .channel_swap_shift = DPINTF_CH_SWAP,
+ .yuv422_en_bit = DPINTF_YUV422_EN,
+ .csc_enable_bit = DPINTF_CSC_ENABLE,
};
static int mtk_dpi_probe(struct platform_device *pdev)
@@ -945,6 +1083,9 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8192-dpi",
.data = &mt8192_conf,
},
+ { .compatible = "mediatek,mt8195-dp-intf",
+ .data = &mt8195_dpintf_conf,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
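
To make the pixels_per_iter handling concrete, a worked example (not part of the patch): for a 1920x1080@60 CEA mode with a 148.5 MHz pixel clock on the mt8195 dp-intf, where pixels_per_iter = 4, mtk_dpi_set_display_mode() ends up with

	vm.pixelclock     = 148500000 / 4 = 37125000 Hz
	hsync.sync_width  = 44 / 4  = 11
	hsync.back_porch  = 148 / 4 = 37
	hsync.front_porch = 88 / 4  = 22

so the timing generator runs at a quarter of the pixel rate while the overall mode timing is preserved.
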
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
index 3a02fabe1662..62bd4931b344 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -40,9 +40,13 @@
#define FAKE_DE_LEVEN BIT(21)
#define FAKE_DE_RODD BIT(22)
#define FAKE_DE_REVEN BIT(23)
+#define DPINTF_YUV422_EN BIT(24)
+#define DPINTF_CSC_ENABLE BIT(26)
+#define DPINTF_INPUT_2P_EN BIT(29)
#define DPI_OUTPUT_SETTING 0x14
#define CH_SWAP 0
+#define DPINTF_CH_SWAP 1
#define CH_SWAP_MASK (0x7 << 0)
#define SWAP_RGB 0x00
#define SWAP_GBR 0x01
@@ -80,8 +84,10 @@
#define DPI_SIZE 0x18
#define HSIZE 0
#define HSIZE_MASK (0x1FFF << 0)
+#define DPINTF_HSIZE_MASK (0xFFFF << 0)
#define VSIZE 16
#define VSIZE_MASK (0x1FFF << 16)
+#define DPINTF_VSIZE_MASK (0xFFFF << 16)
#define DPI_DDR_SETTING 0x1C
#define DDR_EN BIT(0)
@@ -93,24 +99,30 @@
#define DPI_TGEN_HWIDTH 0x20
#define HPW 0
#define HPW_MASK (0xFFF << 0)
+#define DPINTF_HPW_MASK (0xFFFF << 0)
#define DPI_TGEN_HPORCH 0x24
#define HBP 0
#define HBP_MASK (0xFFF << 0)
+#define DPINTF_HBP_MASK (0xFFFF << 0)
#define HFP 16
#define HFP_MASK (0xFFF << 16)
+#define DPINTF_HFP_MASK (0xFFFF << 16)
#define DPI_TGEN_VWIDTH 0x28
#define DPI_TGEN_VPORCH 0x2C
#define VSYNC_WIDTH_SHIFT 0
#define VSYNC_WIDTH_MASK (0xFFF << 0)
+#define DPINTF_VSYNC_WIDTH_MASK (0xFFFF << 0)
#define VSYNC_HALF_LINE_SHIFT 16
#define VSYNC_HALF_LINE_MASK BIT(16)
#define VSYNC_BACK_PORCH_SHIFT 0
#define VSYNC_BACK_PORCH_MASK (0xFFF << 0)
+#define DPINTF_VSYNC_BACK_PORCH_MASK (0xFFFF << 0)
#define VSYNC_FRONT_PORCH_SHIFT 16
#define VSYNC_FRONT_PORCH_MASK (0xFFF << 16)
+#define DPINTF_VSYNC_FRONT_PORCH_MASK (0xFFFF << 16)
#define DPI_BG_HCNTL 0x30
#define BG_RIGHT (0x1FFF << 0)
@@ -217,4 +229,10 @@
#define EDGE_SEL_EN BIT(5)
#define H_FRE_2N BIT(25)
+
+#define DPI_MATRIX_SET 0xB4
+#define INT_MATRIX_SEL_MASK GENMASK(4, 0)
+#define MATRIX_SEL_RGB_TO_JPEG 0
+#define MATRIX_SEL_RGB_TO_BT601 2
+
#endif /* __MTK_DPI_REGS_H */
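
The per-SoC dimension_mask works because shifting it by a field's offset reproduces the fixed-width mask it replaces; worked from the definitions above:

	HPW_MASK << HFP        = 0x0FFF << 16 = 0x0FFF0000 (== HFP_MASK)
	DPINTF_HPW_MASK << HFP = 0xFFFF << 16 = 0xFFFF0000 (== DPINTF_HFP_MASK)

which is why mtk_dpi_config_hsync() and friends can mask with dpi->conf->dimension_mask << HFP instead of a per-field constant.
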
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 5d7504a72b11..2d72cc5ddaba 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -40,6 +40,12 @@
#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12)
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
+#define DISP_REG_DSC_CON 0x0000
+#define DSC_EN BIT(0)
+#define DSC_DUAL_INOUT BIT(2)
+#define DSC_BYPASS BIT(4)
+#define DSC_UFOE_SEL BIT(16)
+
#define DISP_REG_OD_EN 0x0000
#define DISP_REG_OD_CFG 0x0020
#define OD_RELAYMODE BIT(0)
@@ -181,6 +187,36 @@ static void mtk_dither_set(struct device *dev, unsigned int bpc,
DISP_DITHERING, cmdq_pkt);
}
+static void mtk_dsc_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ /* dsc bypass mode */
+ mtk_ddp_write_mask(cmdq_pkt, DSC_BYPASS, &priv->cmdq_reg, priv->regs,
+ DISP_REG_DSC_CON, DSC_BYPASS);
+ mtk_ddp_write_mask(cmdq_pkt, DSC_UFOE_SEL, &priv->cmdq_reg, priv->regs,
+ DISP_REG_DSC_CON, DSC_UFOE_SEL);
+ mtk_ddp_write_mask(cmdq_pkt, DSC_DUAL_INOUT, &priv->cmdq_reg, priv->regs,
+ DISP_REG_DSC_CON, DSC_DUAL_INOUT);
+}
+
+static void mtk_dsc_start(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ /* write with mask to preserve the value set in mtk_dsc_config */
+ mtk_ddp_write_mask(NULL, DSC_EN, &priv->cmdq_reg, priv->regs, DISP_REG_DSC_CON, DSC_EN);
+}
+
+static void mtk_dsc_stop(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, priv->regs + DISP_REG_DSC_CON);
+}
+
static void mtk_od_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
@@ -270,6 +306,14 @@ static const struct mtk_ddp_comp_funcs ddp_dpi = {
.stop = mtk_dpi_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_dsc = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
+ .config = mtk_dsc_config,
+ .start = mtk_dsc_start,
+ .stop = mtk_dsc_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_dsi = {
.start = mtk_dsi_ddp_start,
.stop = mtk_dsi_ddp_stop,
@@ -284,6 +328,14 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
.stop = mtk_gamma_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_merge = {
+ .clk_enable = mtk_merge_clk_enable,
+ .clk_disable = mtk_merge_clk_disable,
+ .start = mtk_merge_start,
+ .stop = mtk_merge_stop,
+ .config = mtk_merge_config,
+};
+
static const struct mtk_ddp_comp_funcs ddp_od = {
.clk_enable = mtk_ddp_clk_enable,
.clk_disable = mtk_ddp_clk_disable,
@@ -343,7 +395,9 @@ static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_COLOR] = "color",
[MTK_DISP_DITHER] = "dither",
+ [MTK_DISP_DSC] = "dsc",
[MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_MERGE] = "merge",
[MTK_DISP_MUTEX] = "mutex",
[MTK_DISP_OD] = "od",
[MTK_DISP_OVL] = "ovl",
@@ -353,6 +407,7 @@ static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DISP_WDMA] = "wdma",
+ [MTK_DP_INTF] = "dp-intf",
[MTK_DPI] = "dpi",
[MTK_DSI] = "dsi",
};
@@ -370,14 +425,24 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
- [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
+ [DDP_COMPONENT_DITHER0] = { MTK_DISP_DITHER, 0, &ddp_dither },
+ [DDP_COMPONENT_DP_INTF0] = { MTK_DP_INTF, 0, &ddp_dpi },
+ [DDP_COMPONENT_DP_INTF1] = { MTK_DP_INTF, 1, &ddp_dpi },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
+ [DDP_COMPONENT_DSC0] = { MTK_DISP_DSC, 0, &ddp_dsc },
+ [DDP_COMPONENT_DSC1] = { MTK_DISP_DSC, 1, &ddp_dsc },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
[DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
[DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
+ [DDP_COMPONENT_MERGE0] = { MTK_DISP_MERGE, 0, &ddp_merge },
+ [DDP_COMPONENT_MERGE1] = { MTK_DISP_MERGE, 1, &ddp_merge },
+ [DDP_COMPONENT_MERGE2] = { MTK_DISP_MERGE, 2, &ddp_merge },
+ [DDP_COMPONENT_MERGE3] = { MTK_DISP_MERGE, 3, &ddp_merge },
+ [DDP_COMPONENT_MERGE4] = { MTK_DISP_MERGE, 4, &ddp_merge },
+ [DDP_COMPONENT_MERGE5] = { MTK_DISP_MERGE, 5, &ddp_merge },
[DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
@@ -480,11 +545,13 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
type == MTK_DISP_CCORR ||
type == MTK_DISP_COLOR ||
type == MTK_DISP_GAMMA ||
+ type == MTK_DISP_MERGE ||
type == MTK_DISP_OVL ||
type == MTK_DISP_OVL_2L ||
type == MTK_DISP_PWM ||
type == MTK_DISP_RDMA ||
type == MTK_DPI ||
+ type == MTK_DP_INTF ||
type == MTK_DSI)
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 1cbc6332282d..2d0052c23dcb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -23,7 +23,9 @@ enum mtk_ddp_comp_type {
MTK_DISP_CCORR,
MTK_DISP_COLOR,
MTK_DISP_DITHER,
+ MTK_DISP_DSC,
MTK_DISP_GAMMA,
+ MTK_DISP_MERGE,
MTK_DISP_MUTEX,
MTK_DISP_OD,
MTK_DISP_OVL,
@@ -34,6 +36,7 @@ enum mtk_ddp_comp_type {
MTK_DISP_UFOE,
MTK_DISP_WDMA,
MTK_DPI,
+ MTK_DP_INTF,
MTK_DSI,
MTK_DDP_COMP_TYPE_MAX,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 6abe6bcacbdc..0e4c77724b05 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -4,6 +4,8 @@
* Author: YT SHEN <yt.shen@mediatek.com>
*/
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -116,7 +118,7 @@ static enum mtk_ddp_comp_id mt8167_mtk_ddp_main[] = {
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_DSI0,
};
@@ -148,7 +150,7 @@ static const enum mtk_ddp_comp_id mt8183_mtk_ddp_main[] = {
DDP_COMPONENT_CCORR,
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSI0,
};
@@ -166,7 +168,7 @@ static const enum mtk_ddp_comp_id mt8186_mtk_ddp_main[] = {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_POSTMASK0,
- DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSI0,
};
@@ -185,7 +187,7 @@ static const enum mtk_ddp_comp_id mt8192_mtk_ddp_main[] = {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_POSTMASK0,
- DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DSI0,
};
@@ -195,6 +197,19 @@ static const enum mtk_ddp_comp_id mt8192_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt8195_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_DITHER0,
+ DDP_COMPONENT_DSC0,
+ DDP_COMPONENT_MERGE0,
+ DDP_COMPONENT_DP_INTF0,
+};
+
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.main_path = mt2701_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
@@ -203,6 +218,13 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.shadow_register = true,
};
+static const struct mtk_mmsys_match_data mt2701_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt2701_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
.main_path = mt7623_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt7623_mtk_ddp_main),
@@ -211,6 +233,13 @@ static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
.shadow_register = true,
};
+static const struct mtk_mmsys_match_data mt7623_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt7623_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.main_path = mt2712_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
@@ -220,11 +249,25 @@ static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
};
+static const struct mtk_mmsys_match_data mt2712_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt2712_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
.main_path = mt8167_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8167_mtk_ddp_main),
};
+static const struct mtk_mmsys_match_data mt8167_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8167_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.main_path = mt8173_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
@@ -232,6 +275,13 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
};
+static const struct mtk_mmsys_match_data mt8173_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8173_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.main_path = mt8183_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8183_mtk_ddp_main),
@@ -239,6 +289,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
};
+static const struct mtk_mmsys_match_data mt8183_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8183_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
.main_path = mt8186_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8186_mtk_ddp_main),
@@ -246,6 +303,13 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8186_mtk_ddp_ext),
};
+static const struct mtk_mmsys_match_data mt8186_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8186_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
.main_path = mt8192_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8192_mtk_ddp_main),
@@ -253,6 +317,31 @@ static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext),
};
+static const struct mtk_mmsys_match_data mt8192_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8192_mmsys_driver_data,
+ },
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = {
+ .io_start = 0x1c01a000,
+ .main_path = mt8195_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8195_mtk_ddp_main),
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
+ .io_start = 0x1c100000,
+};
+
+static const struct mtk_mmsys_match_data mt8195_mmsys_match_data = {
+ .num_drv_data = 2,
+ .drv_data = {
+ &mt8195_vdosys0_driver_data,
+ &mt8195_vdosys1_driver_data,
+ },
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -470,12 +559,16 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8183-disp-dither",
.data = (void *)MTK_DISP_DITHER },
+ { .compatible = "mediatek,mt8195-disp-dsc",
+ .data = (void *)MTK_DISP_DSC },
{ .compatible = "mediatek,mt8167-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8195-disp-merge",
+ .data = (void *)MTK_DISP_MERGE },
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
@@ -490,6 +583,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8195-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-od",
.data = (void *)MTK_DISP_OD },
{ .compatible = "mediatek,mt2701-disp-ovl",
@@ -522,7 +617,7 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8192-disp-rdma",
+ { .compatible = "mediatek,mt8195-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-ufoe",
.data = (void *)MTK_DISP_UFOE },
@@ -538,41 +633,68 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8192-dpi",
.data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8195-dp-intf",
+ .data = (void *)MTK_DP_INTF },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8183-dsi",
.data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8186-dsi",
+ .data = (void *)MTK_DSI },
{ }
};
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
- .data = &mt2701_mmsys_driver_data},
+ .data = &mt2701_mmsys_match_data},
{ .compatible = "mediatek,mt7623-mmsys",
- .data = &mt7623_mmsys_driver_data},
+ .data = &mt7623_mmsys_match_data},
{ .compatible = "mediatek,mt2712-mmsys",
- .data = &mt2712_mmsys_driver_data},
+ .data = &mt2712_mmsys_match_data},
{ .compatible = "mediatek,mt8167-mmsys",
- .data = &mt8167_mmsys_driver_data},
+ .data = &mt8167_mmsys_match_data},
{ .compatible = "mediatek,mt8173-mmsys",
- .data = &mt8173_mmsys_driver_data},
+ .data = &mt8173_mmsys_match_data},
{ .compatible = "mediatek,mt8183-mmsys",
- .data = &mt8183_mmsys_driver_data},
+ .data = &mt8183_mmsys_match_data},
{ .compatible = "mediatek,mt8186-mmsys",
- .data = &mt8186_mmsys_driver_data},
+ .data = &mt8186_mmsys_match_data},
{ .compatible = "mediatek,mt8192-mmsys",
- .data = &mt8192_mmsys_driver_data},
+ .data = &mt8192_mmsys_match_data},
+ { .compatible = "mediatek,mt8195-mmsys",
+ .data = &mt8195_mmsys_match_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
+static int mtk_drm_find_match_data(struct device *dev,
+ const struct mtk_mmsys_match_data *match_data)
+{
+ int i;
+ struct platform_device *pdev = of_find_device_by_node(dev->parent->of_node);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get parent resource\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < match_data->num_drv_data; i++)
+ if (match_data->drv_data[i]->io_start == res->start)
+ return i;
+
+ return -EINVAL;
+}
+
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
+ const struct mtk_mmsys_match_data *match_data;
struct mtk_drm_private *private;
struct device_node *node;
struct component_match *match = NULL;
@@ -593,7 +715,19 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- private->data = of_id->data;
+ match_data = of_id->data;
+ if (match_data->num_drv_data > 1) {
+ /* This SoC has multiple mmsys channels */
+ ret = mtk_drm_find_match_data(dev, match_data);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't get match driver data\n");
+ return ret;
+ }
+ private->data = match_data->drv_data[ret];
+ } else {
+ dev_dbg(dev, "Using single mmsys channel\n");
+ private->data = match_data->drv_data[0];
+ }
/* Iterate over sibling DISP function blocks */
for_each_child_of_node(phandle->parent, node) {
@@ -628,7 +762,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
private->comp_node[comp_id] = of_node_get(node);
/*
- * Currently only the AAL, CCORR, COLOR, GAMMA, OVL, RDMA, DSI, and DPI
+ * Currently only the AAL, CCORR, COLOR, GAMMA, MERGE, OVL, RDMA, DSI, and DPI
* blocks have separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
@@ -636,9 +770,11 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DISP_CCORR ||
comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_GAMMA ||
+ comp_type == MTK_DISP_MERGE ||
comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
+ comp_type == MTK_DP_INTF ||
comp_type == MTK_DPI ||
comp_type == MTK_DSI) {
dev_info(dev, "Adding component match for %pOF\n",
@@ -693,8 +829,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mtk_drm_sys_suspend(struct device *dev)
+static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
@@ -705,20 +840,21 @@ static int mtk_drm_sys_suspend(struct device *dev)
return ret;
}
-static int mtk_drm_sys_resume(struct device *dev)
+static void mtk_drm_sys_complete(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
int ret;
ret = drm_mode_config_helper_resume(drm);
-
- return ret;
+ if (ret)
+ dev_err(dev, "Failed to resume\n");
}
-#endif
-static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
- mtk_drm_sys_resume);
+static const struct dev_pm_ops mtk_drm_pm_ops = {
+ .prepare = mtk_drm_sys_prepare,
+ .complete = mtk_drm_sys_complete,
+};
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
@@ -734,11 +870,13 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_ccorr_driver,
&mtk_disp_color_driver,
&mtk_disp_gamma_driver,
+ &mtk_disp_merge_driver,
&mtk_disp_ovl_driver,
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
&mtk_dsi_driver,
+ &mtk_mdp_rdma_driver,
};
static int __init mtk_drm_init(void)
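Editorial aside on the mmsys match-data hunks above: each SoC's driver data is now wrapped in a struct mtk_mmsys_match_data, and mtk_drm_find_match_data() picks an entry by comparing the parent mmsys node's first MEM resource against the io_start field, so one compatible string can cover several mmsys instances (MT8195's vdosys0/vdosys1). The standalone C sketch below is not part of the patch; the register bases are the MT8195 values quoted above, and all helper and type names are invented stand-ins for illustration only.

#include <stdio.h>

struct drv_data {
	unsigned long io_start;		/* register base of one mmsys instance */
	const char *name;
};

struct match_data {
	unsigned short num_drv_data;
	const struct drv_data *drv_data[4];
};

static const struct drv_data vdosys0 = { 0x1c01a000, "vdosys0" };
static const struct drv_data vdosys1 = { 0x1c100000, "vdosys1" };

/* one compatible string, two mmsys instances told apart by io_start */
static const struct match_data mt8195 = {
	.num_drv_data = 2,
	.drv_data = { &vdosys0, &vdosys1 },
};

/* mirrors mtk_drm_find_match_data(): return the index whose io_start
 * equals the start of the probed device's first MEM resource */
static int find_match_data(const struct match_data *md, unsigned long res_start)
{
	int i;

	for (i = 0; i < md->num_drv_data; i++)
		if (md->drv_data[i]->io_start == res_start)
			return i;
	return -1;
}

int main(void)
{
	int idx = find_match_data(&mt8195, 0x1c100000);

	printf("matched %s\n", idx >= 0 ? mt8195.drv_data[idx]->name : "nothing");
	return 0;
}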
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 3e7d1e6fbe01..7b37b5cf9629 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -21,6 +21,7 @@ struct drm_property;
struct regmap;
struct mtk_mmsys_driver_data {
+ const resource_size_t io_start;
const enum mtk_ddp_comp_id *main_path;
unsigned int main_len;
const enum mtk_ddp_comp_id *ext_path;
@@ -31,6 +32,11 @@ struct mtk_mmsys_driver_data {
bool shadow_register;
};
+struct mtk_mmsys_match_data {
+ unsigned short num_drv_data;
+ const struct mtk_mmsys_driver_data *drv_data[];
+};
+
struct mtk_drm_private {
struct drm_device *drm;
struct device *dma_dev;
@@ -50,9 +56,11 @@ extern struct platform_driver mtk_disp_aal_driver;
extern struct platform_driver mtk_disp_ccorr_driver;
extern struct platform_driver mtk_disp_color_driver;
extern struct platform_driver mtk_disp_gamma_driver;
+extern struct platform_driver mtk_disp_merge_driver;
extern struct platform_driver mtk_disp_ovl_driver;
extern struct platform_driver mtk_disp_rdma_driver;
extern struct platform_driver mtk_dpi_driver;
extern struct platform_driver mtk_dsi_driver;
+extern struct platform_driver mtk_mdp_rdma_driver;
#endif /* MTK_DRM_DRV_H */
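One detail of the new struct mtk_mmsys_match_data in the header above: drv_data is a flexible array member, yet the per-SoC tables initialize it statically. That relies on the GCC/Clang extension (used throughout the kernel) that permits static initialization of flexible array members. The minimal standalone demonstration below uses simplified structure names and is not strict ISO C, but it is accepted by gcc and clang and only illustrates that extension.

#include <stdio.h>

struct item {
	unsigned int io_start;
};

struct match_data {
	unsigned short num;
	const struct item *items[];	/* flexible array member */
};

static const struct item a = { 0x1c01a000 };
static const struct item b = { 0x1c100000 };

/* GCC/Clang extension: static initialization of a flexible array member */
static const struct match_data table = {
	.num = 2,
	.items = { &a, &b },
};

int main(void)
{
	int i;

	for (i = 0; i < table.num; i++)
		printf("entry %d: io_start=0x%x\n", i, table.items[i]->io_start);
	return 0;
}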
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index e5fae4ec2337..5c0d9ce69931 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -7,7 +7,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -138,6 +140,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
mtk_plane_state->pending.rotation = new_state->rotation;
+ mtk_plane_state->pending.color_encoding = new_state->color_encoding;
}
static void mtk_plane_atomic_async_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index d454bece9535..2d5ec66e3df1 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -24,6 +24,7 @@ struct mtk_plane_pending_state {
bool dirty;
bool async_dirty;
bool async_config;
+ enum drm_color_encoding color_encoding;
};
struct mtk_plane_state {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d9f10a33e6fa..9cc406e1eee1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -203,6 +203,7 @@ struct mtk_dsi {
struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
+ bool lanes_ready;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
const struct mtk_dsi_driver_data *driver_data;
@@ -661,18 +662,11 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
- mtk_dsi_rxtx_control(dsi);
- usleep_range(30, 100);
- mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
mtk_dsi_set_interrupt_enable(dsi);
- mtk_dsi_clk_ulp_mode_leave(dsi);
- mtk_dsi_lane0_ulp_mode_leave(dsi);
- mtk_dsi_clk_hs_mode(dsi, 0);
-
return 0;
err_disable_engine_clk:
clk_disable_unprepare(dsi->engine_clk);
@@ -691,19 +685,11 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
+ /* set the lane number to 0 to pull down the MIPI signal */
+ writel(0, dsi->regs + DSI_TXRX_CTRL);
mtk_dsi_disable(dsi);
@@ -711,21 +697,31 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
clk_disable_unprepare(dsi->digital_clk);
phy_power_off(dsi->phy);
+
+ dsi->lanes_ready = false;
}
-static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
+static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
{
- int ret;
+ if (!dsi->lanes_ready) {
+ dsi->lanes_ready = true;
+ mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
+ mtk_dsi_clk_ulp_mode_leave(dsi);
+ mtk_dsi_lane0_ulp_mode_leave(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 0);
+ /* give the DSI receiver time to react after the MIPI signal is pulled up */
+ msleep(20);
+ }
+}
+static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
+{
if (dsi->enabled)
return;
- ret = mtk_dsi_poweron(dsi);
- if (ret < 0) {
- DRM_ERROR("failed to power on dsi\n");
- return;
- }
-
+ mtk_dsi_lane_ready(dsi);
mtk_dsi_set_mode(dsi);
mtk_dsi_clk_hs_mode(dsi, 1);
@@ -739,7 +735,16 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- mtk_dsi_poweroff(dsi);
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after dsi is fully set.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
dsi->enabled = false;
}
@@ -763,24 +768,50 @@ static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
drm_display_mode_to_videomode(adjusted, &dsi->vm);
}
-static void mtk_dsi_bridge_disable(struct drm_bridge *bridge)
+static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
mtk_output_dsi_disable(dsi);
}
-static void mtk_dsi_bridge_enable(struct drm_bridge *bridge)
+static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+ if (dsi->refcount == 0)
+ return;
+
mtk_output_dsi_enable(dsi);
}
+static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ ret = mtk_dsi_poweron(dsi);
+ if (ret < 0)
+ DRM_ERROR("failed to power on dsi\n");
+}
+
+static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+
+ mtk_dsi_poweroff(dsi);
+}
+
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
- .disable = mtk_dsi_bridge_disable,
- .enable = mtk_dsi_bridge_enable,
+ .atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_enable = mtk_dsi_bridge_atomic_enable,
+ .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
+ .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
.mode_set = mtk_dsi_bridge_mode_set,
};
@@ -1000,6 +1031,8 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
if (MTK_DSI_HOST_IS_READ(msg->type))
irq_flag |= LPRX_RD_RDY_INT_FLAG;
+ mtk_dsi_lane_ready(dsi);
+
ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
if (ret)
goto restore_dsi_mode;
@@ -1166,6 +1199,12 @@ static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.has_size_ctl = true,
};
+static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
+ .reg_cmdq_off = 0xd00,
+ .has_shadow_ctl = true,
+ .has_size_ctl = true,
+};
+
static const struct of_device_id mtk_dsi_of_match[] = {
{ .compatible = "mediatek,mt2701-dsi",
.data = &mt2701_dsi_driver_data },
@@ -1173,6 +1212,8 @@ static const struct of_device_id mtk_dsi_of_match[] = {
.data = &mt8173_dsi_driver_data },
{ .compatible = "mediatek,mt8183-dsi",
.data = &mt8183_dsi_driver_data },
+ { .compatible = "mediatek,mt8186-dsi",
+ .data = &mt8186_dsi_driver_data },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);
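The DSI rework above splits bring-up into a refcounted poweron/poweroff pair (driven from atomic_pre_enable/atomic_post_disable) and a one-shot mtk_dsi_lane_ready() that can be triggered either by the first host transfer or by the enable path, with the flag cleared again on power-off. The standalone sketch below shows only that idempotent-guard-plus-refcount control flow; the hw_* helpers are stand-ins, not the driver's functions.

#include <stdbool.h>
#include <stdio.h>

struct dsi_state {
	int refcount;
	bool lanes_ready;
};

static void hw_power_on(void)       { puts("power on clocks/phy"); }
static void hw_power_off(void)      { puts("power off clocks/phy"); }
static void hw_bring_up_lanes(void) { puts("leave ULP, enter HS"); }

static int dsi_poweron(struct dsi_state *dsi)
{
	if (++dsi->refcount != 1)
		return 0;		/* already powered */
	hw_power_on();
	return 0;
}

static void dsi_poweroff(struct dsi_state *dsi)
{
	if (--dsi->refcount != 0)
		return;
	hw_power_off();
	dsi->lanes_ready = false;	/* lanes must be re-initialized next time */
}

/* safe to call from both the enable path and a host transfer */
static void dsi_lane_ready(struct dsi_state *dsi)
{
	if (dsi->lanes_ready)
		return;
	dsi->lanes_ready = true;
	hw_bring_up_lanes();
}

int main(void)
{
	struct dsi_state dsi = { 0 };

	dsi_poweron(&dsi);	/* atomic_pre_enable */
	dsi_lane_ready(&dsi);	/* first host transfer brings the lanes up */
	dsi_lane_ready(&dsi);	/* enable path: second call is a no-op */
	dsi_poweroff(&dsi);	/* atomic_post_disable */
	return 0;
}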
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
new file mode 100644
index 000000000000..eecfa98ff52e
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <drm/drm_fourcc.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_drv.h"
+#include "mtk_mdp_rdma.h"
+
+#define MDP_RDMA_EN 0x000
+#define FLD_ROT_ENABLE BIT(0)
+#define MDP_RDMA_RESET 0x008
+#define MDP_RDMA_CON 0x020
+#define FLD_OUTPUT_10B BIT(5)
+#define FLD_SIMPLE_MODE BIT(4)
+#define MDP_RDMA_GMCIF_CON 0x028
+#define FLD_COMMAND_DIV BIT(0)
+#define FLD_EXT_PREULTRA_EN BIT(3)
+#define FLD_RD_REQ_TYPE GENMASK(7, 4)
+#define VAL_RD_REQ_TYPE_BURST_8_ACCESS 7
+#define FLD_ULTRA_EN GENMASK(13, 12)
+#define VAL_ULTRA_EN_ENABLE 1
+#define FLD_PRE_ULTRA_EN GENMASK(17, 16)
+#define VAL_PRE_ULTRA_EN_ENABLE 1
+#define FLD_EXT_ULTRA_EN BIT(18)
+#define MDP_RDMA_SRC_CON 0x030
+#define FLD_OUTPUT_ARGB BIT(25)
+#define FLD_BIT_NUMBER GENMASK(19, 18)
+#define FLD_SWAP BIT(14)
+#define FLD_UNIFORM_CONFIG BIT(17)
+#define RDMA_INPUT_10BIT BIT(18)
+#define FLD_SRC_FORMAT GENMASK(3, 0)
+#define MDP_RDMA_COMP_CON 0x038
+#define FLD_AFBC_EN BIT(22)
+#define FLD_AFBC_YUV_TRANSFORM BIT(21)
+#define FLD_UFBDC_EN BIT(12)
+#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE 0x060
+#define FLD_MF_BKGD_WB GENMASK(22, 0)
+#define MDP_RDMA_MF_SRC_SIZE 0x070
+#define FLD_MF_SRC_H GENMASK(30, 16)
+#define FLD_MF_SRC_W GENMASK(14, 0)
+#define MDP_RDMA_MF_CLIP_SIZE 0x078
+#define FLD_MF_CLIP_H GENMASK(30, 16)
+#define FLD_MF_CLIP_W GENMASK(14, 0)
+#define MDP_RDMA_SRC_OFFSET_0 0x118
+#define FLD_SRC_OFFSET_0 GENMASK(31, 0)
+#define MDP_RDMA_TRANSFORM_0 0x200
+#define FLD_INT_MATRIX_SEL GENMASK(27, 23)
+#define FLD_TRANS_EN BIT(16)
+#define MDP_RDMA_SRC_BASE_0 0xf00
+#define FLD_SRC_BASE_0 GENMASK(31, 0)
+
+#define RDMA_CSC_FULL709_TO_RGB 5
+#define RDMA_CSC_BT601_TO_RGB 6
+
+enum rdma_format {
+ RDMA_INPUT_FORMAT_RGB565 = 0,
+ RDMA_INPUT_FORMAT_RGB888 = 1,
+ RDMA_INPUT_FORMAT_RGBA8888 = 2,
+ RDMA_INPUT_FORMAT_ARGB8888 = 3,
+ RDMA_INPUT_FORMAT_UYVY = 4,
+ RDMA_INPUT_FORMAT_YUY2 = 5,
+ RDMA_INPUT_FORMAT_Y8 = 7,
+ RDMA_INPUT_FORMAT_YV12 = 8,
+ RDMA_INPUT_FORMAT_UYVY_3PL = 9,
+ RDMA_INPUT_FORMAT_NV12 = 12,
+ RDMA_INPUT_FORMAT_UYVY_2PL = 13,
+ RDMA_INPUT_FORMAT_Y410 = 14
+};
+
+struct mtk_mdp_rdma {
+ void __iomem *regs;
+ struct clk *clk;
+ struct cmdq_client_reg cmdq_reg;
+};
+
+static unsigned int rdma_fmt_convert(unsigned int fmt)
+{
+ switch (fmt) {
+ default:
+ case DRM_FORMAT_RGB565:
+ return RDMA_INPUT_FORMAT_RGB565;
+ case DRM_FORMAT_BGR565:
+ return RDMA_INPUT_FORMAT_RGB565 | FLD_SWAP;
+ case DRM_FORMAT_RGB888:
+ return RDMA_INPUT_FORMAT_RGB888;
+ case DRM_FORMAT_BGR888:
+ return RDMA_INPUT_FORMAT_RGB888 | FLD_SWAP;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ return RDMA_INPUT_FORMAT_ARGB8888;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRA8888:
+ return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return RDMA_INPUT_FORMAT_RGBA8888;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP;
+ case DRM_FORMAT_ABGR2101010:
+ return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP | RDMA_INPUT_10BIT;
+ case DRM_FORMAT_ARGB2101010:
+ return RDMA_INPUT_FORMAT_RGBA8888 | RDMA_INPUT_10BIT;
+ case DRM_FORMAT_RGBA1010102:
+ return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP | RDMA_INPUT_10BIT;
+ case DRM_FORMAT_BGRA1010102:
+ return RDMA_INPUT_FORMAT_ARGB8888 | RDMA_INPUT_10BIT;
+ case DRM_FORMAT_UYVY:
+ return RDMA_INPUT_FORMAT_UYVY;
+ case DRM_FORMAT_YUYV:
+ return RDMA_INPUT_FORMAT_YUY2;
+ }
+}
+
+static unsigned int rdma_color_convert(unsigned int color_encoding)
+{
+ switch (color_encoding) {
+ default:
+ case DRM_COLOR_YCBCR_BT709:
+ return RDMA_CSC_FULL709_TO_RGB;
+ case DRM_COLOR_YCBCR_BT601:
+ return RDMA_CSC_BT601_TO_RGB;
+ }
+}
+
+static void mtk_mdp_rdma_fifo_config(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write_mask(cmdq_pkt, FLD_EXT_ULTRA_EN | VAL_PRE_ULTRA_EN_ENABLE << 16 |
+ VAL_ULTRA_EN_ENABLE << 12 | VAL_RD_REQ_TYPE_BURST_8_ACCESS << 4 |
+ FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV, &priv->cmdq_reg,
+ priv->regs, MDP_RDMA_GMCIF_CON, FLD_EXT_ULTRA_EN |
+ FLD_PRE_ULTRA_EN | FLD_ULTRA_EN | FLD_RD_REQ_TYPE |
+ FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV);
+}
+
+void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write_mask(cmdq_pkt, FLD_ROT_ENABLE, &priv->cmdq_reg,
+ priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE);
+}
+
+void mtk_mdp_rdma_stop(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg,
+ priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE);
+ mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET);
+ mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET);
+}
+
+void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
+ const struct drm_format_info *fmt_info = drm_format_info(cfg->fmt);
+ bool csc_enable = fmt_info->is_yuv;
+ unsigned int src_pitch_y = cfg->pitch;
+ unsigned int offset_y = 0;
+
+ mtk_mdp_rdma_fifo_config(dev, cmdq_pkt);
+
+ mtk_ddp_write_mask(cmdq_pkt, FLD_UNIFORM_CONFIG, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_SRC_CON, FLD_UNIFORM_CONFIG);
+ mtk_ddp_write_mask(cmdq_pkt, rdma_fmt_convert(cfg->fmt), &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_SRC_CON, FLD_SWAP | FLD_SRC_FORMAT | FLD_BIT_NUMBER);
+
+ if (!csc_enable && fmt_info->has_alpha)
+ mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_ARGB, &priv->cmdq_reg,
+ priv->regs, MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB);
+ else
+ mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB);
+
+ mtk_ddp_write_mask(cmdq_pkt, cfg->addr0, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_SRC_BASE_0, FLD_SRC_BASE_0);
+
+ mtk_ddp_write_mask(cmdq_pkt, src_pitch_y, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_MF_BKGD_SIZE_IN_BYTE, FLD_MF_BKGD_WB);
+
+ mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_COMP_CON,
+ FLD_AFBC_YUV_TRANSFORM | FLD_UFBDC_EN | FLD_AFBC_EN);
+ mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_10B, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_CON, FLD_OUTPUT_10B);
+ mtk_ddp_write_mask(cmdq_pkt, FLD_SIMPLE_MODE, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_CON, FLD_SIMPLE_MODE);
+ if (csc_enable)
+ mtk_ddp_write_mask(cmdq_pkt, rdma_color_convert(cfg->color_encoding) << 23,
+ &priv->cmdq_reg, priv->regs, MDP_RDMA_TRANSFORM_0,
+ FLD_INT_MATRIX_SEL);
+ mtk_ddp_write_mask(cmdq_pkt, csc_enable << 16, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_TRANSFORM_0, FLD_TRANS_EN);
+
+ offset_y = cfg->x_left * fmt_info->cpp[0] + cfg->y_top * src_pitch_y;
+
+ mtk_ddp_write_mask(cmdq_pkt, offset_y, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_SRC_OFFSET_0, FLD_SRC_OFFSET_0);
+ mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_W);
+ mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_H);
+ mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_W);
+ mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs,
+ MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_H);
+}
+
+int mtk_mdp_rdma_clk_enable(struct device *dev)
+{
+ struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
+
+ clk_prepare_enable(rdma->clk);
+ return 0;
+}
+
+void mtk_mdp_rdma_clk_disable(struct device *dev)
+{
+ struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rdma->clk);
+}
+
+static int mtk_mdp_rdma_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_mdp_rdma_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_mdp_rdma_component_ops = {
+ .bind = mtk_mdp_rdma_bind,
+ .unbind = mtk_mdp_rdma_unbind,
+};
+
+static int mtk_mdp_rdma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct mtk_mdp_rdma *priv;
+ int ret = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap rdma\n");
+ return PTR_ERR(priv->regs);
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get rdma clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "failed to get mediatek,gce-client-reg\n");
+#endif
+ platform_set_drvdata(pdev, priv);
+
+ pm_runtime_enable(dev);
+
+ ret = component_add(dev, &mtk_mdp_rdma_component_ops);
+ if (ret != 0) {
+ pm_runtime_disable(dev);
+ dev_err(dev, "Failed to add component: %d\n", ret);
+ }
+ return ret;
+}
+
+static int mtk_mdp_rdma_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_mdp_rdma_component_ops);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mtk_mdp_rdma_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8195-vdo1-rdma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match);
+
+struct platform_driver mtk_mdp_rdma_driver = {
+ .probe = mtk_mdp_rdma_probe,
+ .remove = mtk_mdp_rdma_remove,
+ .driver = {
+ .name = "mediatek-mdp-rdma",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_mdp_rdma_driver_dt_match,
+ },
+};
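The register programming in the new mtk_mdp_rdma.c above goes through mtk_ddp_write_mask(), which, as its use here implies, updates only the bits selected by a GENMASK()/BIT() field and preserves the rest of the register. The standalone sketch below re-implements that masked read-modify-write on an in-memory variable so the bit arithmetic is easy to follow; the macros, field names and register value are local stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define FLD_ULTRA_EN	GENMASK(13, 12)	/* two-bit field at [13:12] */
#define FLD_COMMAND_DIV	BIT(0)

/* masked update: only the bits in 'mask' change, all others are preserved */
static void write_mask(uint32_t *reg, uint32_t val, uint32_t mask)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t gmcif_con = 0xdeadbe00;	/* pretend current register value */

	/* program ULTRA_EN = 1 (field value shifted to bit 12) and COMMAND_DIV */
	write_mask(&gmcif_con, (1u << 12) | FLD_COMMAND_DIV,
		   FLD_ULTRA_EN | FLD_COMMAND_DIV);

	printf("GMCIF_CON = 0x%08x\n", (unsigned)gmcif_con);
	return 0;
}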
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.h b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.h
new file mode 100644
index 000000000000..9943ee3aac31
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MTK_MDP_RDMA_H__
+#define __MTK_MDP_RDMA_H__
+
+struct mtk_mdp_rdma_cfg {
+ unsigned int pitch;
+ unsigned int addr0;
+ unsigned int width;
+ unsigned int height;
+ unsigned int x_left;
+ unsigned int y_top;
+ int fmt;
+ int color_encoding;
+};
+
+#endif // __MTK_MDP_RDMA_H__
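As a usage note for the new header: a caller fills struct mtk_mdp_rdma_cfg with the buffer layout and the crop origin, and mtk_mdp_rdma_config() derives the byte offset of the first pixel as x_left * bytes-per-pixel + y_top * pitch. The standalone sketch below reproduces just that arithmetic with made-up values (a 1920x1080 XRGB8888 buffer cropped at (16, 8)); the struct is copied from the header, everything else is illustrative.

#include <stdio.h>

struct mtk_mdp_rdma_cfg {
	unsigned int pitch;	/* bytes per line */
	unsigned int addr0;	/* base address of plane 0 */
	unsigned int width;
	unsigned int height;
	unsigned int x_left;	/* crop origin, in pixels */
	unsigned int y_top;
	int fmt;
	int color_encoding;
};

int main(void)
{
	/* hypothetical 1920x1080 XRGB8888 buffer, cropped at (16, 8) */
	struct mtk_mdp_rdma_cfg cfg = {
		.pitch = 1920 * 4,
		.addr0 = 0x40000000,
		.width = 1280,
		.height = 720,
		.x_left = 16,
		.y_top = 8,
	};
	unsigned int cpp = 4;	/* bytes per pixel for XRGB8888 */

	/* same formula as mtk_mdp_rdma_config(): offset of the crop origin */
	unsigned int offset_y = cfg.x_left * cpp + cfg.y_top * cfg.pitch;

	printf("SRC_OFFSET_0 = %u bytes\n", offset_y);	/* 16*4 + 8*7680 = 61504 */
	return 0;
}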
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index fd8db97ba8ba..8110a6e39320 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -238,6 +238,7 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
}
meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
+ of_node_put(remote);
if (!meson_encoder_cvbs->next_bridge) {
dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 5e306de6f485..2f616c55c271 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -218,7 +218,8 @@ static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
yuv420_mode = true;
- }
+ } else if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYVY8_1X16)
+ ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
/* VENC + VENC-DVI Mode setup */
meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
@@ -230,6 +231,10 @@ static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
/* Setup YUV420 to HDMI-TX, no 10bit diphering */
writel_relaxed(2 | (2 << 2),
priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ else if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYVY8_1X16)
+ /* Setup YUV422 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(1 | (2 << 2),
+ priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
else
/* Setup YUV444 to HDMI-TX, no 10bit diphering */
writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
@@ -257,6 +262,7 @@ static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge,
static const u32 meson_encoder_hdmi_out_bus_fmts[] = {
MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
MEDIA_BUS_FMT_UYYVYY8_0_5X24,
};
@@ -365,7 +371,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
if (!meson_encoder_hdmi->next_bridge) {
dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto err_put_node;
}
/* HDMI Encoder Bridge */
@@ -383,7 +390,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
- return ret;
+ goto err_put_node;
}
meson_encoder_hdmi->encoder.possible_crtcs = BIT(0);
@@ -393,7 +400,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
- return ret;
+ goto err_put_node;
}
/* Initialize & attach Bridge Connector */
@@ -401,7 +408,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
&meson_encoder_hdmi->encoder);
if (IS_ERR(meson_encoder_hdmi->connector)) {
dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
- return PTR_ERR(meson_encoder_hdmi->connector);
+ ret = PTR_ERR(meson_encoder_hdmi->connector);
+ goto err_put_node;
}
drm_connector_attach_encoder(meson_encoder_hdmi->connector,
&meson_encoder_hdmi->encoder);
@@ -428,6 +436,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->connector->ycbcr_420_allowed = true;
pdev = of_find_device_by_node(remote);
+ of_node_put(remote);
if (pdev) {
struct cec_connector_info conn_info;
struct cec_notifier *notifier;
@@ -435,8 +444,10 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
cec_fill_conn_info_from_drm(&conn_info, meson_encoder_hdmi->connector);
notifier = cec_notifier_conn_register(&pdev->dev, NULL, &conn_info);
- if (!notifier)
+ if (!notifier) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
meson_encoder_hdmi->cec_notifier = notifier;
}
@@ -444,4 +455,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
dev_dbg(priv->dev, "HDMI encoder initialized\n");
return 0;
+
+err_put_node:
+ of_node_put(remote);
+ return ret;
}
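The meson HDMI hunks above convert several early returns into a single err_put_node exit so that the OF node reference taken on 'remote' is dropped on every failure path as well as on success. Reduced to its shape, the pattern is sketched below with a fake refcounted handle and dummy setup steps; none of these helpers are kernel APIs.

#include <stdio.h>

struct node { int refs; };

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n)         { n->refs--; }

static int setup_step(int step_to_fail, int step)
{
	return step == step_to_fail ? -1 : 0;
}

/* one exit label releases the reference no matter which step fails */
static int init(struct node *remote, int step_to_fail)
{
	int ret;

	node_get(remote);

	ret = setup_step(step_to_fail, 1);	/* e.g. encoder init */
	if (ret)
		goto err_put_node;
	ret = setup_step(step_to_fail, 2);	/* e.g. bridge attach */
	if (ret)
		goto err_put_node;

	node_put(remote);			/* success path also releases */
	return 0;

err_put_node:
	node_put(remote);
	return ret;
}

int main(void)
{
	struct node remote = { 0 };

	init(&remote, 2);
	printf("leaked refs after failure: %d\n", remote.refs);	/* prints 0 */
	return 0;
}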
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index dfef8afcc245..b4a0518c1028 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -9,9 +9,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 8640a8a8a469..b9ac932af8d0 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -13,9 +13,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index d4f766522483..89558549c3af 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,4 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only
-mgag200-y := mgag200_drv.o mgag200_i2c.o mgag200_mm.o mgag200_mode.o mgag200_pll.o
+mgag200-y := \
+ mgag200_drv.o \
+ mgag200_g200.o \
+ mgag200_g200eh.o \
+ mgag200_g200eh3.o \
+ mgag200_g200er.o \
+ mgag200_g200ev.o \
+ mgag200_g200ew3.o \
+ mgag200_g200se.o \
+ mgag200_g200wb.o \
+ mgag200_i2c.o \
+ mgag200_mode.o \
+ mgag200_pll.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 08839460606f..73e8e4e9e54b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/vmalloc.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
@@ -24,6 +23,71 @@ int mgag200_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, mgag200_modeset, int, 0400);
+int mgag200_init_pci_options(struct pci_dev *pdev, u32 option, u32 option2)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+
+ err = pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ if (err != PCIBIOS_SUCCESSFUL) {
+ dev_err(dev, "pci_write_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
+ return pcibios_err_to_errno(err);
+ }
+
+ err = pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
+ if (err != PCIBIOS_SUCCESSFUL) {
+ dev_err(dev, "pci_write_config_dword(PCI_MGA_OPTION2) failed: %d\n", err);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+ size_t vram_size;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
+ vram_size = size;
+
+ for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55)
+ break;
+
+ if (test2)
+ break;
+ }
+
+ iowrite16(orig, mem);
+
+ return offset - 65536;
+}
+
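mgag200_probe_vram() above sizes the framebuffer by writing an 0xaa55 test pattern every 16 KiB starting at 1 MiB, reading it back, and also re-reading offset 0 to catch address wrap-around on undersized apertures; the usable size is the last offset reached minus a 64 KiB reserve. The standalone sketch below walks a plain malloc'ed buffer the same way (no MMIO, simplified restore logic, made-up aperture size) purely to illustrate the loop.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* walk the buffer like mgag200_probe_vram(): test every 16 KiB step */
static size_t probe_vram(uint16_t *mem, size_t size)
{
	size_t offset;
	uint16_t orig0 = mem[0];

	mem[0] = 0;
	for (offset = 0x100000; offset < size; offset += 0x4000) {
		uint16_t saved = mem[offset / 2];

		mem[offset / 2] = 0xaa55;
		if (mem[offset / 2] != 0xaa55)	/* write did not stick */
			break;
		if (mem[0] != 0)		/* aliased back to offset 0 */
			break;
		mem[offset / 2] = saved;
	}
	mem[0] = orig0;

	return offset - 65536;			/* keep a 64 KiB reserve */
}

int main(void)
{
	size_t aperture = 16 * 1024 * 1024;	/* pretend 16 MiB BAR */
	uint16_t *mem = calloc(1, aperture);

	printf("usable VRAM: %zu bytes\n", probe_vram(mem, aperture));
	free(mem);
	return 0;
}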
/*
* DRM driver
*/
@@ -46,263 +110,87 @@ static const struct drm_driver mgag200_driver = {
* DRM device
*/
-static bool mgag200_has_sgram(struct mga_device *mdev)
+resource_size_t mgag200_device_probe_vram(struct mga_device *mdev)
{
- struct drm_device *dev = &mdev->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- u32 option;
- int ret;
-
- ret = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
- if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret))
- return false;
-
- return !!(option & PCI_MGA_OPTION_HARDPWMSK);
+ return mgag200_probe_vram(mdev->vram, resource_size(mdev->vram_res));
}
-static int mgag200_regs_init(struct mga_device *mdev)
+int mgag200_device_preinit(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- u32 option, option2;
- u8 crtcext3;
- int ret;
-
- ret = drmm_mutex_init(dev, &mdev->rmmio_lock);
- if (ret)
- return ret;
-
- switch (mdev->type) {
- case G200_PCI:
- case G200_AGP:
- if (mgag200_has_sgram(mdev))
- option = 0x4049cd21;
- else
- option = 0x40499121;
- option2 = 0x00008000;
- break;
- case G200_SE_A:
- case G200_SE_B:
- option = 0x40049120;
- if (mgag200_has_sgram(mdev))
- option |= PCI_MGA_OPTION_HARDPWMSK;
- option2 = 0x00008000;
- break;
- case G200_WB:
- case G200_EW3:
- option = 0x41049120;
- option2 = 0x0000b000;
- break;
- case G200_EV:
- option = 0x00000120;
- option2 = 0x0000b000;
- break;
- case G200_EH:
- case G200_EH3:
- option = 0x00000120;
- option2 = 0x0000b000;
- break;
- default:
- option = 0;
- option2 = 0;
- }
-
- if (option)
- pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
- if (option2)
- pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
+ resource_size_t start, len;
+ struct resource *res;
/* BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(pdev, 1);
- mdev->rmmio_size = pci_resource_len(pdev, 1);
- if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
- mdev->rmmio_size, "mgadrmfb_mmio")) {
- drm_err(dev, "can't reserve mmio registers\n");
- return -ENOMEM;
+ start = pci_resource_start(pdev, 1);
+ len = pci_resource_len(pdev, 1);
+
+ res = devm_request_mem_region(dev->dev, start, len, "mgadrmfb_mmio");
+ if (!res) {
+ drm_err(dev, "devm_request_mem_region(MMIO) failed\n");
+ return -ENXIO;
}
+ mdev->rmmio_res = res;
mdev->rmmio = pcim_iomap(pdev, 1, 0);
- if (mdev->rmmio == NULL)
+ if (!mdev->rmmio)
return -ENOMEM;
- RREG_ECRT(0x03, crtcext3);
- crtcext3 |= MGAREG_CRTCEXT3_MGAMODE;
- WREG_ECRT(0x03, crtcext3);
-
- return 0;
-}
-
-static void mgag200_g200_interpret_bios(struct mga_device *mdev,
- const unsigned char *bios,
- size_t size)
-{
- static const char matrox[] = {'M', 'A', 'T', 'R', 'O', 'X'};
- static const unsigned int expected_length[6] = {
- 0, 64, 64, 64, 128, 128
- };
- struct drm_device *dev = &mdev->base;
- const unsigned char *pins;
- unsigned int pins_len, version;
- int offset;
- int tmp;
-
- /* Test for MATROX string. */
- if (size < 45 + sizeof(matrox))
- return;
- if (memcmp(&bios[45], matrox, sizeof(matrox)) != 0)
- return;
-
- /* Get the PInS offset. */
- if (size < MGA_BIOS_OFFSET + 2)
- return;
- offset = (bios[MGA_BIOS_OFFSET + 1] << 8) | bios[MGA_BIOS_OFFSET];
-
- /* Get PInS data structure. */
-
- if (size < offset + 6)
- return;
- pins = bios + offset;
- if (pins[0] == 0x2e && pins[1] == 0x41) {
- version = pins[5];
- pins_len = pins[2];
- } else {
- version = 1;
- pins_len = pins[0] + (pins[1] << 8);
- }
-
- if (version < 1 || version > 5) {
- drm_warn(dev, "Unknown BIOS PInS version: %d\n", version);
- return;
- }
- if (pins_len != expected_length[version]) {
- drm_warn(dev, "Unexpected BIOS PInS size: %d expected: %d\n",
- pins_len, expected_length[version]);
- return;
- }
- if (size < offset + pins_len)
- return;
+ /* BAR 0 is VRAM */
- drm_dbg_kms(dev, "MATROX BIOS PInS version %d size: %d found\n",
- version, pins_len);
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
- /* Extract the clock values */
-
- switch (version) {
- case 1:
- tmp = pins[24] + (pins[25] << 8);
- if (tmp)
- mdev->model.g200.pclk_max = tmp * 10;
- break;
- case 2:
- if (pins[41] != 0xff)
- mdev->model.g200.pclk_max = (pins[41] + 100) * 1000;
- break;
- case 3:
- if (pins[36] != 0xff)
- mdev->model.g200.pclk_max = (pins[36] + 100) * 1000;
- if (pins[52] & 0x20)
- mdev->model.g200.ref_clk = 14318;
- break;
- case 4:
- if (pins[39] != 0xff)
- mdev->model.g200.pclk_max = pins[39] * 4 * 1000;
- if (pins[92] & 0x01)
- mdev->model.g200.ref_clk = 14318;
- break;
- case 5:
- tmp = pins[4] ? 8000 : 6000;
- if (pins[123] != 0xff)
- mdev->model.g200.pclk_min = pins[123] * tmp;
- if (pins[38] != 0xff)
- mdev->model.g200.pclk_max = pins[38] * tmp;
- if (pins[110] & 0x01)
- mdev->model.g200.ref_clk = 14318;
- break;
- default:
- break;
+ res = devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram");
+ if (!res) {
+ drm_err(dev, "devm_request_mem_region(VRAM) failed\n");
+ return -ENXIO;
}
-}
-
-static void mgag200_g200_init_refclk(struct mga_device *mdev)
-{
- struct drm_device *dev = &mdev->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- unsigned char __iomem *rom;
- unsigned char *bios;
- size_t size;
-
- mdev->model.g200.pclk_min = 50000;
- mdev->model.g200.pclk_max = 230000;
- mdev->model.g200.ref_clk = 27050;
+ mdev->vram_res = res;
- rom = pci_map_rom(pdev, &size);
- if (!rom)
- return;
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, res->start, resource_size(res));
+ devm_arch_phys_wc_add(dev->dev, res->start, resource_size(res));
- bios = vmalloc(size);
- if (!bios)
- goto out;
- memcpy_fromio(bios, rom, size);
-
- if (size != 0 && bios[0] == 0x55 && bios[1] == 0xaa)
- mgag200_g200_interpret_bios(mdev, bios, size);
-
- drm_dbg_kms(dev, "pclk_min: %ld pclk_max: %ld ref_clk: %ld\n",
- mdev->model.g200.pclk_min, mdev->model.g200.pclk_max,
- mdev->model.g200.ref_clk);
+ mdev->vram = devm_ioremap(dev->dev, res->start, resource_size(res));
+ if (!mdev->vram)
+ return -ENOMEM;
- vfree(bios);
-out:
- pci_unmap_rom(pdev, rom);
+ return 0;
}
-static void mgag200_g200se_init_unique_id(struct mga_device *mdev)
+int mgag200_device_init(struct mga_device *mdev, enum mga_type type,
+ const struct mgag200_device_info *info)
{
struct drm_device *dev = &mdev->base;
-
- /* stash G200 SE model number for later use */
- mdev->model.g200se.unique_rev_id = RREG32(0x1e24);
-
- drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
- mdev->model.g200se.unique_rev_id);
-}
-
-static struct mga_device *
-mgag200_device_create(struct pci_dev *pdev, enum mga_type type, unsigned long flags)
-{
- struct mga_device *mdev;
- struct drm_device *dev;
+ u8 crtcext3, misc;
int ret;
- mdev = devm_drm_dev_alloc(&pdev->dev, &mgag200_driver, struct mga_device, base);
- if (IS_ERR(mdev))
- return mdev;
- dev = &mdev->base;
-
- pci_set_drvdata(pdev, dev);
-
- mdev->flags = flags;
+ mdev->info = info;
mdev->type = type;
- ret = mgag200_regs_init(mdev);
+ ret = drmm_mutex_init(dev, &mdev->rmmio_lock);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- if (mdev->type == G200_PCI || mdev->type == G200_AGP)
- mgag200_g200_init_refclk(mdev);
- else if (IS_G200_SE(mdev))
- mgag200_g200se_init_unique_id(mdev);
+ mutex_lock(&mdev->rmmio_lock);
- ret = mgag200_mm_init(mdev);
- if (ret)
- return ERR_PTR(ret);
+ RREG_ECRT(0x03, crtcext3);
+ crtcext3 |= MGAREG_CRTCEXT3_MGAMODE;
+ WREG_ECRT(0x03, crtcext3);
- ret = mgag200_modeset_init(mdev);
- if (ret)
- return ERR_PTR(ret);
+ WREG_ECRT(0x04, 0x00);
- return mdev;
+ misc = RREG8(MGA_MISC_IN);
+ misc |= MGAREG_MISC_RAMMAPEN |
+ MGAREG_MISC_HIGH_PG_SEL;
+ WREG8(MGA_MISC_OUT, misc);
+
+ mutex_unlock(&mdev->rmmio_lock);
+
+ return 0;
}
/*
@@ -312,8 +200,7 @@ mgag200_device_create(struct pci_dev *pdev, enum mga_type type, unsigned long fl
static const struct pci_device_id mgag200_pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_PCI },
{ PCI_VENDOR_ID_MATROX, 0x521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_AGP },
- { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
@@ -326,22 +213,10 @@ static const struct pci_device_id mgag200_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, mgag200_pciidlist);
-static enum mga_type mgag200_type_from_driver_data(kernel_ulong_t driver_data)
-{
- return (enum mga_type)(driver_data & MGAG200_TYPE_MASK);
-}
-
-static unsigned long mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
-{
- return driver_data & MGAG200_FLAG_MASK;
-}
-
static int
mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- kernel_ulong_t driver_data = ent->driver_data;
- enum mga_type type = mgag200_type_from_driver_data(driver_data);
- unsigned long flags = mgag200_flags_from_driver_data(driver_data);
+ enum mga_type type = (enum mga_type)ent->driver_data;
struct mga_device *mdev;
struct drm_device *dev;
int ret;
@@ -354,7 +229,37 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- mdev = mgag200_device_create(pdev, type, flags);
+ switch (type) {
+ case G200_PCI:
+ case G200_AGP:
+ mdev = mgag200_g200_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_SE_A:
+ case G200_SE_B:
+ mdev = mgag200_g200se_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_WB:
+ mdev = mgag200_g200wb_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_EV:
+ mdev = mgag200_g200ev_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_EH:
+ mdev = mgag200_g200eh_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_EH3:
+ mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_ER:
+ mdev = mgag200_g200er_device_create(pdev, &mgag200_driver, type);
+ break;
+ case G200_EW3:
+ mdev = mgag200_g200ew3_device_create(pdev, &mgag200_driver, type);
+ break;
+ default:
+ dev_err(&pdev->dev, "Device type %d is unsupported\n", type);
+ return -ENODEV;
+ }
if (IS_ERR(mdev))
return PTR_ERR(mdev);
dev = &mdev->base;
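The reworked PCI probe above drops the flag bits that used to be packed into driver_data and instead switches on the mga_type from the PCI ID table, delegating to one *_device_create() constructor per model. A compact standalone sketch of that dispatch follows, with dummy constructors and a reduced type list; it only mirrors the control flow, not the real device setup.

#include <stdio.h>

enum mga_type { G200_PCI, G200_SE_A, G200_EH, G200_UNKNOWN };

struct mga_device { enum mga_type type; };

/* stand-ins for the per-model constructors added by this patch */
static struct mga_device *g200_create(enum mga_type t)
{
	static struct mga_device dev;

	dev.type = t;
	return &dev;
}

static struct mga_device *g200se_create(enum mga_type t) { return g200_create(t); }
static struct mga_device *g200eh_create(enum mga_type t) { return g200_create(t); }

static struct mga_device *probe(enum mga_type type)
{
	switch (type) {
	case G200_PCI:
		return g200_create(type);
	case G200_SE_A:
		return g200se_create(type);
	case G200_EH:
		return g200eh_create(type);
	default:
		fprintf(stderr, "Device type %d is unsupported\n", type);
		return NULL;
	}
}

int main(void)
{
	struct mga_device *mdev = probe(G200_SE_A);

	if (mdev)
		printf("created device, type=%d\n", mdev->type);
	return 0;
}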
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index a18384c41fc4..301c4ab46539 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -168,8 +168,6 @@ static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_s
return container_of(base, struct mgag200_crtc_state, base);
}
-#define to_mga_connector(x) container_of(x, struct mga_connector, base)
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -177,17 +175,6 @@ struct mga_i2c_chan {
int data, clock;
};
-struct mga_connector {
- struct drm_connector base;
- struct mga_i2c_chan *i2c;
-};
-
-struct mga_mc {
- resource_size_t vram_size;
- resource_size_t vram_base;
- resource_size_t vram_window;
-};
-
enum mga_type {
G200_PCI,
G200_AGP,
@@ -201,44 +188,66 @@ enum mga_type {
G200_EW3,
};
-/* HW does not handle 'startadd' field correct. */
-#define MGAG200_FLAG_HW_BUG_NO_STARTADD (1ul << 8)
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
-#define MGAG200_TYPE_MASK (0x000000ff)
-#define MGAG200_FLAG_MASK (0x00ffff00)
+struct mgag200_device_info {
+ u16 max_hdisplay;
+ u16 max_vdisplay;
+
+ /*
+ * Maximum memory bandwidth (MiB/sec). Setting this to zero disables
+ * the rsp test during mode validation.
+ */
+ unsigned long max_mem_bandwidth;
+
+ /* HW has external source (e.g., BMC) to synchronize with */
+ bool has_vidrst:1;
+
+ struct {
+ unsigned data_bit:3;
+ unsigned clock_bit:3;
+ } i2c;
+
+ /*
+ * HW does not handle the 'startadd' register correctly. Always set
+ * its value to 0.
+ */
+ bool bug_no_startadd:1;
+};
-#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+#define MGAG200_DEVICE_INFO_INIT(_max_hdisplay, _max_vdisplay, _max_mem_bandwidth, \
+ _has_vidrst, _i2c_data_bit, _i2c_clock_bit, \
+ _bug_no_startadd) \
+ { \
+ .max_hdisplay = (_max_hdisplay), \
+ .max_vdisplay = (_max_vdisplay), \
+ .max_mem_bandwidth = (_max_mem_bandwidth), \
+ .has_vidrst = (_has_vidrst), \
+ .i2c = { \
+ .data_bit = (_i2c_data_bit), \
+ .clock_bit = (_i2c_clock_bit), \
+ }, \
+ .bug_no_startadd = (_bug_no_startadd), \
+ }
struct mga_device {
- struct drm_device base;
- unsigned long flags;
+ struct drm_device base;
- struct mutex rmmio_lock; /* Protects access to rmmio */
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
- void __iomem *rmmio;
+ const struct mgag200_device_info *info;
- struct mga_mc mc;
+ struct resource *rmmio_res;
+ void __iomem *rmmio;
+ struct mutex rmmio_lock; /* Protects access to rmmio */
+ struct resource *vram_res;
void __iomem *vram;
- size_t vram_fb_available;
+ resource_size_t vram_available;
enum mga_type type;
- union {
- struct {
- long ref_clk;
- long pclk_min;
- long pclk_max;
- } g200;
- struct {
- /* SE model number stored in reg 0x1e24 */
- u32 unique_rev_id;
- } g200se;
- } model;
-
- struct mga_connector connector;
struct mgag200_pll pixpll;
+ struct mga_i2c_chan i2c;
+ struct drm_connector connector;
struct drm_simple_display_pipe display_pipe;
};
@@ -247,15 +256,64 @@ static inline struct mga_device *to_mga_device(struct drm_device *dev)
return container_of(dev, struct mga_device, base);
}
+struct mgag200_g200_device {
+ struct mga_device base;
+
+ /* PLL constants */
+ long ref_clk;
+ long pclk_min;
+ long pclk_max;
+};
+
+static inline struct mgag200_g200_device *to_mgag200_g200_device(struct drm_device *dev)
+{
+ return container_of(to_mga_device(dev), struct mgag200_g200_device, base);
+}
+
+struct mgag200_g200se_device {
+ struct mga_device base;
+
+ /* SE model number stored in reg 0x1e24 */
+ u32 unique_rev_id;
+};
+
+static inline struct mgag200_g200se_device *to_mgag200_g200se_device(struct drm_device *dev)
+{
+ return container_of(to_mga_device(dev), struct mgag200_g200se_device, base);
+}
+
+ /* mgag200_drv.c */
+int mgag200_init_pci_options(struct pci_dev *pdev, u32 option, u32 option2);
+resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size);
+resource_size_t mgag200_device_probe_vram(struct mga_device *mdev);
+int mgag200_device_preinit(struct mga_device *mdev);
+int mgag200_device_init(struct mga_device *mdev, enum mga_type type,
+ const struct mgag200_device_info *info);
+
+ /* mgag200_<device type>.c */
+struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type);
+
/* mgag200_mode.c */
-int mgag200_modeset_init(struct mga_device *mdev);
+int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_fb_available);
/* mgag200_i2c.c */
-struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
-void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
-
- /* mgag200_mm.c */
-int mgag200_mm_init(struct mga_device *mdev);
+int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c);
/* mgag200_pll.c */
int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev);
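The header changes above model per-chip state by embedding struct mga_device inside chip-specific structures (mgag200_g200_device, mgag200_g200se_device) and recovering the outer structure with container_of(), as in to_mgag200_g200_device(). The self-contained sketch below implements the same downcast idiom in userspace with offsetof(); the field values are invented.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mga_device {
	int type;
};

/* "subclass": embeds the base device plus G200-only PLL constants */
struct mgag200_g200_device {
	struct mga_device base;
	long ref_clk;
	long pclk_min;
	long pclk_max;
};

static struct mgag200_g200_device *to_g200(struct mga_device *mdev)
{
	return container_of(mdev, struct mgag200_g200_device, base);
}

int main(void)
{
	struct mgag200_g200_device g200 = {
		.base = { .type = 0 },
		.ref_clk = 27050, .pclk_min = 50000, .pclk_max = 230000,
	};
	struct mga_device *mdev = &g200.base;	/* code that only sees the base */

	/* recover the G200-specific data from the base pointer */
	printf("ref_clk = %ld kHz\n", to_g200(mdev)->ref_clk);
	return 0;
}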
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
new file mode 100644
index 000000000000..674385921b7f
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+static int mgag200_g200_init_pci_options(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ bool has_sgram;
+ u32 option;
+ int err;
+
+ err = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ if (err != PCIBIOS_SUCCESSFUL) {
+ dev_err(dev, "pci_read_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
+ return pcibios_err_to_errno(err);
+ }
+
+ has_sgram = !!(option & PCI_MGA_OPTION_HARDPWMSK);
+
+ if (has_sgram)
+ option = 0x4049cd21;
+ else
+ option = 0x40499121;
+
+ return mgag200_init_pci_options(pdev, option, 0x00008000);
+}
+
+/*
+ * DRM Device
+ */
+
+static const struct mgag200_device_info mgag200_g200_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 3, false);
+
+static void mgag200_g200_interpret_bios(struct mgag200_g200_device *g200,
+ const unsigned char *bios, size_t size)
+{
+ static const char matrox[] = {'M', 'A', 'T', 'R', 'O', 'X'};
+ static const unsigned int expected_length[6] = {
+ 0, 64, 64, 64, 128, 128
+ };
+ struct mga_device *mdev = &g200->base;
+ struct drm_device *dev = &mdev->base;
+ const unsigned char *pins;
+ unsigned int pins_len, version;
+ int offset;
+ int tmp;
+
+ /* Test for MATROX string. */
+ if (size < 45 + sizeof(matrox))
+ return;
+ if (memcmp(&bios[45], matrox, sizeof(matrox)) != 0)
+ return;
+
+ /* Get the PInS offset. */
+ if (size < MGA_BIOS_OFFSET + 2)
+ return;
+ offset = (bios[MGA_BIOS_OFFSET + 1] << 8) | bios[MGA_BIOS_OFFSET];
+
+ /* Get PInS data structure. */
+
+ if (size < offset + 6)
+ return;
+ pins = bios + offset;
+ if (pins[0] == 0x2e && pins[1] == 0x41) {
+ version = pins[5];
+ pins_len = pins[2];
+ } else {
+ version = 1;
+ pins_len = pins[0] + (pins[1] << 8);
+ }
+
+ if (version < 1 || version > 5) {
+ drm_warn(dev, "Unknown BIOS PInS version: %d\n", version);
+ return;
+ }
+ if (pins_len != expected_length[version]) {
+ drm_warn(dev, "Unexpected BIOS PInS size: %d expected: %d\n",
+ pins_len, expected_length[version]);
+ return;
+ }
+ if (size < offset + pins_len)
+ return;
+
+ drm_dbg_kms(dev, "MATROX BIOS PInS version %d size: %d found\n", version, pins_len);
+
+ /* Extract the clock values */
+
+ switch (version) {
+ case 1:
+ tmp = pins[24] + (pins[25] << 8);
+ if (tmp)
+ g200->pclk_max = tmp * 10;
+ break;
+ case 2:
+ if (pins[41] != 0xff)
+ g200->pclk_max = (pins[41] + 100) * 1000;
+ break;
+ case 3:
+ if (pins[36] != 0xff)
+ g200->pclk_max = (pins[36] + 100) * 1000;
+ if (pins[52] & 0x20)
+ g200->ref_clk = 14318;
+ break;
+ case 4:
+ if (pins[39] != 0xff)
+ g200->pclk_max = pins[39] * 4 * 1000;
+ if (pins[92] & 0x01)
+ g200->ref_clk = 14318;
+ break;
+ case 5:
+ tmp = pins[4] ? 8000 : 6000;
+ if (pins[123] != 0xff)
+ g200->pclk_min = pins[123] * tmp;
+ if (pins[38] != 0xff)
+ g200->pclk_max = pins[38] * tmp;
+ if (pins[110] & 0x01)
+ g200->ref_clk = 14318;
+ break;
+ default:
+ break;
+ }
+}
+
+static void mgag200_g200_init_refclk(struct mgag200_g200_device *g200)
+{
+ struct mga_device *mdev = &g200->base;
+ struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned char __iomem *rom;
+ unsigned char *bios;
+ size_t size;
+
+ g200->pclk_min = 50000;
+ g200->pclk_max = 230000;
+ g200->ref_clk = 27050;
+
+ rom = pci_map_rom(pdev, &size);
+ if (!rom)
+ return;
+
+ bios = vmalloc(size);
+ if (!bios)
+ goto out;
+ memcpy_fromio(bios, rom, size);
+
+ if (size != 0 && bios[0] == 0x55 && bios[1] == 0xaa)
+ mgag200_g200_interpret_bios(g200, bios, size);
+
+ drm_dbg_kms(dev, "pclk_min: %ld pclk_max: %ld ref_clk: %ld\n",
+ g200->pclk_min, g200->pclk_max, g200->ref_clk);
+
+ vfree(bios);
+out:
+ pci_unmap_rom(pdev, rom);
+}
+
+struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mgag200_g200_device *g200;
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ g200 = devm_drm_dev_alloc(&pdev->dev, drv, struct mgag200_g200_device, base.base);
+ if (IS_ERR(g200))
+ return ERR_CAST(g200);
+ mdev = &g200->base;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_g200_init_pci_options(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mgag200_g200_init_refclk(g200);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
new file mode 100644
index 000000000000..1b9a22728744
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200eh_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 37500, false, 1, 0, false);
+
+struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200eh_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
new file mode 100644
index 000000000000..438cda1b14c9
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200eh3_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);
+
+struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200eh3_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
new file mode 100644
index 000000000000..0790d4e6463d
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200er_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 1, 0, false);
+
+struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200er_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
new file mode 100644
index 000000000000..5353422d0eef
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200ev_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 32700, false, 0, 1, false);
+
+struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200ev_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
new file mode 100644
index 000000000000..3bfc1324cf78
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200ew3_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
+
+static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev)
+{
+ resource_size_t vram_size = resource_size(mdev->vram_res);
+
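+	/* The top 4 MiB of VRAM is presumably reserved on G200EW3 (e.g. for the BMC); don't probe it. */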
+ if (vram_size >= 0x1000000)
+ vram_size = vram_size - 0x400000;
+ return mgag200_probe_vram(mdev->vram, vram_size);
+}
+
+struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x41049120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200ew3_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_g200ew3_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
new file mode 100644
index 000000000000..0a3e66695e22
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+static int mgag200_g200se_init_pci_options(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ bool has_sgram;
+ u32 option;
+ int err;
+
+ err = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ if (err != PCIBIOS_SUCCESSFUL) {
+ dev_err(dev, "pci_read_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
+ return pcibios_err_to_errno(err);
+ }
+
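+	/* Preserve the SGRAM bit that the firmware presumably programmed; rewrite the rest of OPTION. */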
+ has_sgram = !!(option & PCI_MGA_OPTION_HARDPWMSK);
+
+ option = 0x40049120;
+ if (has_sgram)
+ option |= PCI_MGA_OPTION_HARDPWMSK;
+
+ return mgag200_init_pci_options(pdev, option, 0x00008000);
+}
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200se_a_01_device_info =
+ MGAG200_DEVICE_INFO_INIT(1600, 1200, 24400, false, 0, 1, true);
+
+static const struct mgag200_device_info mgag200_g200se_a_02_device_info =
+ MGAG200_DEVICE_INFO_INIT(1920, 1200, 30100, false, 0, 1, true);
+
+static const struct mgag200_device_info mgag200_g200se_a_03_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 0, 1, false);
+
+static const struct mgag200_device_info mgag200_g200se_b_01_device_info =
+ MGAG200_DEVICE_INFO_INIT(1600, 1200, 24400, false, 0, 1, false);
+
+static const struct mgag200_device_info mgag200_g200se_b_02_device_info =
+ MGAG200_DEVICE_INFO_INIT(1920, 1200, 30100, false, 0, 1, false);
+
+static const struct mgag200_device_info mgag200_g200se_b_03_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 0, 1, false);
+
+static int mgag200_g200se_init_unique_rev_id(struct mgag200_g200se_device *g200se)
+{
+ struct mga_device *mdev = &g200se->base;
+ struct drm_device *dev = &mdev->base;
+
+	/* stash the G200 SE unique revision id for later use */
+ g200se->unique_rev_id = RREG32(0x1e24);
+ if (!g200se->unique_rev_id)
+ return -ENODEV;
+
+ drm_dbg(dev, "G200 SE unique revision id is 0x%x\n", g200se->unique_rev_id);
+
+ return 0;
+}
+
+struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mgag200_g200se_device *g200se;
+ const struct mgag200_device_info *info;
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ g200se = devm_drm_dev_alloc(&pdev->dev, drv, struct mgag200_g200se_device, base.base);
+ if (IS_ERR(g200se))
+ return ERR_CAST(g200se);
+ mdev = &g200se->base;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_g200se_init_pci_options(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_g200se_init_unique_rev_id(g200se);
+ if (ret)
+ return ERR_PTR(ret);
+
+ switch (type) {
+ case G200_SE_A:
+ if (g200se->unique_rev_id >= 0x03)
+ info = &mgag200_g200se_a_03_device_info;
+ else if (g200se->unique_rev_id >= 0x02)
+ info = &mgag200_g200se_a_02_device_info;
+ else
+ info = &mgag200_g200se_a_01_device_info;
+ break;
+ case G200_SE_B:
+ if (g200se->unique_rev_id >= 0x03)
+ info = &mgag200_g200se_b_03_device_info;
+ else if (g200se->unique_rev_id >= 0x02)
+ info = &mgag200_g200se_b_02_device_info;
+ else
+ info = &mgag200_g200se_b_01_device_info;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = mgag200_device_init(mdev, type, info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
new file mode 100644
index 000000000000..c8450ac8eaec
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200wb_device_info =
+ MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
+
+struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
+ enum mga_type type)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x41049120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, type, &mgag200_g200wb_device_info);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_modeset_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index ac8e34eef513..0c48bdf3e7f8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -86,44 +86,25 @@ static int mga_gpio_getscl(void *data)
return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
}
-struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+static void mgag200_i2c_release(void *res)
{
- struct mga_device *mdev = to_mga_device(dev);
- struct mga_i2c_chan *i2c;
+ struct mga_i2c_chan *i2c = res;
+
+ i2c_del_adapter(&i2c->adapter);
+}
+
+int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
+{
+ struct drm_device *dev = &mdev->base;
+ const struct mgag200_device_info *info = mdev->info;
int ret;
- int data, clock;
WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
WREG_DAC(MGA1064_GEN_IO_CTL, 0);
- switch (mdev->type) {
- case G200_SE_A:
- case G200_SE_B:
- case G200_EV:
- case G200_WB:
- case G200_EW3:
- data = 1;
- clock = 2;
- break;
- case G200_EH:
- case G200_EH3:
- case G200_ER:
- data = 2;
- clock = 1;
- break;
- default:
- data = 2;
- clock = 8;
- break;
- }
-
- i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
- if (!i2c)
- return NULL;
-
- i2c->data = data;
- i2c->clock = clock;
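+	/* The DDC data/clock GPIO bits differ between models; take them from the per-model device info. */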
+ i2c->data = BIT(info->i2c.data_bit);
+ i2c->clock = BIT(info->i2c.clock_bit);
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
@@ -142,18 +123,8 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
i2c->bit.getscl = mga_gpio_getscl;
ret = i2c_bit_add_bus(&i2c->adapter);
- if (ret) {
- kfree(i2c);
- i2c = NULL;
- }
- return i2c;
-}
+ if (ret)
+ return ret;
-void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
+ return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
}
-
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
deleted file mode 100644
index fa996d46feed..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_managed.h>
-
-#include "mgag200_drv.h"
-
-static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
- size_t size)
-{
- int offset;
- int orig;
- int test1, test2;
- int orig1, orig2;
- size_t vram_size;
-
- /* Probe */
- orig = ioread16(mem);
- iowrite16(0, mem);
-
- vram_size = size;
-
- if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000))
- vram_size = vram_size - 0x400000;
-
- for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
- orig1 = ioread8(mem + offset);
- orig2 = ioread8(mem + offset + 0x100);
-
- iowrite16(0xaa55, mem + offset);
- iowrite16(0xaa55, mem + offset + 0x100);
-
- test1 = ioread16(mem + offset);
- test2 = ioread16(mem);
-
- iowrite16(orig1, mem + offset);
- iowrite16(orig2, mem + offset + 0x100);
-
- if (test1 != 0xaa55)
- break;
-
- if (test2)
- break;
- }
-
- iowrite16(orig, mem);
-
- return offset - 65536;
-}
-
-int mgag200_mm_init(struct mga_device *mdev)
-{
- struct drm_device *dev = &mdev->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- u8 misc;
- resource_size_t start, len;
-
- WREG_ECRT(0x04, 0x00);
-
- misc = RREG8(MGA_MISC_IN);
- misc |= MGAREG_MISC_RAMMAPEN |
- MGAREG_MISC_HIGH_PG_SEL;
- WREG8(MGA_MISC_OUT, misc);
-
- /* BAR 0 is VRAM */
- start = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
-
- if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) {
- drm_err(dev, "can't reserve VRAM\n");
- return -ENXIO;
- }
-
- /* Don't fail on errors, but performance might be reduced. */
- devm_arch_io_reserve_memtype_wc(dev->dev, start, len);
- devm_arch_phys_wc_add(dev->dev, start, len);
-
- mdev->vram = devm_ioremap(dev->dev, start, len);
- if (!mdev->vram)
- return -ENOMEM;
-
- mdev->mc.vram_size = mgag200_probe_vram(mdev, mdev->vram, len);
- mdev->mc.vram_base = start;
- mdev->mc.vram_window = len;
-
- mdev->vram_fb_available = mdev->mc.vram_size;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index abde7655477d..225cca2ed60e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -17,6 +17,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
@@ -32,57 +33,78 @@
* This file contains setup code for the CRTC.
*/
-static void mga_crtc_load_lut(struct drm_crtc *crtc)
+static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+ const struct drm_format_info *format)
{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = to_mga_device(dev);
- struct drm_framebuffer *fb;
- u16 *r_ptr, *g_ptr, *b_ptr;
int i;
- if (!crtc->enabled)
- return;
-
- if (!mdev->display_pipe.plane.state)
- return;
+ WREG8(DAC_INDEX + MGA1064_INDEX, 0);
- fb = mdev->display_pipe.plane.state->fb;
+ switch (format->format) {
+ case DRM_FORMAT_RGB565:
+ /* Use better interpolation, to take 32 values from 0 to 255 */
+		/* Interpolate so that 32 palette entries span the full 0 to 255 range. */
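+		/* i * 8 + i / 4 maps index 0..31 onto 0..255; i * 4 + i / 16 maps the 64 green indices onto 0..255. */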
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
+ }
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
+ }
+ break;
+ default:
+ drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
- r_ptr = crtc->gamma_store;
- g_ptr = r_ptr + crtc->gamma_size;
- b_ptr = g_ptr + crtc->gamma_size;
+static void mgag200_crtc_set_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ int i;
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
- if (fb && fb->format->cpp[0] * 8 == 16) {
- int inc = (fb->format->depth == 15) ? 8 : 4;
- u8 r, b;
- for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
- if (fb->format->depth == 16) {
- if (i > (MGAG200_LUT_SIZE >> 1)) {
- r = b = 0;
- } else {
- r = *r_ptr++ >> 8;
- b = *b_ptr++ >> 8;
- r_ptr++;
- b_ptr++;
- }
- } else {
- r = *r_ptr++ >> 8;
- b = *b_ptr++ >> 8;
- }
- /* VGA registers */
- WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, *g_ptr++ >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+ switch (format->format) {
+ case DRM_FORMAT_RGB565:
+		/* Interpolate so that 32 palette entries sample the gamma LUT from lut[0] to lut[255]. */
+ for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].red >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].blue >> 8);
}
- return;
- }
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- /* VGA registers */
- WREG8(DAC_INDEX + MGA1064_COL_PAL, *r_ptr++ >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, *g_ptr++ >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, *b_ptr++ >> 8);
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
+ }
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].red >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].green >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].blue >> 8);
+ }
+ break;
+ default:
+ drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
}
}
@@ -223,6 +245,9 @@ static void mgag200_set_startadd(struct mga_device *mdev,
startadd = offset / 8;
+ if (startadd > 0)
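+	/* Some models cannot program a non-zero start address; warn if one slips through. */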
+ drm_WARN_ON_ONCE(dev, mdev->info->bug_no_startadd);
+
/*
* Can't store addresses any higher than that, but we also
* don't have more than 16 MiB of memory, so it should be fine.
@@ -353,6 +378,7 @@ static void mgag200_init_regs(struct mga_device *mdev)
static void mgag200_set_mode_regs(struct mga_device *mdev,
const struct drm_display_mode *mode)
{
+ const struct mgag200_device_info *info = mdev->info;
unsigned int hdisplay, hsyncstart, hsyncend, htotal;
unsigned int vdisplay, vsyncstart, vsyncend, vtotal;
u8 misc, crtcext1, crtcext2, crtcext5;
@@ -387,9 +413,9 @@ static void mgag200_set_mode_regs(struct mga_device *mdev,
((hdisplay & 0x100) >> 7) |
((hsyncstart & 0x100) >> 6) |
(htotal & 0x40);
- if (mdev->type == G200_WB || mdev->type == G200_EW3)
- crtcext1 |= BIT(7) | /* vrsten */
- BIT(3); /* hrsten */
+ if (info->has_vidrst)
+ crtcext1 |= MGAREG_CRTCEXT1_VRSTEN |
+ MGAREG_CRTCEXT1_HRSTEN;
crtcext2 = ((vtotal & 0xc00) >> 10) |
((vdisplay & 0x400) >> 8) |
@@ -559,13 +585,13 @@ static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb)
{
- u32 unique_rev_id = mdev->model.g200se.unique_rev_id;
+ struct mgag200_g200se_device *g200se = to_mgag200_g200se_device(&mdev->base);
unsigned int hiprilvl;
u8 crtcext6;
- if (unique_rev_id >= 0x04) {
+ if (g200se->unique_rev_id >= 0x04) {
hiprilvl = 0;
- } else if (unique_rev_id >= 0x02) {
+ } else if (g200se->unique_rev_id >= 0x02) {
unsigned int bpp;
unsigned long mb;
@@ -590,7 +616,7 @@ static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
else
hiprilvl = 5;
- } else if (unique_rev_id >= 0x01) {
+ } else if (g200se->unique_rev_id >= 0x01) {
hiprilvl = 3;
} else {
hiprilvl = 4;
@@ -665,176 +691,34 @@ static void mgag200_disable_display(struct mga_device *mdev)
* Connector
*/
-static int mga_vga_get_modes(struct drm_connector *connector)
-{
- struct mga_connector *mga_connector = to_mga_connector(connector);
- struct edid *edid;
- int ret = 0;
-
- edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
- return ret;
-}
-
-static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
- int bits_per_pixel)
-{
- uint32_t total_area, divisor;
- uint64_t active_area, pixels_per_second, bandwidth;
- uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
-
- divisor = 1024;
-
- if (!mode->htotal || !mode->vtotal || !mode->clock)
- return 0;
-
- active_area = mode->hdisplay * mode->vdisplay;
- total_area = mode->htotal * mode->vtotal;
-
- pixels_per_second = active_area * mode->clock * 1000;
- do_div(pixels_per_second, total_area);
-
- bandwidth = pixels_per_second * bytes_per_pixel * 100;
- do_div(bandwidth, divisor);
-
- return (uint32_t)(bandwidth);
-}
-
-#define MODE_BANDWIDTH MODE_BAD
-
-static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct mga_device *mdev = to_mga_device(dev);
- int bpp = 32;
-
- if (IS_G200_SE(mdev)) {
- u32 unique_rev_id = mdev->model.g200se.unique_rev_id;
-
- if (unique_rev_id == 0x01) {
- if (mode->hdisplay > 1600)
- return MODE_VIRTUAL_X;
- if (mode->vdisplay > 1200)
- return MODE_VIRTUAL_Y;
- if (mga_vga_calculate_mode_bandwidth(mode, bpp)
- > (24400 * 1024))
- return MODE_BANDWIDTH;
- } else if (unique_rev_id == 0x02) {
- if (mode->hdisplay > 1920)
- return MODE_VIRTUAL_X;
- if (mode->vdisplay > 1200)
- return MODE_VIRTUAL_Y;
- if (mga_vga_calculate_mode_bandwidth(mode, bpp)
- > (30100 * 1024))
- return MODE_BANDWIDTH;
- } else {
- if (mga_vga_calculate_mode_bandwidth(mode, bpp)
- > (55000 * 1024))
- return MODE_BANDWIDTH;
- }
- } else if (mdev->type == G200_WB) {
- if (mode->hdisplay > 1280)
- return MODE_VIRTUAL_X;
- if (mode->vdisplay > 1024)
- return MODE_VIRTUAL_Y;
- if (mga_vga_calculate_mode_bandwidth(mode, bpp) >
- (31877 * 1024))
- return MODE_BANDWIDTH;
- } else if (mdev->type == G200_EV &&
- (mga_vga_calculate_mode_bandwidth(mode, bpp)
- > (32700 * 1024))) {
- return MODE_BANDWIDTH;
- } else if (mdev->type == G200_EH &&
- (mga_vga_calculate_mode_bandwidth(mode, bpp)
- > (37500 * 1024))) {
- return MODE_BANDWIDTH;
- } else if (mdev->type == G200_ER &&
- (mga_vga_calculate_mode_bandwidth(mode,
- bpp) > (55000 * 1024))) {
- return MODE_BANDWIDTH;
- }
-
- if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
- (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
- return MODE_H_ILLEGAL;
- }
-
- if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
- mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
- mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
- mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
- return MODE_BAD;
- }
-
- /* Validate the mode input by the user */
- if (connector->cmdline_mode.specified) {
- if (connector->cmdline_mode.bpp_specified)
- bpp = connector->cmdline_mode.bpp;
- }
-
- if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) {
- if (connector->cmdline_mode.specified)
- connector->cmdline_mode.specified = false;
- return MODE_BAD;
- }
+ struct mga_device *mdev = to_mga_device(connector->dev);
+ int ret;
- return MODE_OK;
-}
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&mdev->rmmio_lock);
+ ret = drm_connector_helper_get_modes_from_ddc(connector);
+ mutex_unlock(&mdev->rmmio_lock);
-static void mga_connector_destroy(struct drm_connector *connector)
-{
- struct mga_connector *mga_connector = to_mga_connector(connector);
- mgag200_i2c_destroy(mga_connector->i2c);
- drm_connector_cleanup(connector);
+ return ret;
}
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
- .get_modes = mga_vga_get_modes,
- .mode_valid = mga_vga_mode_valid,
+ .get_modes = mgag200_vga_connector_helper_get_modes,
};
static const struct drm_connector_funcs mga_vga_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mga_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int mgag200_vga_connector_init(struct mga_device *mdev)
-{
- struct drm_device *dev = &mdev->base;
- struct mga_connector *mconnector = &mdev->connector;
- struct drm_connector *connector = &mconnector->base;
- struct mga_i2c_chan *i2c;
- int ret;
-
- i2c = mgag200_i2c_create(dev);
- if (!i2c)
- drm_warn(dev, "failed to add DDC bus\n");
-
- ret = drm_connector_init_with_ddc(dev, connector,
- &mga_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &i2c->adapter);
- if (ret)
- goto err_mgag200_i2c_destroy;
- drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
-
- mconnector->i2c = i2c;
-
- return 0;
-
-err_mgag200_i2c_destroy:
- mgag200_i2c_destroy(i2c);
- return ret;
-}
-
/*
* Simple Display Pipe
*/
@@ -843,6 +727,30 @@ static enum drm_mode_status
mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
+ struct mga_device *mdev = to_mga_device(pipe->crtc.dev);
+ const struct mgag200_device_info *info = mdev->info;
+
+ /*
+ * Some devices have additional limits on the size of the
+ * display mode.
+ */
+ if (mode->hdisplay > info->max_hdisplay)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > info->max_vdisplay)
+ return MODE_VIRTUAL_Y;
+
+ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
+ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
+ return MODE_H_ILLEGAL;
+ }
+
+ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+ mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+ return MODE_BAD;
+ }
+
return MODE_OK;
}
@@ -855,10 +763,6 @@ mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip);
drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip);
-
- /* Always scanout image at VRAM offset 0 */
- mgag200_set_startadd(mdev, (u32)0);
- mgag200_set_offset(mdev, fb);
}
static void
@@ -908,11 +812,19 @@ mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
if (mdev->type == G200_WB || mdev->type == G200_EW3)
mgag200_g200wb_release_bmc(mdev);
- mga_crtc_load_lut(crtc);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, fb->format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, fb->format);
+
mgag200_enable_display(mdev);
mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->data[0]);
+ /* Always scanout image at VRAM offset 0 */
+ mgag200_set_startadd(mdev, (u32)0);
+ mgag200_set_offset(mdev, fb);
+
mutex_unlock(&mdev->rmmio_lock);
}
@@ -955,6 +867,14 @@ mgag200_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
return ret;
}
+ if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
+ if (crtc_state->gamma_lut->length !=
+ MGAG200_LUT_SIZE * sizeof(struct drm_color_lut)) {
+ drm_err(dev, "Wrong size for gamma_lut %zu\n",
+ crtc_state->gamma_lut->length);
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -963,20 +883,30 @@ mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane *plane = &pipe->plane;
+ struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = plane->dev;
struct mga_device *mdev = to_mga_device(dev);
struct drm_plane_state *state = plane->state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect damage;
+ struct drm_atomic_helper_damage_iter iter;
if (!fb)
return;
mutex_lock(&mdev->rmmio_lock);
- if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ if (crtc->state->color_mgmt_changed && crtc->state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, fb->format, crtc->state->gamma_lut->data);
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->data[0]);
+ }
+ /* Always scanout image at VRAM offset 0 */
+ mgag200_set_startadd(mdev, (u32)0);
+ mgag200_set_offset(mdev, fb);
mutex_unlock(&mdev->rmmio_lock);
}
@@ -1056,30 +986,81 @@ static const uint64_t mgag200_simple_display_pipe_fmtmods[] = {
* Mode config
*/
-static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
+/* Calculates a mode's required memory bandwidth (in KiB/sec). */
+static uint32_t mgag200_calculate_mode_bandwidth(const struct drm_display_mode *mode,
+ unsigned int bits_per_pixel)
+{
+ uint32_t total_area, divisor;
+ uint64_t active_area, pixels_per_second, bandwidth;
+ uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
+
+ divisor = 1024;
-static unsigned int mgag200_preferred_depth(struct mga_device *mdev)
+ if (!mode->htotal || !mode->vtotal || !mode->clock)
+ return 0;
+
+ active_area = mode->hdisplay * mode->vdisplay;
+ total_area = mode->htotal * mode->vtotal;
+
+ pixels_per_second = active_area * mode->clock * 1000;
+ do_div(pixels_per_second, total_area);
+
+ bandwidth = pixels_per_second * bytes_per_pixel * 100;
+ do_div(bandwidth, divisor);
+
+ return (uint32_t)bandwidth;
+}
+
+static enum drm_mode_status mgag200_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
{
- if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
- return 16;
- else
- return 32;
+	static const unsigned int max_bpp = 4; // bytes per pixel of DRM_FORMAT_XRGB8888
+ struct mga_device *mdev = to_mga_device(dev);
+ unsigned long fbsize, fbpages, max_fbpages;
+ const struct mgag200_device_info *info = mdev->info;
+
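+	/* A mode is only usable if its framebuffer, at the deepest supported format, fits into VRAM. */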
+ max_fbpages = mdev->vram_available >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ /*
+ * Test the mode's required memory bandwidth if the device
+ * specifies a maximum. Not all devices do though.
+ */
+ if (info->max_mem_bandwidth) {
+ uint32_t mode_bandwidth = mgag200_calculate_mode_bandwidth(mode, max_bpp * 8);
+
+ if (mode_bandwidth > (info->max_mem_bandwidth * 1024))
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
}
-int mgag200_modeset_init(struct mga_device *mdev)
+static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = mgag200_mode_config_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_available)
{
struct drm_device *dev = &mdev->base;
- struct drm_connector *connector = &mdev->connector.base;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
struct drm_simple_display_pipe *pipe = &mdev->display_pipe;
size_t format_count = ARRAY_SIZE(mgag200_simple_display_pipe_formats);
int ret;
mgag200_init_regs(mdev);
+ mdev->vram_available = vram_available;
+
ret = drmm_mode_config_init(dev);
if (ret) {
drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
@@ -1089,21 +1070,26 @@ int mgag200_modeset_init(struct mga_device *mdev)
dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
-
- dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev);
-
- dev->mode_config.fb_base = mdev->mc.vram_base;
-
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.fb_base = mdev->vram_res->start;
dev->mode_config.funcs = &mgag200_mode_config_funcs;
- ret = mgag200_vga_connector_init(mdev);
+ ret = mgag200_i2c_init(mdev, i2c);
if (ret) {
- drm_err(dev,
- "mgag200_vga_connector_init() failed, error %d\n",
- ret);
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
return ret;
}
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mga_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
ret = mgag200_pixpll_init(&mdev->pixpll, mdev);
if (ret)
return ret;
@@ -1121,9 +1107,13 @@ int mgag200_modeset_init(struct mga_device *mdev)
return ret;
}
- /* FIXME: legacy gamma tables; convert to CRTC state */
+ drm_plane_enable_fb_damage_clips(&pipe->plane);
+
+	/* FIXME: legacy gamma tables; atomic gamma does not work without them */
drm_mode_crtc_set_gamma_size(&pipe->crtc, MGAG200_LUT_SIZE);
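+	/* Expose only a gamma LUT; no degamma LUT or CTM is advertised. */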
+ drm_crtc_enable_color_mgmt(&pipe->crtc, 0, false, MGAG200_LUT_SIZE);
+
drm_mode_config_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index 52be08b744ad..8065ca5d8de9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -13,6 +13,7 @@ static int mgag200_pixpll_compute_g200(struct mgag200_pll *pixpll, long clock,
{
struct mga_device *mdev = pixpll->mdev;
struct drm_device *dev = &mdev->base;
+ struct mgag200_g200_device *g200 = to_mgag200_g200_device(dev);
const int post_div_max = 7;
const int in_div_min = 1;
const int in_div_max = 6;
@@ -23,9 +24,9 @@ static int mgag200_pixpll_compute_g200(struct mgag200_pll *pixpll, long clock,
long f_vco;
long computed;
long delta, tmp_delta;
- long ref_clk = mdev->model.g200.ref_clk;
- long p_clk_min = mdev->model.g200.pclk_min;
- long p_clk_max = mdev->model.g200.pclk_max;
+ long ref_clk = g200->ref_clk;
+ long p_clk_min = g200->pclk_min;
+ long p_clk_max = g200->pclk_max;
if (clock > p_clk_max) {
drm_err(dev, "Pixel Clock %ld too high\n", clock);
@@ -951,6 +952,7 @@ static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ew3 = {
int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct mgag200_g200se_device *g200se;
pixpll->mdev = mdev;
@@ -961,7 +963,9 @@ int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev)
break;
case G200_SE_A:
case G200_SE_B:
- if (mdev->model.g200se.unique_rev_id >= 0x04)
+ g200se = to_mgag200_g200se_device(dev);
+
+ if (g200se->unique_rev_id >= 0x04)
pixpll->funcs = &mgag200_pixpll_funcs_g200se_04;
else
pixpll->funcs = &mgag200_pixpll_funcs_g200se_00;
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index 60e705283fe8..99a9ab7d9119 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -252,8 +252,10 @@
#define MGAREG_CRTCEXT0_OFFSET_MASK GENMASK(5, 4)
+#define MGAREG_CRTCEXT1_VRSTEN BIT(7)
#define MGAREG_CRTCEXT1_VSYNCOFF BIT(5)
#define MGAREG_CRTCEXT1_HSYNCOFF BIT(4)
+#define MGAREG_CRTCEXT1_HRSTEN BIT(3)
#define MGAREG_CRTCEXT3_MGAMODE BIT(7)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 66395ee0862a..7274c41228ed 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -119,7 +119,6 @@ msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_catalog.o \
- dp/dp_clk_util.o \
dp/dp_ctrl.o \
dp/dp_display.o \
dp/dp_drm.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index c424e9a37669..3dcec7acb384 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1666,18 +1666,10 @@ static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
u64 busy_cycles;
- /* Only read the gpu busy if the hardware is already active */
- if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) {
- *out_sample_rate = 1;
- return 0;
- }
-
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
*out_sample_rate = clk_get_rate(gpu->core_clk);
- pm_runtime_put(&gpu->pdev->dev);
-
return busy_cycles;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9f76f5b15759..310a317885a1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -102,7 +102,8 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -127,15 +128,16 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
/*
* This can get called from devfreq while the hardware is idle. Don't
- * bring up the power if it isn't already active
+ * bring up the power if it isn't already active. All we're doing here
+ * is updating the frequency so that when we come back online we're at
+ * the right rate.
*/
- if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+ if (suspended)
return;
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
- pm_runtime_put(gmu->dev);
return;
}
@@ -159,7 +161,6 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
- pm_runtime_put(gmu->dev);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
@@ -504,7 +505,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
- return msm_writel(value, ptr + (offset << 2));
+ msm_writel(value, ptr + (offset << 2));
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
@@ -527,6 +528,8 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
+ else if (adreno_is_a619(adreno_gpu))
+ pdc_address_offset = 0x300a0;
else
pdc_address_offset = 0x30080;
@@ -601,7 +604,8 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
@@ -895,7 +899,7 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
return;
gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
- a6xx_gmu_set_freq(gpu, gpu_opp);
+ a6xx_gmu_set_freq(gpu, gpu_opp, false);
dev_pm_opp_put(gpu_opp);
}
@@ -1537,6 +1541,12 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
+ /*
+ * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
+ * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
+ * necessary. If you omit this step and you don't get random pagefaults, you are likely
+ * good to go without this!
+ */
} else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_256K - SZ_16K, 0x04000, "icache");
@@ -1547,9 +1557,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
- } else {
- BUG_ON(adreno_is_a660_family(adreno_gpu));
-
+ } else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) {
/* HFI v1, has sptprac */
gmu->legacy = true;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 84bd516f01e8..e034935b3986 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -98,7 +98,7 @@ static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
- return msm_writel(value, gmu->mmio + (offset << 2));
+ msm_writel(value, gmu->mmio + (offset << 2));
}
static inline void
@@ -138,7 +138,7 @@ static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
- return msm_writel(value, gmu->rscc + (offset << 2));
+ msm_writel(value, gmu->rscc + (offset << 2));
}
#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 42ed9a3c4905..4d501100b9e4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -252,6 +252,74 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
+/* For a615 family (a615, a616, a618 and a619) */
+const struct adreno_reglist a615_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002020},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
@@ -555,7 +623,7 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
-/* For a615, a616, a618, A619, a630, a640 and a680 */
+/* For a615, a616, a618, a619, a630, a640 and a680 */
static const u32 a6xx_protect[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
@@ -1446,7 +1514,7 @@ static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
- return msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
+ msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}
static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
@@ -1658,27 +1726,21 @@ static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
/* 19.2MHz */
*out_sample_rate = 19200000;
- /* Only read the gpu busy if the hardware is already active */
- if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
- return 0;
-
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
-
- pm_runtime_put(a6xx_gpu->gmu.dev);
-
return busy_cycles;
}
-static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
- a6xx_gmu_set_freq(gpu, opp);
+ a6xx_gmu_set_freq(gpu, opp, suspended);
mutex_unlock(&a6xx_gpu->gmu.lock);
}
@@ -1737,7 +1799,8 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL, SZ_4G);
+ "gpu", 0x100000000ULL,
+ adreno_private_address_space_size(gpu));
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -1763,6 +1826,22 @@ static u32 a618_get_speed_bin(u32 fuse)
return UINT_MAX;
}
+static u32 a619_get_speed_bin(u32 fuse)
+{
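+	/* Translate the efuse value into a speed-bin index; unlisted values are treated as unknown. */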
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 120)
+ return 4;
+ else if (fuse == 138)
+ return 3;
+ else if (fuse == 169)
+ return 2;
+ else if (fuse == 180)
+ return 1;
+
+ return UINT_MAX;
+}
+
static u32 adreno_7c3_get_speed_bin(u32 fuse)
{
if (fuse == 0)
@@ -1782,6 +1861,9 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
val = a618_get_speed_bin(fuse);
+ if (adreno_cmp_rev(ADRENO_REV(6, 1, 9, ANY_ID), rev))
+ val = a619_get_speed_bin(fuse);
+
if (adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), rev))
val = adreno_7c3_get_speed_bin(fuse);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 86e0a7c3fe6d..ab853f61db63 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -77,7 +77,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp);
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index d73fce5fdf1f..2cc83e049613 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -205,8 +205,8 @@ static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
struct a6xx_hfi_msg_fw_version msg = { 0 };
- /* Currently supporting version 1.1 */
- msg.supported_version = (1 << 28) | (1 << 16);
+ /* Currently supporting version 1.10 */
+ msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
version, sizeof(*version));
@@ -285,6 +285,65 @@ static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 13;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x0;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x50004;
+ msg->ddr_cmds_addrs[2] = 0x50080;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[1][0] = 0x6000030c;
+ msg->ddr_cmds_data[1][1] = 0x600000db;
+ msg->ddr_cmds_data[1][2] = 0x60000008;
+ msg->ddr_cmds_data[2][0] = 0x60000618;
+ msg->ddr_cmds_data[2][1] = 0x600001b6;
+ msg->ddr_cmds_data[2][2] = 0x60000008;
+ msg->ddr_cmds_data[3][0] = 0x60000925;
+ msg->ddr_cmds_data[3][1] = 0x60000291;
+ msg->ddr_cmds_data[3][2] = 0x60000008;
+ msg->ddr_cmds_data[4][0] = 0x60000dc1;
+ msg->ddr_cmds_data[4][1] = 0x600003dc;
+ msg->ddr_cmds_data[4][2] = 0x60000008;
+ msg->ddr_cmds_data[5][0] = 0x600010ad;
+ msg->ddr_cmds_data[5][1] = 0x600004ae;
+ msg->ddr_cmds_data[5][2] = 0x60000008;
+ msg->ddr_cmds_data[6][0] = 0x600014c3;
+ msg->ddr_cmds_data[6][1] = 0x600005d4;
+ msg->ddr_cmds_data[6][2] = 0x60000008;
+ msg->ddr_cmds_data[7][0] = 0x6000176a;
+ msg->ddr_cmds_data[7][1] = 0x60000693;
+ msg->ddr_cmds_data[7][2] = 0x60000008;
+ msg->ddr_cmds_data[8][0] = 0x60001f01;
+ msg->ddr_cmds_data[8][1] = 0x600008b5;
+ msg->ddr_cmds_data[8][2] = 0x60000008;
+ msg->ddr_cmds_data[9][0] = 0x60002940;
+ msg->ddr_cmds_data[9][1] = 0x60000b95;
+ msg->ddr_cmds_data[9][2] = 0x60000008;
+ msg->ddr_cmds_data[10][0] = 0x60002f68;
+ msg->ddr_cmds_data[10][1] = 0x60000d50;
+ msg->ddr_cmds_data[10][2] = 0x60000008;
+ msg->ddr_cmds_data[11][0] = 0x60003700;
+ msg->ddr_cmds_data[11][1] = 0x60000f71;
+ msg->ddr_cmds_data[11][2] = 0x60000008;
+ msg->ddr_cmds_data[12][0] = 0x60003fce;
+ msg->ddr_cmds_data[12][1] = 0x600011ea;
+ msg->ddr_cmds_data[12][2] = 0x60000008;
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x0;
+
+ msg->cnoc_cmds_addrs[0] = 0x50054;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+}
+
static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
@@ -462,6 +521,8 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(&msg);
+ else if (adreno_is_a619(adreno_gpu))
+ a619_build_bw_table(&msg);
else if (adreno_is_a640_family(adreno_gpu))
a640_build_bw_table(&msg);
else if (adreno_is_a650(adreno_gpu))
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 8706bcdd1472..24b489b6129a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -265,6 +265,19 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
}, {
+ .rev = ADRENO_REV(6, 1, 9, ANY_ID),
+ .revn = 619,
+ .name = "A619",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ }, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
.name = "A630",
@@ -303,6 +316,7 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a650_zap.mdt",
.hwcg = a650_hwcg,
+ .address_space_size = SZ_16G,
}, {
.rev = ADRENO_REV(6, 6, 0, ANY_ID),
.revn = 660,
@@ -316,6 +330,7 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg,
+ .address_space_size = SZ_16G,
}, {
.rev = ADRENO_REV(6, 3, 5, ANY_ID),
.fw = {
@@ -326,6 +341,7 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.hwcg = a660_hwcg,
+ .address_space_size = SZ_16G,
}, {
.rev = ADRENO_REV(6, 8, 0, ANY_ID),
.revn = 680,
@@ -355,6 +371,7 @@ MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");
@@ -415,6 +432,12 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (ret)
return NULL;
+ /*
+	 * Now that we have the firmware loaded and are ready to begin
+	 * booting the GPU, go ahead and enable runpm:
+ */
+ pm_runtime_enable(&pdev->dev);
+
/* Make sure pm runtime is active and reset any previous errors */
pm_runtime_set_active(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 4e665c806a14..382fb7f9e497 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -21,6 +21,10 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+static u64 address_space_size;
+MODULE_PARM_DESC(address_space_size, "Override the size of a process's private GPU address space");
+module_param(address_space_size, ullong, 0600);
+
static bool zap_available = true;
static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
@@ -228,6 +232,19 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
return aspace;
}
+u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
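+	/* The module parameter, if set, overrides the per-GPU default; otherwise fall back to 4 GiB. */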
+ if (address_space_size)
+ return address_space_size;
+
+ if (adreno_gpu->info->address_space_size)
+ return adreno_gpu->info->address_space_size;
+
+ return SZ_4G;
+}
+
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
@@ -498,10 +515,15 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->cur = ring->start;
ring->next = ring->start;
-
- /* reset completed fence seqno: */
- ring->memptrs->fence = ring->fctx->completed_fence;
ring->memptrs->rptr = 0;
+
+	/* Detect and clean up an impossible fence, i.e. if the GPU managed
+	 * to scribble something invalid, we don't want that to confuse
+	 * us into mistakenly believing that submits have completed.
+ */
+ if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
+ ring->memptrs->fence = ring->fctx->last_fence;
+ }
}
return 0;
@@ -785,11 +807,11 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
for (i = 0; i < gpu->nr_rings; i++) {
drm_printf(p, " - id: %d\n", i);
drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
- drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
- drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
- drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
- drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
- drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+ drm_printf(p, " last-fence: %u\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %u\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %u\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %u\n", state->ring[i].wptr);
+ drm_printf(p, " size: %u\n", MSM_GPU_RINGBUFFER_SZ);
adreno_show_object(p, &state->ring[i].data,
state->ring[i].data_size, &state->ring[i].encoded);
@@ -802,6 +824,7 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " - iova: 0x%016llx\n",
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
+ drm_printf(p, " name: %-32s\n", state->bos[i].name);
adreno_show_object(p, &state->bos[i].data,
state->bos[i].size, &state->bos[i].encoded);
@@ -1042,7 +1065,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
pm_runtime_set_autosuspend_delay(dev,
adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(dev);
- pm_runtime_enable(dev);
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
gpu_name, &adreno_gpu_config);
@@ -1057,7 +1079,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
- pm_runtime_disable(&priv->gpu_pdev->dev);
+ if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index ab3b5ef80332..e7adc5c632d0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -57,7 +57,7 @@ struct adreno_reglist {
u32 value;
};
-extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
+extern const struct adreno_reglist a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
struct adreno_info {
struct adreno_rev rev;
@@ -70,6 +70,7 @@ struct adreno_info {
const char *zapfw;
u32 inactive_period;
const struct adreno_reglist *hwcg;
+ u64 address_space_size;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -199,7 +200,7 @@ static inline int adreno_is_a420(struct adreno_gpu *gpu)
static inline int adreno_is_a430(struct adreno_gpu *gpu)
{
- return gpu->revn == 430;
+ return gpu->revn == 430;
}
static inline int adreno_is_a506(struct adreno_gpu *gpu)
@@ -239,12 +240,17 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
- return gpu->revn == 618;
+ return gpu->revn == 618;
+}
+
+static inline int adreno_is_a619(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 619;
}
static inline int adreno_is_a630(struct adreno_gpu *gpu)
{
- return gpu->revn == 630;
+ return gpu->revn == 630;
}
static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
@@ -254,32 +260,38 @@ static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
static inline int adreno_is_a650(struct adreno_gpu *gpu)
{
- return gpu->revn == 650;
+ return gpu->revn == 650;
}
static inline int adreno_is_7c3(struct adreno_gpu *gpu)
{
/* The order of args is important here to handle ANY_ID correctly */
- return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+ return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
}
static inline int adreno_is_a660(struct adreno_gpu *gpu)
{
- return gpu->revn == 660;
+ return gpu->revn == 660;
+}
+
+/* check for a615, a616, a618, a619 or any derivatives */
+static inline int adreno_is_a615_family(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 615 || gpu->revn == 616 || gpu->revn == 618 || gpu->revn == 619;
}
static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
{
- return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
+ return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
}
/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
{
- return gpu->revn == 650 || gpu->revn == 620 ||
- adreno_is_a660_family(gpu);
+ return gpu->revn == 650 || gpu->revn == 620 || adreno_is_a660_family(gpu);
}
+u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index a7492dd6ed65..1d9d83d7b99e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -53,7 +53,7 @@ static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
crtc_plane_bw += pstate->plane_fetch_bw;
}
- bw_factor = kms->catalog->perf.bw_inefficiency_factor;
+ bw_factor = kms->catalog->perf->bw_inefficiency_factor;
if (bw_factor) {
crtc_plane_bw *= bw_factor;
do_div(crtc_plane_bw, 100);
@@ -90,7 +90,7 @@ static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
crtc_clk = max(pstate->plane_clk, crtc_clk);
}
- clk_factor = kms->catalog->perf.clk_inefficiency_factor;
+ clk_factor = kms->catalog->perf->clk_inefficiency_factor;
if (clk_factor) {
crtc_clk *= clk_factor;
do_div(crtc_clk, 100);
@@ -128,7 +128,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
} else {
perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
- perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib;
+ perf->max_per_pipe_ib = kms->catalog->perf->min_dram_ib;
perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
}
@@ -189,7 +189,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
- threshold = kms->catalog->perf.max_bw_high;
+ threshold = kms->catalog->perf->max_bw_high;
DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
@@ -413,7 +413,7 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
- struct dpu_perf_cfg *cfg = &perf->catalog->perf;
+ const struct dpu_perf_cfg *cfg = perf->catalog->perf;
u32 perf_mode = 0;
int ret;
@@ -468,7 +468,7 @@ static const struct file_operations dpu_core_perf_mode_fops = {
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
struct dpu_core_perf *perf = &dpu_kms->perf;
- struct dpu_mdss_cfg *catalog = perf->catalog;
+ const struct dpu_mdss_cfg *catalog = perf->catalog;
struct dentry *entry;
entry = debugfs_create_dir("core_perf", parent);
@@ -480,15 +480,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, entry,
- (u32 *)&catalog->perf.max_bw_low);
+ (u32 *)&catalog->perf->max_bw_low);
debugfs_create_u32("threshold_high", 0600, entry,
- (u32 *)&catalog->perf.max_bw_high);
+ (u32 *)&catalog->perf->max_bw_high);
debugfs_create_u32("min_core_ib", 0600, entry,
- (u32 *)&catalog->perf.min_core_ib);
+ (u32 *)&catalog->perf->min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, entry,
- (u32 *)&catalog->perf.min_llcc_ib);
+ (u32 *)&catalog->perf->min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, entry,
- (u32 *)&catalog->perf.min_dram_ib);
+ (u32 *)&catalog->perf->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
@@ -517,7 +517,7 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
- struct dpu_mdss_cfg *catalog,
+ const struct dpu_mdss_cfg *catalog,
struct clk *core_clk)
{
perf->dev = dev;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 8dfcc6db7176..e3795995e145 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -68,7 +68,7 @@ struct dpu_core_perf_tune {
struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
- struct dpu_mdss_cfg *catalog;
+ const struct dpu_mdss_cfg *catalog;
struct clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
@@ -119,7 +119,7 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
- struct dpu_mdss_cfg *catalog,
+ const struct dpu_mdss_cfg *catalog,
struct clk *core_clk);
struct dpu_kms;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index b56f777dbd0e..781dcd3fb283 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -12,8 +13,10 @@
#include <linux/bits.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -78,6 +81,8 @@ static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
if (!strcmp(src_name, "auto") ||
!strcmp(src_name, "lm"))
return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
+ if (!strcmp(src_name, "encoder"))
+ return DPU_CRTC_CRC_SOURCE_ENCODER;
return DPU_CRTC_CRC_SOURCE_INVALID;
}
@@ -93,23 +98,54 @@ static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
return -EINVAL;
}
- if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
*values_cnt = crtc_state->num_mixers;
+ } else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
+ struct drm_encoder *drm_enc;
+
+ *values_cnt = 0;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
+ *values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
+ }
return 0;
}
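+/* Enable MISR collection for one frame on each active layer mixer */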
+static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
+{
+ struct dpu_crtc_mixer *m;
+ int i;
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ /* Calculate MISR over 1 frame */
+ m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
+ }
+}
+
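+/* Enable MISR collection on every encoder attached to this CRTC */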
+static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_setup_misr(drm_enc);
+}
+
static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
enum dpu_crtc_crc_source current_source;
struct dpu_crtc_state *crtc_state;
struct drm_device *drm_dev = crtc->dev;
- struct dpu_crtc_mixer *m;
bool was_enabled;
bool enable = false;
- int i, ret = 0;
+ int ret = 0;
if (source < 0) {
DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
@@ -146,16 +182,12 @@ static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
crtc_state->crc_frame_skip_count = 0;
- for (i = 0; i < crtc_state->num_mixers; ++i) {
- m = &crtc_state->mixers[i];
-
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
- continue;
-
- /* Calculate MISR over 1 frame */
- m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
- }
-
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ dpu_crtc_setup_lm_misr(crtc_state);
+ else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
+ dpu_crtc_setup_encoder_misr(crtc);
+ else
+ ret = -EINVAL;
cleanup:
drm_modeset_unlock(&crtc->mutex);
@@ -174,26 +206,17 @@ static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
return dpu_encoder_get_vsync_count(encoder);
}
-
-static int dpu_crtc_get_crc(struct drm_crtc *crtc)
+static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
+ struct dpu_crtc_state *crtc_state)
{
- struct dpu_crtc_state *crtc_state;
struct dpu_crtc_mixer *m;
u32 crcs[CRTC_DUAL_MIXERS];
- int i = 0;
int rc = 0;
-
- crtc_state = to_dpu_crtc_state(crtc->state);
+ int i;
BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
- /* Skip first 2 frames in case of "uncooked" CRCs */
- if (crtc_state->crc_frame_skip_count < 2) {
- crtc_state->crc_frame_skip_count++;
- return 0;
- }
-
for (i = 0; i < crtc_state->num_mixers; ++i) {
m = &crtc_state->mixers[i];
@@ -214,6 +237,46 @@ static int dpu_crtc_get_crc(struct drm_crtc *crtc)
drm_crtc_accurate_vblank_count(crtc), crcs);
}
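+/* Collect CRC values from every encoder attached to this CRTC */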
+static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ int rc, pos = 0;
+ u32 crcs[INTF_MAX];
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
+ rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
+ if (rc < 0) {
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+
+ return rc;
+ }
+
+ pos += rc;
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+}
+
+static int dpu_crtc_get_crc(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ /* Skip first 2 frames in case of "uncooked" CRCs */
+ if (crtc_state->crc_frame_skip_count < 2) {
+ crtc_state->crc_frame_skip_count++;
+ return 0;
+ }
+
+ if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ return dpu_crtc_get_lm_crc(crtc, crtc_state);
+ else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
+ return dpu_crtc_get_encoder_crc(crtc);
+
+ return -EINVAL;
+}
+
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
@@ -361,6 +424,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (!state)
continue;
+ if (!state->visible)
+ continue;
+
pstate = to_dpu_plane_state(state);
fb = state->fb;
@@ -1134,6 +1200,9 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
if (cnt >= DPU_STAGE_MAX * 4)
continue;
+ if (!pstate->visible)
+ continue;
+
pstates[cnt].dpu_pstate = dpu_pstate;
pstates[cnt].drm_pstate = pstate;
pstates[cnt].stage = pstate->normalized_zpos;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index b8785c394fcc..9b67645c2574 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -12,7 +13,6 @@
#include <drm/drm_crtc.h>
#include "dpu_kms.h"
#include "dpu_core_perf.h"
-#include "dpu_hw_blk.h"
#define DPU_CRTC_NAME_SIZE 12
@@ -73,11 +73,13 @@ struct dpu_crtc_smmu_state_data {
* enum dpu_crtc_crc_source: CRC source
* @DPU_CRTC_CRC_SOURCE_NONE: no source set
* @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer
+ * @DPU_CRTC_CRC_SOURCE_ENCODER: CRC in encoder
* @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source
*/
enum dpu_crtc_crc_source {
DPU_CRTC_CRC_SOURCE_NONE = 0,
DPU_CRTC_CRC_SOURCE_LAYER_MIXER,
+ DPU_CRTC_CRC_SOURCE_ENCODER,
DPU_CRTC_CRC_SOURCE_MAX,
DPU_CRTC_CRC_SOURCE_INVALID = -1
};
@@ -201,6 +203,8 @@ struct dpu_crtc {
* @mixers : List of active mixers
* @num_ctls : Number of ctl paths in use
* @hw_ctls : List of active ctl paths
+ * @crc_source : CRC source
+ * @crc_frame_skip_count : Number of frames skipped before getting CRC
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 52516eb20cb8..c682d4e02d1b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -225,6 +225,70 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
return dpu_enc->wide_bus_en;
}
+int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i, num_intf = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->hw_intf && phys->hw_intf->ops.setup_misr
+ && phys->hw_intf->ops.collect_misr)
+ num_intf++;
+ }
+
+ return num_intf;
+}
+
+void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
+ continue;
+
+ phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
+ }
+}
+
+int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ int i, rc = 0, entries_added = 0;
+
+ if (!drm_enc->crtc) {
+ DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
+ continue;
+
+ rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
+ if (rc)
+ return rc;
+ entries_added++;
+ }
+
+ return entries_added;
+}
+
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
struct dpu_hw_dither_cfg dither_cfg = { 0 };
@@ -541,7 +605,6 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
@@ -559,7 +622,6 @@ static int dpu_encoder_virt_atomic_check(
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_global_state(crtc_state->state);
if (IS_ERR(global_state))
@@ -636,7 +698,7 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
}
if (hw_mdptop->ops.setup_vsync_source &&
- disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ disp_info->is_cmd_mode) {
for (i = 0; i < dpu_enc->num_phys_encs; i++)
vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
@@ -720,8 +782,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
- is_vid_mode = dpu_enc->disp_info.capabilities &
- MSM_DISPLAY_CAP_VID_MODE;
+ is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
/*
* when idle_pc is not supported, process only KICKOFF, STOP and MODESET
@@ -1050,24 +1111,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
- if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
- phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
-
- if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
- phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
-
- if (!phys->hw_intf && !phys->hw_wb) {
- DPU_ERROR_ENC(dpu_enc,
- "no intf or wb block assigned at idx: %d\n", i);
- return;
- }
-
- if (phys->hw_intf && phys->hw_wb) {
- DPU_ERROR_ENC(dpu_enc,
- "invalid phys both intf and wb block at idx: %d\n", i);
- return;
- }
-
phys->cached_mode = crtc_state->adjusted_mode;
if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
@@ -1207,37 +1250,37 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
mutex_unlock(&dpu_enc->enc_lock);
}
-static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+static enum dpu_intf dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
enum dpu_intf_type type, u32 controller_id)
{
int i = 0;
- if (type != INTF_WB) {
- for (i = 0; i < catalog->intf_count; i++) {
- if (catalog->intf[i].type == type
- && catalog->intf[i].controller_id == controller_id) {
- return catalog->intf[i].id;
- }
+ if (type == INTF_WB)
+ return INTF_MAX;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
}
}
return INTF_MAX;
}
-static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
+static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog,
enum dpu_intf_type type, u32 controller_id)
{
int i = 0;
if (type != INTF_WB)
- goto end;
+ return WB_MAX;
for (i = 0; i < catalog->wb_count; i++) {
if (catalog->wb[i].id == controller_id)
return catalog->wb[i].id;
}
-end:
return WB_MAX;
}
@@ -1253,12 +1296,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_BEGIN("encoder_vblank_callback");
dpu_enc = to_dpu_encoder_virt(drm_enc);
+ atomic_inc(&phy_enc->vsync_cnt);
+
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
if (dpu_enc->crtc)
dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
- atomic_inc(&phy_enc->vsync_cnt);
DPU_ATRACE_END("encoder_vblank_callback");
}
@@ -1604,7 +1648,7 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
/* update only for command mode primary ctl */
if ((phys == dpu_enc->cur_master) &&
- (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ disp_info->is_cmd_mode
&& ctl->ops.trigger_pending)
ctl->ops.trigger_pending(ctl);
}
@@ -1814,7 +1858,6 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
- dsc_common_mode = 0;
pic_width = dsc->drm->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
@@ -2141,39 +2184,36 @@ static int dpu_encoder_virt_add_phys_encs(
return -EINVAL;
}
- if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
- enc = dpu_encoder_phys_vid_init(params);
- if (IS_ERR_OR_NULL(enc)) {
- DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
+ enc = dpu_encoder_phys_wb_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
PTR_ERR(enc));
- return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ return PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
- }
-
- if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ } else if (disp_info->is_cmd_mode) {
enc = dpu_encoder_phys_cmd_init(params);
- if (IS_ERR_OR_NULL(enc)) {
+ if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc));
- return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ return PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
- }
-
- if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
- enc = dpu_encoder_phys_wb_init(params);
+ } else {
+ enc = dpu_encoder_phys_vid_init(params);
- if (IS_ERR_OR_NULL(enc)) {
- DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
- PTR_ERR(enc));
- return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -2232,8 +2272,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
- if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
- (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ if (disp_info->intf_type != DRM_MODE_ENCODER_VIRTUAL)
dpu_enc->idle_pc_supported =
dpu_kms->catalog->caps->has_idle_pc;
@@ -2296,7 +2335,25 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
atomic_set(&phys->vsync_cnt, 0);
atomic_set(&phys->underrun_cnt, 0);
+
+ if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
+ phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
+
+ if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
+ phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
+
+ if (!phys->hw_intf && !phys->hw_wb) {
+ DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
+ ret = -EINVAL;
+ }
+
+ if (phys->hw_intf && phys->hw_wb) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid phys both intf and wb block at idx: %d\n", i);
+ ret = -EINVAL;
+ }
}
+
mutex_unlock(&dpu_enc->enc_lock);
return ret;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 781d41c91994..d4d1ecd416e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -21,19 +22,19 @@
/**
* struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_ENCODER_ type
- * @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
* based on num_of_h_tiles
+ * @is_cmd_mode: Boolean to indicate if CMD mode is requested
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
* @dsc: DSC configuration data for DSC-enabled displays
*/
struct msm_display_info {
int intf_type;
- uint32_t capabilities;
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_cmd_mode;
bool is_te_using_watchdog_timer;
struct msm_display_dsc_config *dsc;
};
@@ -175,6 +176,27 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
/**
+ * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+ * in virtual encoder that can collect CRC values
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * Returns: Number of physical encoders in the given drm encoder that can
+ * collect CRC values
+ */
+int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_setup_misr - enable misr calculations
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_get_crc - get the crc value from interface blocks
+ * @drm_enc: Pointer to previously created drm encoder structure
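+ * @crcs: array to be filled with collected CRC values
+ * @pos: offset into @crcs at which to start writing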
+ * Returns: number of CRC values written to @crcs on success, negative error code otherwise
+ */
+int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
+
+/**
* dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
* @drm_enc: Pointer to previously created drm encoder structure
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4829d1ce0cf8..7cbcef6efe17 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -7,12 +7,13 @@
#include <linux/debugfs.h>
+#include <drm/drm_framebuffer.h>
+
#include "dpu_encoder_phys.h"
#include "dpu_formats.h"
#include "dpu_hw_top.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_lm.h"
-#include "dpu_hw_blk.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_interrupts.h"
#include "dpu_core_irq.h"
@@ -20,8 +21,6 @@
#include "dpu_crtc.h"
#include "disp/msm_disp_snapshot.h"
-#define DEFAULT_MAX_WRITEBACK_WIDTH 2048
-
#define to_dpu_encoder_phys_wb(x) \
container_of(x, struct dpu_encoder_phys_wb, base)
@@ -103,8 +102,8 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_qos_cfg qos_cfg;
- struct dpu_mdss_cfg *catalog;
- struct dpu_qos_lut_tbl *qos_lut_tb;
+ const struct dpu_mdss_cfg *catalog;
+ const struct dpu_qos_lut_tbl *qos_lut_tb;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid parameter(s)\n");
@@ -118,11 +117,11 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
qos_cfg.danger_safe_en = true;
qos_cfg.danger_lut =
- catalog->perf.danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
- qos_cfg.safe_lut = catalog->perf.safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_cfg.safe_lut = catalog->perf->safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
- qos_lut_tb = &catalog->perf.qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_lut_tb = &catalog->perf->qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
if (hw_wb->ops.setup_qos_lut)
@@ -166,7 +165,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
if (hw_wb->ops.setup_cdp) {
memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
- cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf.cdp_cfg
+ cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf->cdp_cfg
[DPU_PERF_CDP_USAGE_NRT].wr_enable;
cdp_cfg.ubwc_meta_enable =
DPU_FORMAT_IS_UBWC(wb_cfg->dest.format);
@@ -252,11 +251,6 @@ static int dpu_encoder_phys_wb_atomic_check(
DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
- return 0;
-
- fb = conn_state->writeback_job->fb;
-
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
@@ -267,6 +261,11 @@ static int dpu_encoder_phys_wb_atomic_check(
return -EINVAL;
}
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
fb->width, fb->height);
@@ -278,9 +277,9 @@ static int dpu_encoder_phys_wb_atomic_check(
DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
mode->vdisplay);
return -EINVAL;
- } else if (fb->width > DEFAULT_MAX_WRITEBACK_WIDTH) {
+ } else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
- fb->width, DEFAULT_MAX_WRITEBACK_WIDTH);
+ fb->width, phys_enc->hw_wb->caps->maxlinewidth);
return -EINVAL;
}
@@ -574,11 +573,11 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
*/
static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
{
- DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
-
if (!phys_enc)
return;
+ DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
kfree(phys_enc);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 440ae93d7bd1..f436a1f3419d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "msm_media_info.h"
#include "dpu_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
deleted file mode 100644
index 52e92f37eda4..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DPU_HW_BLK_H
-#define _DPU_HW_BLK_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-
-struct dpu_hw_blk;
-
-
-/**
- * struct dpu_hw_blk - definition of hardware block object
- * @list: list of hardware blocks
- * @type: hardware block type
- * @id: instance id
- * @refcount: reference/usage count
- */
-struct dpu_hw_blk {
- /* opaque */
-};
-
-#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 400ebceb56bb..0239a811d5ec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -50,11 +50,14 @@
#define DMA_CURSOR_MSM8998_MASK \
(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
-#define MIXER_SDM845_MASK \
+#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
#define MIXER_SC7180_MASK \
- (BIT(DPU_DIM_LAYER))
+ (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
@@ -936,17 +939,17 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
};
static const struct dpu_lm_cfg msm8998_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_2, LM_0, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
+ LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_3, LM_1, 0),
};
@@ -1012,7 +1015,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
static const struct dpu_lm_cfg sc7280_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_0, 0, 0),
+ &sc7180_lm_sblk, PINGPONG_0, 0, DSPP_0),
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SC7180_MASK,
&sc7180_lm_sblk, PINGPONG_2, LM_3, 0),
LM_BLK("lm_3", LM_3, 0x47000, MIXER_SC7180_MASK,
@@ -1285,7 +1288,7 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
* Writeback blocks config
*************************************************************/
#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \
- __xin_id, vbif_id, _reg, _wb_done_bit) \
+ __xin_id, vbif_id, _reg, _max_linewidth, _wb_done_bit) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x2c8, \
@@ -1295,13 +1298,13 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
.clk_ctrl = _clk_ctrl, \
.xin_id = __xin_id, \
.vbif_idx = vbif_id, \
- .maxlinewidth = DEFAULT_DPU_LINE_WIDTH, \
+ .maxlinewidth = _max_linewidth, \
.intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \
}
static const struct dpu_wb_cfg sm8250_wb[] = {
WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
- VBIF_RT, MDP_SSPP_TOP0_INTR, 4),
+ VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4),
};
/*************************************************************
@@ -1336,6 +1339,7 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = {
.default_ot_wr_limit = 32,
.features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM),
.xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x20,
.dynamic_ot_rd_tbl = {
.count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
.cfg = msm8998_ot_rdwr_cfg,
@@ -1363,6 +1367,7 @@ static const struct dpu_vbif_cfg sdm845_vbif[] = {
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
.qos_rt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
.priority_lvl = sdm845_rt_pri_lvl,
@@ -1717,275 +1722,221 @@ static const struct dpu_perf_cfg qcm2290_perf_data = {
.bw_inefficiency_factor = 120,
};
/*************************************************************
- * Hardware catalog init
+ * Hardware catalog
*************************************************************/
-/*
- * msm8998_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void msm8998_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &msm8998_dpu_caps,
- .mdp_count = ARRAY_SIZE(msm8998_mdp),
- .mdp = msm8998_mdp,
- .ctl_count = ARRAY_SIZE(msm8998_ctl),
- .ctl = msm8998_ctl,
- .sspp_count = ARRAY_SIZE(msm8998_sspp),
- .sspp = msm8998_sspp,
- .mixer_count = ARRAY_SIZE(msm8998_lm),
- .mixer = msm8998_lm,
- .dspp_count = ARRAY_SIZE(msm8998_dspp),
- .dspp = msm8998_dspp,
- .pingpong_count = ARRAY_SIZE(sdm845_pp),
- .pingpong = sdm845_pp,
- .intf_count = ARRAY_SIZE(msm8998_intf),
- .intf = msm8998_intf,
- .vbif_count = ARRAY_SIZE(msm8998_vbif),
- .vbif = msm8998_vbif,
- .reg_dma_count = 0,
- .perf = msm8998_perf_data,
- .mdss_irqs = IRQ_SM8250_MASK,
- };
-}
-
-/*
- * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &sdm845_dpu_caps,
- .mdp_count = ARRAY_SIZE(sdm845_mdp),
- .mdp = sdm845_mdp,
- .ctl_count = ARRAY_SIZE(sdm845_ctl),
- .ctl = sdm845_ctl,
- .sspp_count = ARRAY_SIZE(sdm845_sspp),
- .sspp = sdm845_sspp,
- .mixer_count = ARRAY_SIZE(sdm845_lm),
- .mixer = sdm845_lm,
- .pingpong_count = ARRAY_SIZE(sdm845_pp),
- .pingpong = sdm845_pp,
- .dsc_count = ARRAY_SIZE(sdm845_dsc),
- .dsc = sdm845_dsc,
- .intf_count = ARRAY_SIZE(sdm845_intf),
- .intf = sdm845_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .reg_dma_count = 1,
- .dma_cfg = sdm845_regdma,
- .perf = sdm845_perf_data,
- .mdss_irqs = IRQ_SDM845_MASK,
- };
-}
-
-/*
- * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &sc7180_dpu_caps,
- .mdp_count = ARRAY_SIZE(sc7180_mdp),
- .mdp = sc7180_mdp,
- .ctl_count = ARRAY_SIZE(sc7180_ctl),
- .ctl = sc7180_ctl,
- .sspp_count = ARRAY_SIZE(sc7180_sspp),
- .sspp = sc7180_sspp,
- .mixer_count = ARRAY_SIZE(sc7180_lm),
- .mixer = sc7180_lm,
- .dspp_count = ARRAY_SIZE(sc7180_dspp),
- .dspp = sc7180_dspp,
- .pingpong_count = ARRAY_SIZE(sc7180_pp),
- .pingpong = sc7180_pp,
- .intf_count = ARRAY_SIZE(sc7180_intf),
- .intf = sc7180_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .reg_dma_count = 1,
- .dma_cfg = sdm845_regdma,
- .perf = sc7180_perf_data,
- .mdss_irqs = IRQ_SC7180_MASK,
- };
-}
-
-/*
- * sm8150_cfg_init(): populate sm8150 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void sm8150_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &sm8150_dpu_caps,
- .mdp_count = ARRAY_SIZE(sdm845_mdp),
- .mdp = sdm845_mdp,
- .ctl_count = ARRAY_SIZE(sm8150_ctl),
- .ctl = sm8150_ctl,
- .sspp_count = ARRAY_SIZE(sdm845_sspp),
- .sspp = sdm845_sspp,
- .mixer_count = ARRAY_SIZE(sm8150_lm),
- .mixer = sm8150_lm,
- .dspp_count = ARRAY_SIZE(sm8150_dspp),
- .dspp = sm8150_dspp,
- .pingpong_count = ARRAY_SIZE(sm8150_pp),
- .pingpong = sm8150_pp,
- .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
- .merge_3d = sm8150_merge_3d,
- .intf_count = ARRAY_SIZE(sm8150_intf),
- .intf = sm8150_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .reg_dma_count = 1,
- .dma_cfg = sm8150_regdma,
- .perf = sm8150_perf_data,
- .mdss_irqs = IRQ_SDM845_MASK,
- };
-}
-
-/*
- * sc8180x_cfg_init(): populate sc8180 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void sc8180x_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &sc8180x_dpu_caps,
- .mdp_count = ARRAY_SIZE(sc8180x_mdp),
- .mdp = sc8180x_mdp,
- .ctl_count = ARRAY_SIZE(sm8150_ctl),
- .ctl = sm8150_ctl,
- .sspp_count = ARRAY_SIZE(sdm845_sspp),
- .sspp = sdm845_sspp,
- .mixer_count = ARRAY_SIZE(sm8150_lm),
- .mixer = sm8150_lm,
- .pingpong_count = ARRAY_SIZE(sm8150_pp),
- .pingpong = sm8150_pp,
- .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
- .merge_3d = sm8150_merge_3d,
- .intf_count = ARRAY_SIZE(sc8180x_intf),
- .intf = sc8180x_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .reg_dma_count = 1,
- .dma_cfg = sm8150_regdma,
- .perf = sc8180x_perf_data,
- .mdss_irqs = IRQ_SC8180X_MASK,
- };
-}
-
-/*
- * sm8250_cfg_init(): populate sm8250 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &sm8250_dpu_caps,
- .mdp_count = ARRAY_SIZE(sm8250_mdp),
- .mdp = sm8250_mdp,
- .ctl_count = ARRAY_SIZE(sm8150_ctl),
- .ctl = sm8150_ctl,
- .sspp_count = ARRAY_SIZE(sm8250_sspp),
- .sspp = sm8250_sspp,
- .mixer_count = ARRAY_SIZE(sm8150_lm),
- .mixer = sm8150_lm,
- .dspp_count = ARRAY_SIZE(sm8150_dspp),
- .dspp = sm8150_dspp,
- .pingpong_count = ARRAY_SIZE(sm8150_pp),
- .pingpong = sm8150_pp,
- .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
- .merge_3d = sm8150_merge_3d,
- .intf_count = ARRAY_SIZE(sm8150_intf),
- .intf = sm8150_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .wb_count = ARRAY_SIZE(sm8250_wb),
- .wb = sm8250_wb,
- .reg_dma_count = 1,
- .dma_cfg = sm8250_regdma,
- .perf = sm8250_perf_data,
- .mdss_irqs = IRQ_SM8250_MASK,
- };
-}
-
-static void sc7280_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &sc7280_dpu_caps,
- .mdp_count = ARRAY_SIZE(sc7280_mdp),
- .mdp = sc7280_mdp,
- .ctl_count = ARRAY_SIZE(sc7280_ctl),
- .ctl = sc7280_ctl,
- .sspp_count = ARRAY_SIZE(sc7280_sspp),
- .sspp = sc7280_sspp,
- .mixer_count = ARRAY_SIZE(sc7280_lm),
- .mixer = sc7280_lm,
- .pingpong_count = ARRAY_SIZE(sc7280_pp),
- .pingpong = sc7280_pp,
- .intf_count = ARRAY_SIZE(sc7280_intf),
- .intf = sc7280_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .perf = sc7280_perf_data,
- .mdss_irqs = IRQ_SC7280_MASK,
- };
-}
-
-
-/*
- * qcm2290_cfg_init(): populate qcm2290 dpu sub-blocks reg offsets
- * and instance counts.
- */
-static void qcm2290_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
-{
- *dpu_cfg = (struct dpu_mdss_cfg){
- .caps = &qcm2290_dpu_caps,
- .mdp_count = ARRAY_SIZE(qcm2290_mdp),
- .mdp = qcm2290_mdp,
- .ctl_count = ARRAY_SIZE(qcm2290_ctl),
- .ctl = qcm2290_ctl,
- .sspp_count = ARRAY_SIZE(qcm2290_sspp),
- .sspp = qcm2290_sspp,
- .mixer_count = ARRAY_SIZE(qcm2290_lm),
- .mixer = qcm2290_lm,
- .dspp_count = ARRAY_SIZE(qcm2290_dspp),
- .dspp = qcm2290_dspp,
- .pingpong_count = ARRAY_SIZE(qcm2290_pp),
- .pingpong = qcm2290_pp,
- .intf_count = ARRAY_SIZE(qcm2290_intf),
- .intf = qcm2290_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
- .reg_dma_count = 1,
- .dma_cfg = sdm845_regdma,
- .perf = qcm2290_perf_data,
- .mdss_irqs = IRQ_SC7180_MASK,
- };
-}
+static const struct dpu_mdss_cfg msm8998_dpu_cfg = {
+ .caps = &msm8998_dpu_caps,
+ .mdp_count = ARRAY_SIZE(msm8998_mdp),
+ .mdp = msm8998_mdp,
+ .ctl_count = ARRAY_SIZE(msm8998_ctl),
+ .ctl = msm8998_ctl,
+ .sspp_count = ARRAY_SIZE(msm8998_sspp),
+ .sspp = msm8998_sspp,
+ .mixer_count = ARRAY_SIZE(msm8998_lm),
+ .mixer = msm8998_lm,
+ .dspp_count = ARRAY_SIZE(msm8998_dspp),
+ .dspp = msm8998_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .intf_count = ARRAY_SIZE(msm8998_intf),
+ .intf = msm8998_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .reg_dma_count = 0,
+ .perf = &msm8998_perf_data,
+ .mdss_irqs = IRQ_SM8250_MASK,
+};
+
+static const struct dpu_mdss_cfg sdm845_dpu_cfg = {
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm845_dsc),
+ .dsc = sdm845_dsc,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sdm845_regdma,
+ .perf = &sdm845_perf_data,
+ .mdss_irqs = IRQ_SDM845_MASK,
+};
+
+static const struct dpu_mdss_cfg sc7180_dpu_cfg = {
+ .caps = &sc7180_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7180_mdp),
+ .mdp = sc7180_mdp,
+ .ctl_count = ARRAY_SIZE(sc7180_ctl),
+ .ctl = sc7180_ctl,
+ .sspp_count = ARRAY_SIZE(sc7180_sspp),
+ .sspp = sc7180_sspp,
+ .mixer_count = ARRAY_SIZE(sc7180_lm),
+ .mixer = sc7180_lm,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
+ .pingpong_count = ARRAY_SIZE(sc7180_pp),
+ .pingpong = sc7180_pp,
+ .intf_count = ARRAY_SIZE(sc7180_intf),
+ .intf = sc7180_intf,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sdm845_regdma,
+ .perf = &sc7180_perf_data,
+ .mdss_irqs = IRQ_SC7180_MASK,
+};
+
+static const struct dpu_mdss_cfg sm8150_dpu_cfg = {
+ .caps = &sm8150_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8150_regdma,
+ .perf = &sm8150_perf_data,
+ .mdss_irqs = IRQ_SDM845_MASK,
+};
+
+static const struct dpu_mdss_cfg sc8180x_dpu_cfg = {
+ .caps = &sc8180x_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc8180x_mdp),
+ .mdp = sc8180x_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sc8180x_intf),
+ .intf = sc8180x_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8150_regdma,
+ .perf = &sc8180x_perf_data,
+ .mdss_irqs = IRQ_SC8180X_MASK,
+};
+
+static const struct dpu_mdss_cfg sm8250_dpu_cfg = {
+ .caps = &sm8250_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sm8250_mdp),
+ .mdp = sm8250_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8250_regdma,
+ .perf = &sm8250_perf_data,
+ .mdss_irqs = IRQ_SM8250_MASK,
+};
+
+static const struct dpu_mdss_cfg sc7280_dpu_cfg = {
+ .caps = &sc7280_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7280_mdp),
+ .mdp = sc7280_mdp,
+ .ctl_count = ARRAY_SIZE(sc7280_ctl),
+ .ctl = sc7280_ctl,
+ .sspp_count = ARRAY_SIZE(sc7280_sspp),
+ .sspp = sc7280_sspp,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
+ .mixer_count = ARRAY_SIZE(sc7280_lm),
+ .mixer = sc7280_lm,
+ .pingpong_count = ARRAY_SIZE(sc7280_pp),
+ .pingpong = sc7280_pp,
+ .intf_count = ARRAY_SIZE(sc7280_intf),
+ .intf = sc7280_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sc7280_perf_data,
+ .mdss_irqs = IRQ_SC7280_MASK,
+};
+
+static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
+ .caps = &qcm2290_dpu_caps,
+ .mdp_count = ARRAY_SIZE(qcm2290_mdp),
+ .mdp = qcm2290_mdp,
+ .ctl_count = ARRAY_SIZE(qcm2290_ctl),
+ .ctl = qcm2290_ctl,
+ .sspp_count = ARRAY_SIZE(qcm2290_sspp),
+ .sspp = qcm2290_sspp,
+ .mixer_count = ARRAY_SIZE(qcm2290_lm),
+ .mixer = qcm2290_lm,
+ .dspp_count = ARRAY_SIZE(qcm2290_dspp),
+ .dspp = qcm2290_dspp,
+ .pingpong_count = ARRAY_SIZE(qcm2290_pp),
+ .pingpong = qcm2290_pp,
+ .intf_count = ARRAY_SIZE(qcm2290_intf),
+ .intf = qcm2290_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sdm845_regdma,
+ .perf = &qcm2290_perf_data,
+ .mdss_irqs = IRQ_SC7180_MASK,
+};
static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
- { .hw_rev = DPU_HW_VER_300, .cfg_init = msm8998_cfg_init},
- { .hw_rev = DPU_HW_VER_301, .cfg_init = msm8998_cfg_init},
- { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
- { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
- { .hw_rev = DPU_HW_VER_500, .cfg_init = sm8150_cfg_init},
- { .hw_rev = DPU_HW_VER_501, .cfg_init = sm8150_cfg_init},
- { .hw_rev = DPU_HW_VER_510, .cfg_init = sc8180x_cfg_init},
- { .hw_rev = DPU_HW_VER_600, .cfg_init = sm8250_cfg_init},
- { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
- { .hw_rev = DPU_HW_VER_650, .cfg_init = qcm2290_cfg_init},
- { .hw_rev = DPU_HW_VER_720, .cfg_init = sc7280_cfg_init},
-};
-
-void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
-{
- kfree(dpu_cfg);
-}
-
-struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+ { .hw_rev = DPU_HW_VER_300, .dpu_cfg = &msm8998_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_301, .dpu_cfg = &msm8998_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_400, .dpu_cfg = &sdm845_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_401, .dpu_cfg = &sdm845_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_500, .dpu_cfg = &sm8150_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_501, .dpu_cfg = &sm8150_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_510, .dpu_cfg = &sc8180x_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_600, .dpu_cfg = &sm8250_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_620, .dpu_cfg = &sc7180_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_650, .dpu_cfg = &qcm2290_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_720, .dpu_cfg = &sc7280_dpu_cfg},
+};
+
+const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
{
int i;
struct dpu_mdss_cfg *dpu_cfg;
@@ -1995,15 +1946,12 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
- if (cfg_handler[i].hw_rev == hw_rev) {
- cfg_handler[i].cfg_init(dpu_cfg);
- dpu_cfg->hwversion = hw_rev;
- return dpu_cfg;
- }
+ if (cfg_handler[i].hw_rev == hw_rev)
+ return cfg_handler[i].dpu_cfg;
}
DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
- dpu_hw_catalog_deinit(dpu_cfg);
+
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 8cb6d1f25bf9..71fe4c505f5b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -145,6 +145,7 @@ enum {
* @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
* @DPU_MIXER_GC Gamma correction block
* @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
* @DPU_MIXER_MAX maximum value
*/
enum {
@@ -152,6 +153,7 @@ enum {
DPU_MIXER_SOURCESPLIT,
DPU_MIXER_GC,
DPU_DIM_LAYER,
+ DPU_MIXER_COMBINED_ALPHA,
DPU_MIXER_MAX
};
@@ -707,6 +709,7 @@ struct dpu_vbif_qos_tbl {
* @ot_rd_limit default OT read limit
* @ot_wr_limit default OT write limit
* @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @qos_rp_remap_size size of VBIF_XINL_QOS_RP_REMAP register space
* @dynamic_ot_rd_tbl dynamic OT read configuration table
* @dynamic_ot_wr_tbl dynamic OT write configuration table
* @qos_rt_tbl real-time QoS priority table
@@ -719,6 +722,7 @@ struct dpu_vbif_cfg {
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
u32 xin_halt_timeout;
+ u32 qos_rp_remap_size;
struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
struct dpu_vbif_qos_tbl qos_rt_tbl;
@@ -822,8 +826,6 @@ struct dpu_perf_cfg {
* @mdss_irqs: Bitmap with the irqs supported by the target
*/
struct dpu_mdss_cfg {
- u32 hwversion;
-
const struct dpu_caps *caps;
u32 mdp_count;
@@ -857,7 +859,7 @@ struct dpu_mdss_cfg {
const struct dpu_wb_cfg *wb;
u32 reg_dma_count;
- struct dpu_reg_dma_cfg dma_cfg;
+ const struct dpu_reg_dma_cfg *dma_cfg;
u32 ad_count;
@@ -866,7 +868,7 @@ struct dpu_mdss_cfg {
/* Add additional block data structures here */
- struct dpu_perf_cfg perf;
+ const struct dpu_perf_cfg *perf;
const struct dpu_format_extended *dma_formats;
const struct dpu_format_extended *cursor_formats;
const struct dpu_format_extended *vig_formats;
@@ -876,7 +878,7 @@ struct dpu_mdss_cfg {
struct dpu_mdss_hw_cfg_handler {
u32 hw_rev;
- void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+ const struct dpu_mdss_cfg *dpu_cfg;
};
/**
@@ -886,12 +888,6 @@ struct dpu_mdss_hw_cfg_handler {
*
* Return: dpu config structure
*/
-struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
-
-/**
- * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
- * @dpu_cfg: pointer returned from init function
- */
-void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index c33e7ef611a6..e12b7fa48a7b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -58,10 +58,7 @@ static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
for (i = 0; i < m->ctl_count; i++) {
if (ctl == m->ctl[i].id) {
- b->base_off = addr;
- b->blk_off = m->ctl[i].base;
- b->length = m->ctl[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->ctl[i].base;
b->log_mask = DPU_DBG_MASK_CTL;
return &m->ctl[i];
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 5755307089b5..7d9ad6a3f9f6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -10,7 +10,6 @@
#include "dpu_hw_util.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_sspp.h"
-#include "dpu_hw_blk.h"
/**
* dpu_ctl_mode_sel: Interface mode selection
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 4ad8991fc7d9..411689ae6382 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -158,7 +158,7 @@ static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
}
static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
- struct dpu_mdss_cfg *m,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -166,10 +166,7 @@ static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
for (i = 0; i < m->dsc_count; i++) {
if (dsc == m->dsc[i].id) {
- b->base_off = addr;
- b->blk_off = m->dsc[i].base;
- b->length = m->dsc[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->dsc[i].base;
b->log_mask = DPU_DBG_MASK_DSC;
return &m->dsc[i];
}
@@ -187,7 +184,7 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
};
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_dsc *c;
struct dpu_dsc_cfg *cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index b39ee4ed32f7..45e4118f1fa2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -64,7 +64,7 @@ struct dpu_hw_dsc {
* Returns: Error code or allocated dpu_hw_dsc context
*/
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_dsc_destroy - destroys dsc driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 355894a3b48c..8ab5ace34a2d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -80,10 +80,7 @@ static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
for (i = 0; i < m->dspp_count; i++) {
if (dspp == m->dspp[i].id) {
- b->base_off = addr;
- b->blk_off = m->dspp[i].base;
- b->length = m->dspp[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->dspp[i].base;
b->log_mask = DPU_DBG_MASK_DSPP;
return &m->dspp[i];
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index 7fa189cfcb06..05ecfdfac93b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -5,8 +5,6 @@
#ifndef _DPU_HW_DSPP_H
#define _DPU_HW_DSPP_H
-#include "dpu_hw_blk.h"
-
struct dpu_hw_dspp;
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 61284e6c313d..cf1b6d84c18a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -398,16 +398,14 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
return intr_status;
}
-static void __intr_offset(struct dpu_mdss_cfg *m,
+static void __intr_offset(const struct dpu_mdss_cfg *m,
void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
- hw->base_off = addr;
- hw->blk_off = m->mdp[0].base;
- hw->hwversion = m->hwversion;
+ hw->blk_addr = addr + m->mdp[0].base;
}
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
int nirq = MDP_INTR_MAX * 32;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 4154c5e2b4ae..46443955443c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -67,7 +67,7 @@ struct dpu_hw_intr {
* @m : pointer to mdss catalog data
*/
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_intr_destroy(): Cleanup interrupts hw object
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 3f4d2c6e1b45..7ce66bf3f4c8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include "dpu_hwio.h"
@@ -67,6 +69,9 @@
#define INTF_CFG2_DATABUS_WIDEN BIT(0)
#define INTF_CFG2_DATA_HCTL_EN BIT(4)
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -77,10 +82,7 @@ static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
for (i = 0; i < m->intf_count; i++) {
if ((intf == m->intf[i].id) &&
(m->intf[i].type != INTF_NONE)) {
- b->base_off = addr;
- b->blk_off = m->intf[i].base;
- b->length = m->intf[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->intf[i].base;
b->log_mask = DPU_DBG_MASK_INTF;
return &m->intf[i];
}
@@ -319,6 +321,16 @@ static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
return DPU_REG_READ(c, INTF_LINE_COUNT);
}
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count)
+{
+ dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count);
+}
+
+static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
+{
+ return dpu_hw_collect_misr(&intf->hw, INTF_MISR_CTRL, INTF_MISR_SIGNATURE, misr_value);
+}
+
static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
unsigned long cap)
{
@@ -329,6 +341,8 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_line_count = dpu_hw_intf_get_line_count;
if (cap & BIT(DPU_INTF_INPUT_CTRL))
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
}
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 7b2d96ac61e8..643dd10bc030 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_INTF_H
@@ -8,7 +10,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
-#include "dpu_hw_blk.h"
struct dpu_hw_intf;
@@ -57,6 +58,8 @@ struct intf_status {
* @get_line_count: reads current vertical line counter
* @bind_pingpong_blk: enable/disable the connection with pingpong which will
* feed pixels to this interface
+ * @setup_misr: enable/disable MISR
+ * @collect_misr: read MISR signature
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
@@ -77,6 +80,8 @@ struct dpu_hw_intf_ops {
void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
bool enable,
const enum dpu_pingpong pp);
+ void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count);
+ int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
};
struct dpu_hw_intf {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 462f5082099e..f5120ea91ede 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@@ -27,11 +28,6 @@
#define LM_MISR_CTRL 0x310
#define LM_MISR_SIGNATURE 0x314
-#define LM_MISR_FRAME_COUNT_MASK 0xFF
-#define LM_MISR_CTRL_ENABLE BIT(8)
-#define LM_MISR_CTRL_STATUS BIT(9)
-#define LM_MISR_CTRL_STATUS_CLEAR BIT(10)
-#define LM_MISR_CTRL_FREE_RUN_MASK BIT(31)
static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
@@ -43,10 +39,7 @@ static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
for (i = 0; i < m->mixer_count; i++) {
if (mixer == m->mixer[i].id) {
- b->base_off = addr;
- b->blk_off = m->mixer[i].base;
- b->length = m->mixer[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->mixer[i].base;
b->log_mask = DPU_DBG_MASK_LM;
return &m->mixer[i];
}
@@ -108,47 +101,15 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, LM_MISR_CTRL_STATUS_CLEAR);
-
- /* Clear old MISR value (in case it's read before a new value is calculated)*/
- wmb();
-
- if (enable) {
- config = (frame_count & LM_MISR_FRAME_COUNT_MASK) |
- LM_MISR_CTRL_ENABLE | LM_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, config);
- } else {
- DPU_REG_WRITE(c, LM_MISR_CTRL, 0);
- }
-
+ dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count);
}
static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 ctrl = 0;
-
- if (!misr_value)
- return -EINVAL;
-
- ctrl = DPU_REG_READ(c, LM_MISR_CTRL);
-
- if (!(ctrl & LM_MISR_CTRL_ENABLE))
- return -ENODATA;
-
- if (!(ctrl & LM_MISR_CTRL_STATUS))
- return -EINVAL;
-
- *misr_value = DPU_REG_READ(c, LM_MISR_SIGNATURE);
-
- return 0;
+ return dpu_hw_collect_misr(&ctx->hw, LM_MISR_CTRL, LM_MISR_SIGNATURE, misr_value);
}
-static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -204,8 +165,8 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (m->hwversion >= DPU_HW_VER_400)
- ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+ if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index d8052fb2d5da..652ddfdedec3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -8,7 +8,6 @@
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
-#include "dpu_hw_blk.h"
struct dpu_hw_mixer;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index c06d595d5df0..def0a87fdba5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -23,10 +23,7 @@ static const struct dpu_merge_3d_cfg *_merge_3d_offset(enum dpu_merge_3d idx,
for (i = 0; i < m->merge_3d_count; i++) {
if (idx == m->merge_3d[i].id) {
- b->base_off = addr;
- b->blk_off = m->merge_3d[i].base;
- b->length = m->merge_3d[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->merge_3d[i].base;
b->log_mask = DPU_DBG_MASK_PINGPONG;
return &m->merge_3d[i];
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index 870bdb14613e..81fd1d5f718e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -8,7 +8,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
-#include "dpu_hw_blk.h"
struct dpu_hw_merge_3d;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 47c6ab6caf95..0fcad9760b6f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -51,10 +51,7 @@ static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
for (i = 0; i < m->pingpong_count; i++) {
if (pp == m->pingpong[i].id) {
- b->base_off = addr;
- b->blk_off = m->pingpong[i].base;
- b->length = m->pingpong[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->pingpong[i].base;
b->log_mask = DPU_DBG_MASK_PINGPONG;
return &m->pingpong[i];
}
@@ -158,7 +155,7 @@ static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
return -EINVAL;
c = &pp->hw;
- rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+ rc = readl_poll_timeout(c->blk_addr + PP_LINE_COUNT,
val, (val & 0xffff) >= 1, 10, timeout_us);
return rc;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 12758468d9ca..c00223441d99 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -8,7 +8,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
-#include "dpu_hw_blk.h"
#define DITHER_MATRIX_SZ 16
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 0a0864dff783..102c21bb4192 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -761,7 +761,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms,
static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
- struct dpu_mdss_cfg *catalog,
+ const struct dpu_mdss_cfg *catalog,
struct dpu_hw_blk_reg_map *b)
{
int i;
@@ -769,10 +769,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
if ((sspp < SSPP_MAX) && catalog && addr && b) {
for (i = 0; i < catalog->sspp_count; i++) {
if (sspp == catalog->sspp[i].id) {
- b->base_off = addr;
- b->blk_off = catalog->sspp[i].base;
- b->length = catalog->sspp[i].len;
- b->hwversion = catalog->hwversion;
+ b->blk_addr = addr + catalog->sspp[i].base;
b->log_mask = DPU_DBG_MASK_SSPP;
return &catalog->sspp[i];
}
@@ -783,7 +780,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
}
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe)
{
struct dpu_hw_pipe *hw_pipe;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index a81e16657d61..78b1bc9e004f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -8,7 +8,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
-#include "dpu_hw_blk.h"
#include "dpu_formats.h"
struct dpu_hw_pipe;
@@ -360,7 +359,7 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_pipe {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
- struct dpu_mdss_cfg *catalog;
+ const struct dpu_mdss_cfg *catalog;
const struct dpu_mdp_cfg *mdp;
/* Pipe */
@@ -381,7 +380,7 @@ struct dpu_kms;
* @is_virtual_pipe: is this pipe virtual pipe
*/
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index ab3ef162b666..c3110a25a30d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -285,10 +285,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
for (i = 0; i < m->mdp_count; i++) {
if (mdp == m->mdp[i].id) {
- b->base_off = addr;
- b->blk_off = m->mdp[i].base;
- b->length = m->mdp[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->mdp[i].base;
b->log_mask = DPU_DBG_MASK_TOP;
return &m->mdp[i];
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 3aa10c89ca1b..a1a9e44bed36 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -8,7 +8,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
-#include "dpu_hw_blk.h"
struct dpu_hw_mdp;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 512316f25a51..8062228eada6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -80,13 +82,13 @@ void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
/* don't need to mutex protect this */
if (c->log_mask & dpu_hw_util_log_mask)
DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
- name, c->blk_off + reg_off, val);
- writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+ name, reg_off, val);
+ writel_relaxed(val, c->blk_addr + reg_off);
}
int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
{
- return readl_relaxed(c->base_off + c->blk_off + reg_off);
+ return readl_relaxed(c->blk_addr + reg_off);
}
u32 *dpu_hw_util_get_log_mask_ptr(void)
@@ -447,3 +449,48 @@ u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
return 0;
}
+
+void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ bool enable, u32 frame_count)
+{
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, misr_ctrl_offset, MISR_CTRL_STATUS_CLEAR);
+
+ /* Clear old MISR value (in case it's read before a new value is calculated) */
+ wmb();
+
+ if (enable) {
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, misr_ctrl_offset, config);
+ } else {
+ DPU_REG_WRITE(c, misr_ctrl_offset, 0);
+ }
+
+}
+
+int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ u32 misr_signature_offset,
+ u32 *misr_value)
+{
+ u32 ctrl = 0;
+
+ if (!misr_value)
+ return -EINVAL;
+
+ ctrl = DPU_REG_READ(c, misr_ctrl_offset);
+
+ if (!(ctrl & MISR_CTRL_ENABLE))
+ return -ENODATA;
+
+ if (!(ctrl & MISR_CTRL_STATUS))
+ return -EINVAL;
+
+ *misr_value = DPU_REG_READ(c, misr_signature_offset);
+
+ return 0;
+}
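For illustration, a minimal sketch of how a block driver consumes the two shared helpers above; the function names, the "hw" parameter and the BLK_MISR_* offsets are placeholders standing in for a concrete block's register map and offsets (compare INTF_MISR_CTRL/INTF_MISR_SIGNATURE and LM_MISR_CTRL/LM_MISR_SIGNATURE earlier in this series):

/* Sketch only: BLK_MISR_CTRL and BLK_MISR_SIGNATURE are hypothetical offsets. */
static void blk_misr_enable(struct dpu_hw_blk_reg_map *hw)
{
	/* the helper always sets the FREE_RUN bit when enabling */
	dpu_hw_setup_misr(hw, BLK_MISR_CTRL, true, 0);
}

static int blk_misr_read(struct dpu_hw_blk_reg_map *hw, u32 *crc)
{
	/* returns -ENODATA until a signature has actually been latched */
	return dpu_hw_collect_misr(hw, BLK_MISR_CTRL, BLK_MISR_SIGNATURE, crc);
}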
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index e4a65eb4f769..27f4c39e35ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@@ -12,27 +13,32 @@
#include "dpu_hw_catalog.h"
#define REG_MASK(n) ((BIT(n)) - 1)
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define MISR_CTRL_FREE_RUN_MASK BIT(31)
/*
* This is the common struct maintained by each sub block
* for mapping the register offsets in this block to the
* absolute IO address
- * @base_off: mdp register mapped offset
- * @blk_off: pipe offset relative to mdss offset
- * @length length of register block offset
- * @xin_id xin id
- * @hwversion mdss hw version number
+ * @blk_addr: hw block register mapped address
+ * @log_mask: log mask for this block
*/
struct dpu_hw_blk_reg_map {
- void __iomem *base_off;
- u32 blk_off;
- u32 length;
- u32 xin_id;
- u32 hwversion;
+ void __iomem *blk_addr;
u32 log_mask;
};
/**
+ * struct dpu_hw_blk - opaque hardware block object
+ */
+struct dpu_hw_blk {
+ /* opaque */
+};
+
+/**
* struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
* @enable: detail enhancer enable/disable
* @sharpen_level1: sharpening strength for noise
@@ -343,4 +349,14 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
u32 total_fl);
+void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ bool enable,
+ u32 frame_count);
+
+int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ u32 misr_signature_offset,
+ u32 *misr_value);
+
#endif /* _DPU_HW_UTIL_H */
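Taken together with the struct change above, every _xxx_offset() helper in this series collapses the old base_off/blk_off/length/hwversion bookkeeping into a single precomputed address. A hedged sketch of the resulting pattern, using a made-up catalog block "foo" (dpu_foo_cfg, foo_count and foo[] are not real catalog fields):

static const struct dpu_foo_cfg *_foo_offset(int id, const struct dpu_mdss_cfg *m,
					     void __iomem *addr,
					     struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->foo_count; i++) {
		if (id == m->foo[i].id) {
			/* one mapped address replaces base_off + blk_off */
			b->blk_addr = addr + m->foo[i].base;
			b->log_mask = DPU_DBG_MASK_TOP;
			return &m->foo[i];
		}
	}

	return NULL;
}

Register accessors then reduce to readl_relaxed(c->blk_addr + reg_off), as dpu_reg_read()/dpu_reg_write() show above.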
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index b757054e1c23..16c56e240706 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -30,7 +30,7 @@
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
-#define VBIF_XINL_QOS_LVL_REMAP_000(v) (v < DPU_HW_VER_400 ? 0x570 : 0x0590)
+#define VBIF_XINL_QOS_LVL_REMAP_000(vbif) (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size)
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors)
@@ -163,7 +163,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
c = &vbif->hw;
- reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion);
+ reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif);
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
reg_shift = (xin_id & 0x7) * 4;
@@ -220,10 +220,7 @@ static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
for (i = 0; i < m->vbif_count; i++) {
if (vbif == m->vbif[i].id) {
- b->base_off = addr;
- b->blk_off = m->vbif[i].base;
- b->length = m->vbif[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->vbif[i].base;
b->log_mask = DPU_DBG_MASK_VBIF;
return &m->vbif[i];
}
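As a quick sanity check of the new macro: VBIF_XINL_QOS_RP_REMAP_000 is 0x0550, so reproducing the previously hard-coded offsets requires qos_rp_remap_size to be 0x20 on VBIFs older than DPU_HW_VER_400 (0x0550 + 0x20 = 0x0570) and 0x40 on newer ones (0x0550 + 0x40 = 0x0590). The actual per-SoC values live in the catalog, which is outside this hunk, so these figures are inferred from the removed line rather than quoted from it.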
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index bcccce292937..2d28afdf860e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -60,10 +60,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
for (i = 0; i < m->wb_count; i++) {
if (wb == m->wb[i].id) {
- b->base_off = addr;
- b->blk_off = m->wb[i].base;
- b->length = m->wb[i].len;
- b->hwversion = m->hwversion;
+ b->blk_addr = addr + m->wb[i].base;
return &m->wb[i];
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index bce47647d891..008e1420e6e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
@@ -49,8 +50,6 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
-#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */
-
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -584,9 +583,7 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
}
info.h_tile_instance[info.num_of_h_tiles++] = i;
- info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
- MSM_DISPLAY_CAP_CMD_MODE :
- MSM_DISPLAY_CAP_VID_MODE;
+ info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
@@ -639,7 +636,6 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
info.num_of_h_tiles = 1;
info.h_tile_instance[0] = i;
- info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
info.intf_type = encoder->encoder_type;
rc = dpu_encoder_setup(dev, encoder, &info);
if (rc) {
@@ -747,7 +743,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
unsigned int num_encoders;
struct msm_drm_private *priv;
- struct dpu_mdss_cfg *catalog;
+ const struct dpu_mdss_cfg *catalog;
int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
@@ -844,8 +840,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_rm_destroy(&dpu_kms->rm);
dpu_kms->rm_init = false;
- if (dpu_kms->catalog)
- dpu_hw_catalog_deinit(dpu_kms->catalog);
dpu_kms->catalog = NULL;
if (dpu_kms->vbif[VBIF_NRT])
@@ -907,7 +901,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
{
int i;
struct dpu_kms *dpu_kms;
- struct dpu_mdss_cfg *cat;
+ const struct dpu_mdss_cfg *cat;
struct dpu_hw_mdp *top;
dpu_kms = to_dpu_kms(kms);
@@ -952,8 +946,8 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);
- msm_disp_snapshot_add_block(disp_state, top->hw.length,
- dpu_kms->mmio + top->hw.blk_off, "top");
+ msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
+ dpu_kms->mmio + cat->mdp[0].base, "top");
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -999,32 +993,14 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
- struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
- struct msm_mmu *mmu;
- struct device *dpu_dev = dpu_kms->dev->dev;
- struct device *mdss_dev = dpu_dev->parent;
- domain = iommu_domain_alloc(&platform_bus_type);
- if (!domain)
- return 0;
-
- /* IOMMUs are a part of MDSS device tree binding, not the
- * MDP/DPU device. */
- mmu = msm_iommu_new(mdss_dev, domain);
- if (IS_ERR(mmu)) {
- iommu_domain_free(domain);
- return PTR_ERR(mmu);
- }
- aspace = msm_gem_address_space_create(mmu, "dpu1",
- 0x1000, 0x100000000 - 0x1000);
-
- if (IS_ERR(aspace)) {
- mmu->funcs->destroy(mmu);
+ aspace = msm_kms_init_aspace(dpu_kms->dev);
+ if (IS_ERR(aspace))
return PTR_ERR(aspace);
- }
dpu_kms->base.aspace = aspace;
+
return 0;
}
@@ -1305,15 +1281,9 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *encoder;
struct drm_device *ddev;
- int i;
ddev = dpu_kms->dev;
- WARN_ON(!(dpu_kms->num_paths));
- /* Min vote of BW is required before turning on AXI clk */
- for (i = 0; i < dpu_kms->num_paths; i++)
- icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
-
rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 832a0769f2e7..ed80ed6784ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -69,7 +69,7 @@ struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
int core_rev;
- struct dpu_mdss_cfg *catalog;
+ const struct dpu_mdss_cfg *catalog;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 5b5aef249390..a617a3d8b1bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -12,7 +12,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include "msm_drv.h"
@@ -106,7 +108,7 @@ struct dpu_plane {
bool is_rt_pipe;
bool is_virtual;
struct list_head mplane_list;
- struct dpu_mdss_cfg *catalog;
+ const struct dpu_mdss_cfg *catalog;
};
static const uint64_t supported_format_modifiers[] = {
@@ -160,7 +162,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
vbp = mode->vtotal - mode->vsync_end;
vpw = mode->vsync_end - mode->vsync_start;
vfp = mode->vsync_start - mode->vdisplay;
- hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines;
+ hw_latency_lines = dpu_kms->catalog->perf->min_prefill_lines;
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;
@@ -309,7 +311,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
}
qos_lut = _dpu_hw_get_qos_lut(
- &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+ &pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl);
trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
@@ -336,9 +338,9 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
u32 danger_lut, safe_lut;
if (!pdpu->is_rt_pipe) {
- danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ danger_lut = pdpu->catalog->perf->danger_lut_tbl
[DPU_QOS_LUT_USAGE_NRT];
- safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ safe_lut = pdpu->catalog->perf->safe_lut_tbl
[DPU_QOS_LUT_USAGE_NRT];
} else {
fmt = dpu_get_dpu_format_ext(
@@ -346,14 +348,14 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
fb->modifier);
if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
- danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ danger_lut = pdpu->catalog->perf->danger_lut_tbl
[DPU_QOS_LUT_USAGE_LINEAR];
- safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ safe_lut = pdpu->catalog->perf->safe_lut_tbl
[DPU_QOS_LUT_USAGE_LINEAR];
} else {
- danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ danger_lut = pdpu->catalog->perf->danger_lut_tbl
[DPU_QOS_LUT_USAGE_MACROTILE];
- safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ safe_lut = pdpu->catalog->perf->safe_lut_tbl
[DPU_QOS_LUT_USAGE_MACROTILE];
}
}
@@ -1225,7 +1227,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
- cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg
+ cdp_cfg.enable = pdpu->catalog->perf->cdp_cfg
[DPU_PERF_CDP_USAGE_RT].rd_enable;
cdp_cfg.ubwc_meta_enable =
DPU_FORMAT_IS_UBWC(fmt);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 06f03e7081bc..73b3442e7467 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -95,7 +95,7 @@ int dpu_rm_destroy(struct dpu_rm *rm)
}
int dpu_rm_init(struct dpu_rm *rm,
- struct dpu_mdss_cfg *cat,
+ const struct dpu_mdss_cfg *cat,
void __iomem *mmio)
{
int rc, i;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 2f34a31d8d0d..59de72b381f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -42,7 +42,7 @@ struct dpu_rm {
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_init(struct dpu_rm *rm,
- struct dpu_mdss_cfg *cat,
+ const struct dpu_mdss_cfg *cat,
void __iomem *mmio);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 399115e4e217..088ec990a2f2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -3,6 +3,8 @@
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
+#include <drm/drm_edid.h>
+
#include "dpu_writeback.h"
static int dpu_wb_conn_get_modes(struct drm_connector *connector)
@@ -11,7 +13,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
- return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
+ /*
+ * Ideally we would limit the modes to the catalog's max_linewidth, but on
+ * some chipsets that also lets 4K modes through, and those then fail the
+ * per-SSPP bandwidth checks. Until dual-SSPP and source-split support are
+ * added, limit the modes to max_mixer_width instead; 4K modes can be
+ * enabled once that support lands.
+ */
+ return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
dev->mode_config.max_height);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index fb48c8c19ec3..964573d26d26 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -13,8 +13,6 @@
#include "msm_mmu.h"
#include "mdp4_kms.h"
-static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
-
static int mdp4_hw_init(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
@@ -216,6 +214,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+ of_node_put(panel_node);
return PTR_ERR(encoder);
}
@@ -225,6 +224,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+ of_node_put(panel_node);
return PTR_ERR(connector);
}
@@ -384,13 +384,17 @@ static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
static int mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
- struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
+ struct iommu_domain *iommu;
struct msm_gem_address_space *aspace;
int irq, ret;
u32 major, minor;
+ unsigned long max_clk;
+
+ /* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
+ max_clk = 266667000;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
@@ -458,7 +462,7 @@ static int mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- clk_set_rate(mdp4_kms->clk, config->max_clk);
+ clk_set_rate(mdp4_kms->clk, max_clk);
read_mdp_hw_revision(mdp4_kms, &major, &minor);
@@ -478,7 +482,7 @@ static int mdp4_kms_init(struct drm_device *dev)
ret = PTR_ERR(mdp4_kms->lut_clk);
goto fail;
}
- clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ clk_set_rate(mdp4_kms->lut_clk, max_clk);
}
pm_runtime_enable(dev->dev);
@@ -495,9 +499,9 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_disable(mdp4_kms);
mdelay(16);
- if (config->iommu) {
- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
- config->iommu);
+ iommu = iommu_domain_alloc(pdev->dev.bus);
+ if (iommu) {
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
aspace = msm_gem_address_space_create(mmu,
"mdp4", 0x1000, 0x100000000 - 0x1000);
@@ -551,17 +555,6 @@ fail:
return ret;
}
-static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
-{
- static struct mdp4_platform_config config = {};
-
- /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
- config.max_clk = 266667000;
- config.iommu = iommu_domain_alloc(&platform_bus_type);
-
- return &config;
-}
-
static const struct dev_pm_ops mdp4_pm_ops = {
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index e8ee92ab7956..01179e764a29 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -42,12 +42,6 @@ struct mdp4_kms {
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
-/* platform config data (ie. from DT, or pdata) */
-struct mdp4_platform_config {
- struct iommu_domain *iommu;
- uint32_t max_clk;
-};
-
static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{
msm_writel(data, mdp4_kms->mmio + reg);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 3e20f72d75ef..b689b618da78 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include "mdp4_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 1bf9ff5dbabc..1f1555aa02d2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -837,6 +837,11 @@ static const struct mdp5_cfg_hw msm8x53_config = {
[2] = INTF_DSI,
},
},
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
.max_clk = 400000000,
};
@@ -1248,8 +1253,6 @@ static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
{ .revision = 3, .config = { .hw = &sdm630_config } },
};
-static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
-
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->config.hw;
@@ -1274,10 +1277,8 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
- struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp5_cfg_handler *cfg_handler;
const struct mdp5_cfg_handler *cfg_handlers;
- struct mdp5_cfg_platform *pconfig;
int i, ret = 0, num_handlers;
cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
@@ -1320,9 +1321,6 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
cfg_handler->revision = minor;
cfg_handler->config.hw = mdp5_cfg;
- pconfig = mdp5_get_config(pdev);
- memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
-
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
@@ -1333,12 +1331,3 @@ fail:
return ERR_PTR(ret);
}
-
-static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
-{
- static struct mdp5_cfg_platform config = {};
-
- config.iommu = iommu_domain_alloc(&platform_bus_type);
-
- return &config;
-}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 6b03d7899309..c2502cc33864 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -104,14 +104,8 @@ struct mdp5_cfg_hw {
uint32_t max_clk;
};
-/* platform config data (ie. from DT, or pdata) */
-struct mdp5_cfg_platform {
- struct iommu_domain *iommu;
-};
-
struct mdp5_cfg {
const struct mdp5_cfg_hw *hw;
- struct mdp5_cfg_platform platform;
};
struct mdp5_kms;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 31447da0af25..e86421c69bd1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -8,6 +8,7 @@
#include <linux/sort.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3d5621a68f85..d2a48caf9d27 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -557,7 +557,6 @@ static int mdp5_kms_init(struct drm_device *dev)
struct msm_kms *kms;
struct msm_gem_address_space *aspace;
int irq, i, ret;
- struct device *iommu_dev;
ret = mdp5_init(to_platform_device(dev->dev), dev);
@@ -601,32 +600,14 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- if (config->platform.iommu) {
- struct msm_mmu *mmu;
-
- iommu_dev = &pdev->dev;
- if (!dev_iommu_fwspec_get(iommu_dev))
- iommu_dev = iommu_dev->parent;
-
- mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
-
- aspace = msm_gem_address_space_create(mmu, "mdp5",
- 0x1000, 0x100000000 - 0x1000);
-
- if (IS_ERR(aspace)) {
- if (!IS_ERR(mmu))
- mmu->funcs->destroy(mmu);
- ret = PTR_ERR(aspace);
- goto fail;
- }
-
- kms->aspace = aspace;
- } else {
- DRM_DEV_INFO(&pdev->dev,
- "no iommu, fallback to phys contig buffers for scanout\n");
- aspace = NULL;
+ aspace = msm_kms_init_aspace(mdp5_kms->dev);
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
}
+ kms->aspace = aspace;
+
pm_runtime_put_sync(&pdev->dev);
ret = modeset_init(mdp5_kms);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index a4f5cb90f3e8..e4b8a789835a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -123,12 +123,13 @@ int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct mdp5_global_state *state = mdp5_get_global_state(s);
+ struct mdp5_global_state *state;
struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
return 0;
+ state = mdp5_get_global_state(s);
if (IS_ERR(state))
return PTR_ERR(state);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index e8c47a4a1d31..bd2c4ac45601 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -6,8 +6,10 @@
*/
#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index 5495d8b3f5b9..025595336f26 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -5,6 +5,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "msm_drv.h"
#include "mdp_kms.h"
diff --git a/drivers/gpu/drm/msm/dp/dp_clk_util.c b/drivers/gpu/drm/msm/dp/dp_clk_util.c
deleted file mode 100644
index 44a4fc59ff31..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_clk_util.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
- * All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/of.h>
-
-#include <drm/drm_print.h>
-
-#include "dp_clk_util.h"
-
-void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
-{
- int i;
-
- for (i = num_clk - 1; i >= 0; i--) {
- if (clk_arry[i].clk)
- clk_put(clk_arry[i].clk);
- clk_arry[i].clk = NULL;
- }
-}
-
-int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
-{
- int i, rc = 0;
-
- for (i = 0; i < num_clk; i++) {
- clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
- rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
- if (rc) {
- DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name, rc);
- goto error;
- }
- }
-
- return rc;
-
-error:
- for (i--; i >= 0; i--) {
- if (clk_arry[i].clk)
- clk_put(clk_arry[i].clk);
- clk_arry[i].clk = NULL;
- }
-
- return rc;
-}
-
-int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
-{
- int i, rc = 0;
-
- for (i = 0; i < num_clk; i++) {
- if (clk_arry[i].clk) {
- if (clk_arry[i].type != DSS_CLK_AHB) {
- DEV_DBG("%pS->%s: '%s' rate %ld\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name,
- clk_arry[i].rate);
- rc = clk_set_rate(clk_arry[i].clk,
- clk_arry[i].rate);
- if (rc) {
- DEV_ERR("%pS->%s: %s failed. rc=%d\n",
- __builtin_return_address(0),
- __func__,
- clk_arry[i].clk_name, rc);
- break;
- }
- }
- } else {
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
- rc = -EPERM;
- break;
- }
- }
-
- return rc;
-}
-
-int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
-{
- int i, rc = 0;
-
- if (enable) {
- for (i = 0; i < num_clk; i++) {
- DEV_DBG("%pS->%s: enable '%s'\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
- rc = clk_prepare_enable(clk_arry[i].clk);
- if (rc)
- DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
- __builtin_return_address(0),
- __func__,
- clk_arry[i].clk_name, rc);
-
- if (rc && i) {
- msm_dss_enable_clk(&clk_arry[i - 1],
- i - 1, false);
- break;
- }
- }
- } else {
- for (i = num_clk - 1; i >= 0; i--) {
- DEV_DBG("%pS->%s: disable '%s'\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
-
- clk_disable_unprepare(clk_arry[i].clk);
- }
- }
-
- return rc;
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_clk_util.h b/drivers/gpu/drm/msm/dp/dp_clk_util.h
deleted file mode 100644
index 067bf87f3d97..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_clk_util.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DP_CLK_UTIL_H__
-#define __DP_CLK_UTIL_H__
-
-#include <linux/platform_device.h>
-#include <linux/types.h>
-
-#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
-#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
-#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
-#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
-
-enum dss_clk_type {
- DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
- DSS_CLK_PCLK,
-};
-
-struct dss_clk {
- struct clk *clk; /* clk handle */
- char clk_name[32];
- enum dss_clk_type type;
- unsigned long rate;
- unsigned long max_rate;
-};
-
-struct dss_module_power {
- unsigned int num_clk;
- struct dss_clk *clk_config;
-};
-
-int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
-void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
-int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
-int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
-#endif /* __DP_CLK_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d21971baa24c..ab6aa13b1639 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1321,9 +1321,9 @@ static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
enum dp_pm_type module, char *name, unsigned long rate)
{
u32 num = ctrl->parser->mp[module].num_clk;
- struct dss_clk *cfg = ctrl->parser->mp[module].clk_config;
+ struct clk_bulk_data *cfg = ctrl->parser->mp[module].clocks;
- while (num && strcmp(cfg->clk_name, name)) {
+ while (num && strcmp(cfg->id, name)) {
num--;
cfg++;
}
@@ -1332,7 +1332,7 @@ static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
rate, name);
if (num)
- cfg->rate = rate;
+ clk_set_rate(cfg->clk, rate);
else
DRM_ERROR("%s clock doesn't exist to set rate %lu\n",
name, rate);
@@ -1349,12 +1349,11 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
opts_dp->lanes = ctrl->link->link_params.num_lanes;
opts_dp->link_rate = ctrl->link->link_params.rate / 100;
opts_dp->ssc = drm_dp_max_downspread(dpcd);
- dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
- ctrl->link->link_params.rate * 1000);
phy_configure(phy, &dp_io->phy_opts);
phy_power_on(phy);
+ dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
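The hunks in this file establish a consistent OPP pairing: the link rate is handed to dev_pm_opp_set_rate() right before the link clocks are enabled, and dropped back to 0 wherever they are disabled. A minimal sketch of that pairing (the helper names are illustrative only; error handling trimmed):

static int link_clocks_on(struct dp_ctrl_private *ctrl, unsigned long link_rate_hz)
{
	/* vote for the link rate before enabling the link clocks */
	dev_pm_opp_set_rate(ctrl->dev, link_rate_hz);
	return dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
}

static int link_clocks_off(struct dp_ctrl_private *ctrl)
{
	/* drop the OPP vote whenever the link clocks go down */
	dev_pm_opp_set_rate(ctrl->dev, 0);
	return dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
}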
@@ -1390,8 +1389,13 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
dp_catalog_ctrl_reset(ctrl->catalog);
- if (enable)
- dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ /*
+ * The DP controller's programmable registers are not reset to their
+ * default values by DP_SW_RESET, so the interrupt mask bits have to be
+ * updated explicitly here in order to enable or disable interrupts
+ * after the reset.
+ */
+ dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
@@ -1457,6 +1461,7 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
* link clock might have been adjusted as part of the
* link maintenance.
*/
+ dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
@@ -1488,6 +1493,7 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
dp_catalog_ctrl_reset(ctrl->catalog);
+ dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
@@ -1529,6 +1535,8 @@ end:
return ret;
}
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
+
static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1552,7 +1560,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
if (!ret)
- ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+ ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
else
DRM_ERROR("failed to enable DP link controller\n");
@@ -1808,7 +1816,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
+{
+ int ret;
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+ ret = dp_ctrl_enable_stream_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_ctrl_send_phy_test_pattern(ctrl);
+
+ return 0;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
@@ -1844,12 +1872,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
goto end;
}
- if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
- dp_ctrl_send_phy_test_pattern(ctrl);
- return 0;
- }
-
- if (!dp_ctrl_channel_eq_ok(ctrl))
+ if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
@@ -1907,6 +1930,7 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
}
}
+ dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
@@ -1975,6 +1999,7 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
if (ret)
DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
+ dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 0745fde01b45..b563e2e3bfe5 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -21,7 +21,7 @@ struct dp_ctrl {
};
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 09174c2a9827..bfd0aeff3f0d 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -131,35 +131,43 @@ struct msm_dp_config {
size_t num_descs;
};
+static const struct msm_dp_desc sc7180_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+};
+
static const struct msm_dp_config sc7180_dp_cfg = {
- .descs = (const struct msm_dp_desc[]) {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- },
- .num_descs = 1,
+ .descs = sc7180_dp_descs,
+ .num_descs = ARRAY_SIZE(sc7180_dp_descs),
+};
+
+static const struct msm_dp_desc sc7280_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
};
static const struct msm_dp_config sc7280_dp_cfg = {
- .descs = (const struct msm_dp_desc[]) {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
- },
- .num_descs = 2,
+ .descs = sc7280_dp_descs,
+ .num_descs = ARRAY_SIZE(sc7280_dp_descs),
+};
+
+static const struct msm_dp_desc sc8180x_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_2] = { .io_start = 0x0ae9a000, .connector_type = DRM_MODE_CONNECTOR_eDP },
};
static const struct msm_dp_config sc8180x_dp_cfg = {
- .descs = (const struct msm_dp_desc[]) {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- [MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- [MSM_DP_CONTROLLER_2] = { .io_start = 0x0ae9a000, .connector_type = DRM_MODE_CONNECTOR_eDP },
- },
- .num_descs = 3,
+ .descs = sc8180x_dp_descs,
+ .num_descs = ARRAY_SIZE(sc8180x_dp_descs),
+};
+
+static const struct msm_dp_desc sm8350_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
};
static const struct msm_dp_config sm8350_dp_cfg = {
- .descs = (const struct msm_dp_desc[]) {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- },
- .num_descs = 1,
+ .descs = sm8350_dp_descs,
+ .num_descs = ARRAY_SIZE(sm8350_dp_descs),
};
static const struct of_device_id dp_dt_match[] = {
@@ -309,12 +317,15 @@ static void dp_display_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
/* disable all HPD interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+ if (dp->core_initialized)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
kthread_stop(dp->ev_tsk);
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
+ dp->drm_dev = NULL;
+ dp->aux->drm_dev = NULL;
priv->dp[dp->id] = NULL;
}
@@ -603,19 +614,10 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
- /*
- * add fail safe mode outside event_mutex scope
- * to avoid potiential circular lock with drm thread
- */
- dp_panel_add_fail_safe_mode(dp->dp_display.connector);
-
/* uevent will complete connection part */
return 0;
};
-static int dp_display_enable(struct dp_display_private *dp, u32 data);
-static int dp_display_disable(struct dp_display_private *dp, u32 data);
-
static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
bool plugged)
{
@@ -862,12 +864,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
return 0;
}
-static int dp_display_prepare(struct msm_dp *dp_display)
-{
- return 0;
-}
-
-static int dp_display_enable(struct dp_display_private *dp, u32 data)
+static int dp_display_enable(struct dp_display_private *dp, bool force_link_train)
{
int rc = 0;
struct msm_dp *dp_display = &dp->dp_display;
@@ -878,7 +875,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
return 0;
}
- rc = dp_ctrl_on_stream(dp->ctrl);
+ rc = dp_ctrl_on_stream(dp->ctrl, force_link_train);
if (!rc)
dp_display->power_on = true;
@@ -904,7 +901,7 @@ static int dp_display_post_enable(struct msm_dp *dp_display)
return 0;
}
-static int dp_display_disable(struct dp_display_private *dp, u32 data)
+static int dp_display_disable(struct dp_display_private *dp)
{
struct msm_dp *dp_display = &dp->dp_display;
@@ -943,11 +940,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
return 0;
}
-static int dp_display_unprepare(struct msm_dp *dp_display)
-{
- return 0;
-}
-
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev)
{
@@ -995,7 +987,7 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
- return MODE_BAD;
+ return MODE_CLOCK_HIGH;
dp_display = container_of(dp, struct dp_display_private, dp_display);
link_info = &dp_display->panel->link_info;
@@ -1463,21 +1455,9 @@ static int dp_pm_suspend(struct device *dev)
return 0;
}
-static int dp_pm_prepare(struct device *dev)
-{
- return 0;
-}
-
-static void dp_pm_complete(struct device *dev)
-{
-
-}
-
static const struct dev_pm_ops dp_pm_ops = {
.suspend = dp_pm_suspend,
.resume = dp_pm_resume,
- .prepare = dp_pm_prepare,
- .complete = dp_pm_complete,
};
static struct platform_driver dp_display_driver = {
@@ -1627,8 +1607,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
return ret;
}
- dp_display->encoder = encoder;
-
ret = dp_display_get_next_bridge(dp_display);
if (ret)
return ret;
@@ -1644,7 +1622,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
priv->bridges[priv->num_bridges++] = dp_display->bridge;
- dp_display->connector = dp_drm_connector_init(dp_display);
+ dp_display->connector = dp_drm_connector_init(dp_display, encoder);
if (IS_ERR(dp_display->connector)) {
ret = PTR_ERR(dp_display->connector);
DRM_DEV_ERROR(dev->dev,
@@ -1665,6 +1643,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
int rc = 0;
struct dp_display_private *dp_display;
u32 state;
+ bool force_link_train = false;
dp_display = container_of(dp, struct dp_display_private, dp_display);
if (!dp_display->dp_mode.drm_mode.clock) {
@@ -1690,31 +1669,21 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
return;
}
- rc = dp_display_prepare(dp);
- if (rc) {
- DRM_ERROR("DP display prepare failed, rc=%d\n", rc);
- mutex_unlock(&dp_display->event_mutex);
- return;
- }
-
state = dp_display->hpd_state;
- if (state == ST_DISPLAY_OFF)
+ if (state == ST_DISPLAY_OFF) {
dp_display_host_phy_init(dp_display);
+ force_link_train = true;
+ }
- dp_display_enable(dp_display, 0);
+ dp_display_enable(dp_display, force_link_train);
rc = dp_display_post_enable(dp);
if (rc) {
DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
- dp_display_disable(dp_display, 0);
- dp_display_unprepare(dp);
+ dp_display_disable(dp_display);
}
- /* manual kick off plug event to train link */
- if (state == ST_DISPLAY_OFF)
- dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
-
/* completed connection */
dp_display->hpd_state = ST_CONNECTED;
@@ -1737,7 +1706,6 @@ void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
- int rc = 0;
u32 state;
struct dp_display_private *dp_display;
@@ -1754,11 +1722,7 @@ void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
return;
}
- dp_display_disable(dp_display, 0);
-
- rc = dp_display_unprepare(dp);
- if (rc)
- DRM_ERROR("DP display unprepare failed, rc=%d\n", rc);
+ dp_display_disable(dp_display);
state = dp_display->hpd_state;
if (state == ST_DISCONNECT_PENDING) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 4f9fe4d7610b..dcedf021f7fe 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -15,7 +15,6 @@ struct msm_dp {
struct device *codec_dev;
struct drm_bridge *bridge;
struct drm_connector *connector;
- struct drm_encoder *encoder;
struct drm_bridge *next_bridge;
bool is_connected;
bool audio_enabled;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 62d58b9c4647..6df25f7662e7 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -116,7 +116,7 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
}
if (dp_display->next_bridge) {
- rc = drm_bridge_attach(dp_display->encoder,
+ rc = drm_bridge_attach(encoder,
dp_display->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
@@ -130,15 +130,15 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
}
/* connector initialization */
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
- connector = drm_bridge_connector_init(dp_display->drm_dev, dp_display->encoder);
+ connector = drm_bridge_connector_init(dp_display->drm_dev, encoder);
if (IS_ERR(connector))
return connector;
- drm_connector_attach_encoder(connector, dp_display->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index f4b1ed1e24f7..82035dbb0578 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -19,7 +19,7 @@ struct msm_dp_bridge {
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder);
struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 2be1733534a9..5149cebc93f6 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -153,15 +153,6 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
-void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
-{
- /* fail safe edid */
- mutex_lock(&connector->dev->mode_config.mutex);
- if (drm_add_modes_noedid(connector, 640, 480))
- drm_set_preferred_mode(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@@ -217,8 +208,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
rc = -ETIMEDOUT;
goto end;
}
-
- dp_panel_add_fail_safe_mode(connector);
}
if (panel->aux_cfg_update_done) {
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index acb1987fa45f..d861197ac1c8 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -58,7 +58,6 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
-void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index 8f9fed9fdafc..f6ab3b5586ce 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -102,14 +102,12 @@ static int dp_parser_ctrl_res(struct dp_parser *parser)
static int dp_parser_misc(struct dp_parser *parser)
{
struct device_node *of_node = parser->pdev->dev.of_node;
- int len = 0;
- const char *data_lane_property = "data-lanes";
+ int len;
- len = of_property_count_elems_of_size(of_node,
- data_lane_property, sizeof(u32));
+ len = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
if (len < 0) {
- DRM_WARN("Invalid property %s, default max DP lanes = %d\n",
- data_lane_property, DP_MAX_NUM_DP_LANES);
+ DRM_WARN("Invalid property \"data-lanes\", default max DP lanes = %d\n",
+ DP_MAX_NUM_DP_LANES);
len = DP_MAX_NUM_DP_LANES;
}
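
The new drm_of_get_data_lanes_count() helper replaces the open-coded of_property_count_elems_of_size() call. A hedged sketch of its use, assuming an OF graph endpoint node and the same 1..4 lane limits used elsewhere in this series (names hypothetical):

#include <linux/of.h>
#include <drm/drm_of.h>

/* Hypothetical sketch: count "data-lanes" entries, falling back to the maximum. */
static int example_count_lanes(const struct device_node *endpoint)
{
	int num_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);

	if (num_lanes < 0)
		return 4;	/* property missing or out of range */

	return num_lanes;
}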
@@ -162,11 +160,11 @@ static int dp_parser_init_clk_data(struct dp_parser *parser)
}
core_power->num_clk = core_clk_count;
- core_power->clk_config = devm_kzalloc(dev,
- sizeof(struct dss_clk) * core_power->num_clk,
+ core_power->clocks = devm_kcalloc(dev,
+ core_power->num_clk, sizeof(struct clk_bulk_data),
GFP_KERNEL);
- if (!core_power->clk_config)
- return -EINVAL;
+ if (!core_power->clocks)
+ return -ENOMEM;
/* Initialize the CTRL power module */
if (ctrl_clk_count == 0) {
@@ -175,12 +173,12 @@ static int dp_parser_init_clk_data(struct dp_parser *parser)
}
ctrl_power->num_clk = ctrl_clk_count;
- ctrl_power->clk_config = devm_kzalloc(dev,
- sizeof(struct dss_clk) * ctrl_power->num_clk,
+ ctrl_power->clocks = devm_kcalloc(dev,
+ ctrl_power->num_clk, sizeof(struct clk_bulk_data),
GFP_KERNEL);
- if (!ctrl_power->clk_config) {
+ if (!ctrl_power->clocks) {
ctrl_power->num_clk = 0;
- return -EINVAL;
+ return -ENOMEM;
}
/* Initialize the STREAM power module */
@@ -190,12 +188,12 @@ static int dp_parser_init_clk_data(struct dp_parser *parser)
}
stream_power->num_clk = stream_clk_count;
- stream_power->clk_config = devm_kzalloc(dev,
- sizeof(struct dss_clk) * stream_power->num_clk,
+ stream_power->clocks = devm_kcalloc(dev,
+ stream_power->num_clk, sizeof(struct clk_bulk_data),
GFP_KERNEL);
- if (!stream_power->clk_config) {
+ if (!stream_power->clocks) {
stream_power->num_clk = 0;
- return -EINVAL;
+ return -ENOMEM;
}
return 0;
@@ -234,29 +232,16 @@ static int dp_parser_clock(struct dp_parser *parser)
}
if (dp_parser_check_prefix("core", clk_name) &&
core_clk_index < core_clk_count) {
- struct dss_clk *clk =
- &core_power->clk_config[core_clk_index];
- strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
- clk->type = DSS_CLK_AHB;
+ core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
core_clk_index++;
} else if (dp_parser_check_prefix("stream", clk_name) &&
stream_clk_index < stream_clk_count) {
- struct dss_clk *clk =
- &stream_power->clk_config[stream_clk_index];
- strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
- clk->type = DSS_CLK_PCLK;
+ stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
stream_clk_index++;
} else if (dp_parser_check_prefix("ctrl", clk_name) &&
ctrl_clk_index < ctrl_clk_count) {
- struct dss_clk *clk =
- &ctrl_power->clk_config[ctrl_clk_index];
- strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+ ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
ctrl_clk_index++;
- if (dp_parser_check_prefix("ctrl_link", clk_name) ||
- dp_parser_check_prefix("stream_pixel", clk_name))
- clk->type = DSS_CLK_PCLK;
- else
- clk->type = DSS_CLK_AHB;
}
}
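
The parser now fills a plain clk_bulk_data array instead of the driver-private dss_clk structures. A hedged sketch of that conversion, assuming the clock names come from a "clock-names" DT property (all names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Hypothetical sketch: one clk_bulk_data entry per "clock-names" string. */
static int example_init_clocks(struct device *dev, struct device_node *np,
			       struct clk_bulk_data **out, int *out_num)
{
	struct clk_bulk_data *clocks;
	int i, num;

	num = of_property_count_strings(np, "clock-names");
	if (num <= 0)
		return -EINVAL;

	clocks = devm_kcalloc(dev, num, sizeof(*clocks), GFP_KERNEL);
	if (!clocks)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		const char *name;

		if (of_property_read_string_index(np, "clock-names", i, &name))
			return -EINVAL;

		clocks[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!clocks[i].id)
			return -ENOMEM;
	}

	*out = clocks;
	*out_num = num;
	return 0;
}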
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 3a4d7972c069..9abddc6d50c0 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -10,7 +10,6 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
-#include "dp_clk_util.h"
#include "msm_drv.h"
#define DP_LABEL "MDSS DP DISPLAY"
@@ -106,6 +105,11 @@ struct dp_regulator_cfg {
struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
};
+struct dss_module_power {
+ unsigned int num_clk;
+ struct clk_bulk_data *clocks;
+};
+
/**
* struct dp_parser - DP parser's data exposed to clients
*
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index d9e011775ad8..b415b35c2b8c 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -106,107 +106,30 @@ static int dp_power_clk_init(struct dp_power_private *power)
ctrl = &power->parser->mp[DP_CTRL_PM];
stream = &power->parser->mp[DP_STREAM_PM];
- rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
+ rc = devm_clk_bulk_get(dev, core->num_clk, core->clocks);
if (rc) {
DRM_ERROR("failed to get %s clk. err=%d\n",
dp_parser_pm_name(DP_CORE_PM), rc);
return rc;
}
- rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
+ rc = devm_clk_bulk_get(dev, ctrl->num_clk, ctrl->clocks);
if (rc) {
DRM_ERROR("failed to get %s clk. err=%d\n",
dp_parser_pm_name(DP_CTRL_PM), rc);
- msm_dss_put_clk(core->clk_config, core->num_clk);
return -ENODEV;
}
- rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk);
+ rc = devm_clk_bulk_get(dev, stream->num_clk, stream->clocks);
if (rc) {
DRM_ERROR("failed to get %s clk. err=%d\n",
dp_parser_pm_name(DP_CTRL_PM), rc);
- msm_dss_put_clk(core->clk_config, core->num_clk);
return -ENODEV;
}
return 0;
}
-static int dp_power_clk_deinit(struct dp_power_private *power)
-{
- struct dss_module_power *core, *ctrl, *stream;
-
- core = &power->parser->mp[DP_CORE_PM];
- ctrl = &power->parser->mp[DP_CTRL_PM];
- stream = &power->parser->mp[DP_STREAM_PM];
-
- if (!core || !ctrl || !stream) {
- DRM_ERROR("invalid power_data\n");
- return -EINVAL;
- }
-
- msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
- msm_dss_put_clk(core->clk_config, core->num_clk);
- msm_dss_put_clk(stream->clk_config, stream->num_clk);
- return 0;
-}
-
-static int dp_power_clk_set_link_rate(struct dp_power_private *power,
- struct dss_clk *clk_arry, int num_clk, int enable)
-{
- u32 rate;
- int i, rc = 0;
-
- for (i = 0; i < num_clk; i++) {
- if (clk_arry[i].clk) {
- if (clk_arry[i].type == DSS_CLK_PCLK) {
- if (enable)
- rate = clk_arry[i].rate;
- else
- rate = 0;
-
- rc = dev_pm_opp_set_rate(power->dev, rate);
- if (rc)
- break;
- }
-
- }
- }
- return rc;
-}
-
-static int dp_power_clk_set_rate(struct dp_power_private *power,
- enum dp_pm_type module, bool enable)
-{
- int rc = 0;
- struct dss_module_power *mp = &power->parser->mp[module];
-
- if (module == DP_CTRL_PM) {
- rc = dp_power_clk_set_link_rate(power, mp->clk_config, mp->num_clk, enable);
- if (rc) {
- DRM_ERROR("failed to set link clks rate\n");
- return rc;
- }
- } else {
-
- if (enable) {
- rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
- if (rc) {
- DRM_ERROR("failed to set clks rate\n");
- return rc;
- }
- }
- }
-
- rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
- if (rc) {
- DRM_ERROR("failed to %d clks, err: %d\n", enable, rc);
- return rc;
- }
-
- return 0;
-}
-
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{
struct dp_power_private *power;
@@ -234,6 +157,7 @@ int dp_power_clk_enable(struct dp_power *dp_power,
{
int rc = 0;
struct dp_power_private *power;
+ struct dss_module_power *mp;
power = container_of(dp_power, struct dp_power_private, dp_power);
@@ -266,8 +190,9 @@ int dp_power_clk_enable(struct dp_power *dp_power,
if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
drm_dbg_dp(power->drm_dev,
"Enable core clks before link clks\n");
+ mp = &power->parser->mp[DP_CORE_PM];
- rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
+ rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
if (rc) {
DRM_ERROR("fail to enable clks: %s. err=%d\n",
dp_parser_pm_name(DP_CORE_PM), rc);
@@ -277,12 +202,15 @@ int dp_power_clk_enable(struct dp_power *dp_power,
}
}
- rc = dp_power_clk_set_rate(power, pm_type, enable);
- if (rc) {
- DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
- enable ? "enable" : "disable",
- dp_parser_pm_name(pm_type), rc);
- return rc;
+ mp = &power->parser->mp[pm_type];
+ if (enable) {
+ rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
+ if (rc) {
+ DRM_ERROR("failed to enable clks, err: %d\n", rc);
+ return rc;
+ }
+ } else {
+ clk_bulk_disable_unprepare(mp->num_clk, mp->clocks);
}
if (pm_type == DP_CORE_PM)
@@ -347,9 +275,7 @@ void dp_power_client_deinit(struct dp_power *dp_power)
power = container_of(dp_power, struct dp_power_private, dp_power);
- dp_power_clk_deinit(power);
pm_runtime_disable(&power->pdev->dev);
-
}
int dp_power_init(struct dp_power *dp_power, bool flip)
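
With the clocks held in clk_bulk_data arrays, getting and toggling them reduces to the generic clk_bulk helpers, and the devm-managed get removes the need for the deleted dp_power_clk_deinit(). A hedged sketch (names hypothetical):

#include <linux/clk.h>
#include <linux/types.h>

/* Hypothetical sketch: enable or disable a whole clock bundle in one call. */
static int example_clk_power(struct clk_bulk_data *clocks, int num_clk,
			     bool enable)
{
	if (enable)
		return clk_bulk_prepare_enable(num_clk, clocks);

	clk_bulk_disable_unprepare(num_clk, clocks);
	return 0;
}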
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a95d5df52653..a34078497af1 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -21,6 +21,8 @@
#include <video/mipi_display.h>
+#include <drm/drm_of.h>
+
#include "dsi.h"
#include "dsi.xml.h"
#include "sfpb.xml.h"
@@ -1080,12 +1082,32 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
+ u32 ctrl;
+
+ ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+ if (ctrl & DSI_CTRL_ENABLE) {
+ dsi_write(msm_host, REG_DSI_CTRL, ctrl & ~DSI_CTRL_ENABLE);
+ /*
+	 * The DSI controller needs to be disabled before
+	 * the clocks are turned on
+ */
+ wmb();
+ }
+
dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
wmb(); /* clocks need to be enabled before reset */
+ /* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb(); /* controller out of reset */
+
+ if (ctrl & DSI_CTRL_ENABLE) {
+ dsi_write(msm_host, REG_DSI_CTRL, ctrl);
+ wmb(); /* make sure dsi controller enabled again */
+ }
}
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
@@ -1478,32 +1500,6 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
-static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
-{
- u32 data0, data1;
-
- data0 = dsi_read(msm_host, REG_DSI_CTRL);
- data1 = data0;
- data1 &= ~DSI_CTRL_ENABLE;
- dsi_write(msm_host, REG_DSI_CTRL, data1);
- /*
- * dsi controller need to be disabled before
- * clocks turned on
- */
- wmb();
-
- dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
- wmb(); /* make sure clocks enabled */
-
- /* dsi controller can only be reset while clocks are running */
- dsi_write(msm_host, REG_DSI_RESET, 1);
- msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
- dsi_write(msm_host, REG_DSI_RESET, 0);
- wmb(); /* controller out of reset */
- dsi_write(msm_host, REG_DSI_CTRL, data0);
- wmb(); /* make sure dsi controller enabled again */
-}
-
static void dsi_hpd_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1520,7 +1516,7 @@ static void dsi_err_worker(struct work_struct *work)
pr_err_ratelimited("%s: status=%x\n", __func__, status);
if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
- dsi_sw_reset_restore(msm_host);
+ dsi_sw_reset(msm_host);
/* It is safe to clear here because error irq is disabled. */
msm_host->err_work_state = 0;
@@ -1779,11 +1775,10 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
return 0;
}
- num_lanes = len / sizeof(u32);
-
- if (num_lanes < 1 || num_lanes > 4) {
+ num_lanes = drm_of_get_data_lanes_count(ep, 1, 4);
+ if (num_lanes < 0) {
DRM_DEV_ERROR(dev, "bad number of data lanes\n");
- return -EINVAL;
+ return num_lanes;
}
msm_host->num_data_lanes = num_lanes;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index cf24e68864ba..93fe61b86967 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -9,6 +9,7 @@
#include <linux/of_gpio.h>
#include <drm/drm_bridge_connector.h>
+#include <drm/drm_of.h>
#include <sound/hdmi-codec.h>
#include "hdmi.h"
@@ -133,6 +134,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
+ ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
+ if (ret && ret != -ENODEV)
+ goto fail;
+
hdmi->mmio = msm_ioremap(pdev, config->mmio_name);
if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio);
@@ -180,6 +185,9 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
goto fail;
}
+ for (i = 0; i < config->pwr_reg_cnt; i++)
+ hdmi->pwr_regs[i].supply = config->pwr_reg_names[i];
+
ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %d\n", ret);
@@ -230,6 +238,20 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->pwr_clks[i] = clk;
}
+ hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
+ /* This will catch e.g. -EPROBE_DEFER */
+ if (IS_ERR(hdmi->hpd_gpiod)) {
+ ret = PTR_ERR(hdmi->hpd_gpiod);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd gpio: (%d)\n", ret);
+ goto fail;
+ }
+
+ if (!hdmi->hpd_gpiod)
+ DBG("failed to get HPD gpio");
+
+ if (hdmi->hpd_gpiod)
+ gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD");
+
pm_runtime_enable(&pdev->dev);
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
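
The board-specific GPIO tables are replaced by a single optional HPD line fetched through the gpiod API ("hpd" maps to a "hpd-gpios" DT property). A hedged sketch of that lookup (names hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical sketch: optional HPD GPIO; absence is not an error. */
static int example_get_hpd_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);

	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* e.g. -EPROBE_DEFER */

	if (gpiod)
		gpiod_set_consumer_name(gpiod, "HDMI_HPD");

	*out = gpiod;	/* may be NULL: fall back to the HPD status register */
	return 0;
}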
@@ -291,6 +313,15 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
+ if (hdmi->next_bridge) {
+ ret = drm_bridge_attach(hdmi->encoder, hdmi->next_bridge, hdmi->bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach next HDMI bridge: %d\n", ret);
+ goto fail;
+ }
+ }
+
hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
@@ -353,12 +384,7 @@ fail:
.item ## _names = item ##_names_ ## entry, \
.item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
-static const char *pwr_reg_names_none[] = {};
-static const char *hpd_reg_names_none[] = {};
-
-static struct hdmi_platform_config hdmi_tx_8660_config;
-
-static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
+static const char *hpd_reg_names_8960[] = {"core-vdda"};
static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static struct hdmi_platform_config hdmi_tx_8960_config = {
@@ -367,59 +393,17 @@ static struct hdmi_platform_config hdmi_tx_8960_config = {
};
static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
-static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static struct hdmi_platform_config hdmi_tx_8974_config = {
HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(hpd_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
HDMI_CFG(hpd_clk, 8x74),
.hpd_freq = hpd_clk_freq_8x74,
};
-static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"};
-
-static struct hdmi_platform_config hdmi_tx_8084_config = {
- HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(hpd_reg, 8084),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
-};
-
-static struct hdmi_platform_config hdmi_tx_8994_config = {
- HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(hpd_reg, none),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
-};
-
-static struct hdmi_platform_config hdmi_tx_8996_config = {
- HDMI_CFG(pwr_reg, none),
- HDMI_CFG(hpd_reg, none),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
-};
-
-static const struct {
- const char *name;
- const bool output;
- const int value;
- const char *label;
-} msm_hdmi_gpio_pdata[] = {
- { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" },
- { "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" },
- { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" },
- { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" },
- { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" },
- { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" },
-};
-
/*
* HDMI audio codec callbacks
*/
@@ -531,7 +515,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
- int i, err;
+ int err;
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
@@ -543,42 +527,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mmio_name = "core_physical";
hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
- for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
- const char *name = msm_hdmi_gpio_pdata[i].name;
- struct gpio_desc *gpiod;
-
- /*
- * We are fetching the GPIO lines "as is" since the connector
- * code is enabling and disabling the lines. Until that point
- * the power-on default value will be kept.
- */
- gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS);
- /* This will catch e.g. -PROBE_DEFER */
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- if (!gpiod) {
- /* Try a second time, stripping down the name */
- char name3[32];
-
- /*
- * Try again after stripping out the "qcom,hdmi-tx"
- * prefix. This is mainly to match "hpd-gpios" used
- * in the upstream bindings.
- */
- if (sscanf(name, "qcom,hdmi-tx-%s", name3))
- gpiod = devm_gpiod_get_optional(dev, name3, GPIOD_ASIS);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- if (!gpiod)
- DBG("failed to get gpio: %s", name);
- }
- hdmi_cfg->gpios[i].gpiod = gpiod;
- if (gpiod)
- gpiod_set_consumer_name(gpiod, msm_hdmi_gpio_pdata[i].label);
- hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output;
- hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value;
- }
-
dev->platform_data = hdmi_cfg;
hdmi = msm_hdmi_init(to_platform_device(dev));
@@ -626,12 +574,12 @@ static int msm_hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id msm_hdmi_dt_match[] = {
- { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
- { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
- { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
+ { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
- { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
+ { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8960_config },
{}
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 736f348befb3..04a74381aaf7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -19,17 +19,9 @@
#include "msm_drv.h"
#include "hdmi.xml.h"
-#define HDMI_MAX_NUM_GPIO 6
-
struct hdmi_phy;
struct hdmi_platform_config;
-struct hdmi_gpio_data {
- struct gpio_desc *gpiod;
- bool output;
- int value;
-};
-
struct hdmi_audio {
bool enabled;
struct hdmi_audio_infoframe infoframe;
@@ -61,6 +53,8 @@ struct hdmi {
struct clk **hpd_clks;
struct clk **pwr_clks;
+ struct gpio_desc *hpd_gpiod;
+
struct hdmi_phy *phy;
struct device *phy_dev;
@@ -68,6 +62,8 @@ struct hdmi {
struct drm_connector *connector;
struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
+
/* the encoder we are hooked to (outside of hdmi block) */
struct drm_encoder *encoder;
@@ -109,9 +105,6 @@ struct hdmi_platform_config {
/* clks that need to be on for screen pwr (ie pixel clk): */
const char **pwr_clk_names;
int pwr_clk_cnt;
-
- /* gpio's: */
- struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
};
struct hdmi_bridge {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 97c24010c4d1..9b1391d27ed3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include <drm/drm_bridge_connector.h>
+#include <drm/drm_edid.h>
#include "msm_kms.h"
#include "hdmi.h"
@@ -159,14 +160,6 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
-static void msm_hdmi_bridge_enable(struct drm_bridge *bridge)
-{
-}
-
-static void msm_hdmi_bridge_disable(struct drm_bridge *bridge)
-{
-}
-
static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -306,8 +299,6 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.pre_enable = msm_hdmi_bridge_pre_enable,
- .enable = msm_hdmi_bridge_enable,
- .disable = msm_hdmi_bridge_disable,
.post_disable = msm_hdmi_bridge_post_disable,
.mode_set = msm_hdmi_bridge_mode_set,
.mode_valid = msm_hdmi_bridge_mode_valid,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 75605ddac7c4..bfa827b47989 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -60,48 +60,6 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi)
}
}
-static int gpio_config(struct hdmi *hdmi, bool on)
-{
- const struct hdmi_platform_config *config = hdmi->config;
- int i;
-
- if (on) {
- for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
- struct hdmi_gpio_data gpio = config->gpios[i];
-
- if (gpio.gpiod) {
- if (gpio.output) {
- gpiod_direction_output(gpio.gpiod,
- gpio.value);
- } else {
- gpiod_direction_input(gpio.gpiod);
- gpiod_set_value_cansleep(gpio.gpiod,
- gpio.value);
- }
- }
- }
-
- DBG("gpio on");
- } else {
- for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
- struct hdmi_gpio_data gpio = config->gpios[i];
-
- if (!gpio.gpiod)
- continue;
-
- if (gpio.output) {
- int value = gpio.value ? 0 : 1;
-
- gpiod_set_value_cansleep(gpio.gpiod, value);
- }
- }
-
- DBG("gpio off");
- }
-
- return 0;
-}
-
static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
{
const struct hdmi_platform_config *config = hdmi->config;
@@ -154,11 +112,8 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
goto fail;
}
- ret = gpio_config(hdmi, true);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
- goto fail;
- }
+ if (hdmi->hpd_gpiod)
+ gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
pm_runtime_get_sync(dev);
enable_hpd_clocks(hdmi, true);
@@ -207,10 +162,6 @@ void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
enable_hpd_clocks(hdmi, false);
pm_runtime_put(dev);
- ret = gpio_config(hdmi, false);
- if (ret)
- dev_warn(dev, "failed to unconfigure GPIOs: %d\n", ret);
-
ret = pinctrl_pm_select_sleep_state(dev);
if (ret)
dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
@@ -269,10 +220,7 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi)
#define HPD_GPIO_INDEX 2
static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
{
- const struct hdmi_platform_config *config = hdmi->config;
- struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
-
- return gpiod_get_value(hpd_gpio.gpiod) ?
+ return gpiod_get_value(hdmi->hpd_gpiod) ?
connector_status_connected :
connector_status_disconnected;
}
@@ -282,8 +230,6 @@ enum drm_connector_status msm_hdmi_bridge_detect(
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
enum drm_connector_status stat_gpio, stat_reg;
int retry = 20;
@@ -291,7 +237,7 @@ enum drm_connector_status msm_hdmi_bridge_detect(
* some platforms may not have hpd gpio. Rely only on the status
* provided by REG_HDMI_HPD_INT_STATUS in this case.
*/
- if (!hpd_gpio.gpiod)
+ if (!hdmi->hpd_gpiod)
return detect_reg(hdmi);
do {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 95f2928cb2cb..1d97640d8c24 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -122,8 +122,20 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
HDMI_8x60_PHY_REG2_PD_DESER);
}
+static const char * const hdmi_phy_8x60_reg_names[] = {
+ "core-vdda",
+};
+
+static const char * const hdmi_phy_8x60_clk_names[] = {
+ "slave_iface",
+};
+
const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg = {
.type = MSM_HDMI_PHY_8x60,
.powerup = hdmi_phy_8x60_powerup,
.powerdown = hdmi_phy_8x60_powerdown,
+ .reg_names = hdmi_phy_8x60_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8x60_reg_names),
+ .clk_names = hdmi_phy_8x60_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8x60_clk_names),
};
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index ea2a20699cb4..7d2dab260f86 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -10,6 +10,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include "msm_drv.h"
#include "msm_gpu.h"
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 44485363f37a..1ed4cd09dbf8 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -26,6 +26,7 @@
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
+#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
/*
@@ -267,12 +268,56 @@ static int msm_drm_uninit(struct device *dev)
#include <linux/of_address.h>
+struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ struct msm_mmu *mmu;
+ struct device *mdp_dev = dev->dev;
+ struct device *mdss_dev = mdp_dev->parent;
+ struct device *iommu_dev;
+
+ /*
+	 * The IOMMU can be a part of the MDSS device tree binding, or of
+	 * the MDP/DPU device itself.
+ */
+ if (device_iommu_mapped(mdp_dev))
+ iommu_dev = mdp_dev;
+ else
+ iommu_dev = mdss_dev;
+
+ domain = iommu_domain_alloc(iommu_dev->bus);
+ if (!domain) {
+ drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
+ return NULL;
+ }
+
+ mmu = msm_iommu_new(iommu_dev, domain);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(domain);
+ return ERR_CAST(mmu);
+ }
+
+ aspace = msm_gem_address_space_create(mmu, "mdp_kms",
+ 0x1000, 0x100000000 - 0x1000);
+ if (IS_ERR(aspace))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
bool msm_use_mmu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- /* a2xx comes with its own MMU */
- return priv->is_a2xx || iommu_present(&platform_bus_type);
+ /*
+	 * a2xx comes with its own MMU.
+	 * On other platforms an IOMMU can be specified either for the
+	 * MDP/DPU device or for its parent, the MDSS device.
+ */
+ return priv->is_a2xx ||
+ device_iommu_mapped(dev->dev) ||
+ device_iommu_mapped(dev->dev->parent);
}
static int msm_init_vram(struct drm_device *dev)
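
msm_use_mmu() now asks whether the display device itself is behind an IOMMU instead of relying on the global iommu_present() check. A hedged sketch of that test (names hypothetical):

#include <linux/device.h>

/* Hypothetical sketch: is the MDP/DPU device or its MDSS parent IOMMU-translated? */
static bool example_has_display_iommu(struct device *mdp_dev)
{
	return device_iommu_mapped(mdp_dev) ||
	       (mdp_dev->parent && device_iommu_mapped(mdp_dev->parent));
}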
@@ -633,12 +678,25 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_new *args = data;
+ uint32_t flags = args->flags;
if (args->flags & ~MSM_BO_FLAGS) {
DRM_ERROR("invalid flags: %08x\n", args->flags);
return -EINVAL;
}
+ /*
+ * Uncached CPU mappings are deprecated, as of:
+ *
+ * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
+ *
+ * So promote them to WC.
+ */
+ if (flags & MSM_BO_UNCACHED) {
+ flags &= ~MSM_BO_CACHED;
+ flags |= MSM_BO_WC;
+ }
+
-	return msm_gem_new_handle(dev, file, args->size,
-			args->flags, &args->handle, NULL);
+	return msm_gem_new_handle(dev, file, args->size,
+			flags, &args->handle, NULL);
}
@@ -948,7 +1006,24 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_FOPS(fops);
+static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct drm_file *file = f->private_data;
+ struct drm_device *dev = file->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!priv->gpu)
+ return;
+
+ msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = msm_fop_show_fdinfo,
+};
static const struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
@@ -964,7 +1039,7 @@ static const struct drm_driver msm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
+ .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 08388d742d65..b3689a2d27d7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -62,16 +62,6 @@ enum msm_dp_controller {
#define MAX_H_TILES_PER_DISPLAY 2
/**
- * enum msm_display_caps - features/capabilities supported by displays
- * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
- * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
- */
-enum msm_display_caps {
- MSM_DISPLAY_CAP_VID_MODE = BIT(0),
- MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
-};
-
-/**
* enum msm_event_wait - type of HW events to wait for
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
@@ -234,6 +224,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -246,6 +237,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 4269da268a4a..e3f61c39df69 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -8,6 +8,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 4c39ef9dd75d..46168eccfac4 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -8,6 +8,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 3df255402a33..a47e5837c528 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -28,6 +28,14 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->fenceptr = fenceptr;
spin_lock_init(&fctx->spinlock);
+ /*
+ * Start out close to the 32b fence rollover point, so we can
+ * catch bugs with fence comparisons.
+ */
+ fctx->last_fence = 0xffffff00;
+ fctx->completed_fence = fctx->last_fence;
+ *fctx->fenceptr = fctx->last_fence;
+
return fctx;
}
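
Starting the fence counters just below the 32-bit rollover only helps if every comparison in the driver is wrap-safe. A hedged sketch of the signed-difference idiom this relies on (helper name hypothetical; the driver has its own equivalent):

#include <linux/types.h>

/* Hypothetical sketch: "a is after b" across a 32-bit sequence-number wrap. */
static inline bool example_fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}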
@@ -46,12 +54,15 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
(int32_t)(*fctx->fenceptr - fence) >= 0;
}
-/* called from workqueue */
+/* called from irq handler and workqueue (in recover path) */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
- spin_lock(&fctx->spinlock);
- fctx->completed_fence = max(fence, fctx->completed_fence);
- spin_unlock(&fctx->spinlock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->spinlock, flags);
+ if (fence_after(fence, fctx->completed_fence))
+ fctx->completed_fence = fence;
+ spin_unlock_irqrestore(&fctx->spinlock, flags);
}
struct msm_fence {
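
Because msm_update_fence() can now run from the IRQ handler as well as from a workqueue, it has to take the spinlock with interrupts disabled. A hedged sketch of the general pattern (names hypothetical):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical sketch: update data shared between IRQ and process context. */
static void example_update(spinlock_t *lock, u32 *completed, u32 fence)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if ((s32)(fence - *completed) > 0)
		*completed = fence;
	spin_unlock_irqrestore(lock, flags);
}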
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 97d5b4d8b9b0..8ddbd2e001d4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -129,7 +129,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
update_inactive(msm_obj);
@@ -160,7 +160,7 @@ static void put_pages(struct drm_gem_object *obj)
* pages are clean because display controller,
* GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
@@ -213,7 +213,7 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
return pgprot_writecombine(prot);
return prot;
}
@@ -259,7 +259,8 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
out_unlock:
msm_gem_unlock(obj);
out:
@@ -439,14 +440,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
return ret;
}
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
- msm_gem_unpin_vma(vma);
-
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
@@ -586,7 +585,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
if (!GEM_WARN_ON(!vma)) {
- msm_gem_unpin_vma_locked(obj, vma);
+ msm_gem_unpin_vma(vma);
+ msm_gem_unpin_locked(obj);
}
msm_gem_unlock(obj);
}
@@ -1005,7 +1005,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
#endif
/* don't call directly! Use drm_gem_object_put() */
-void msm_gem_free_object(struct drm_gem_object *obj)
+static void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
@@ -1021,8 +1021,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
- msm_gem_lock(obj);
-
/* object should not be on active list: */
GEM_WARN_ON(is_active(msm_obj));
@@ -1038,17 +1036,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
put_iova_vmas(obj);
- /* dma_buf_detach() grabs resv lock, so we need to unlock
- * prior to drm_prime_gem_destroy
- */
- msm_gem_unlock(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
- msm_gem_unlock(obj);
}
drm_gem_object_release(obj);
@@ -1060,7 +1052,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
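
The mmap flags and the fault handler have to agree: a VM_PFNMAP mapping is populated with vmf_insert_pfn(), replacing the VM_MIXEDMAP/vmf_insert_mixed() pairing removed above. A hedged sketch of the pairing (names hypothetical):

#include <linux/mm.h>

/* Hypothetical sketch: VM_PFNMAP mmap setup and the matching fault insertion. */
static int example_mmap_setup(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static vm_fault_t example_fault(struct vm_fault *vmf, unsigned long pfn)
{
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}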
@@ -1115,7 +1107,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
- case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c75d3b879a53..432032ad4aed 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -145,7 +145,7 @@ struct msm_gem_object {
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_locked(struct drm_gem_object *obj);
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,
@@ -175,7 +175,6 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
-void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
@@ -230,7 +229,19 @@ msm_gem_unlock(struct drm_gem_object *obj)
static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
- return dma_resv_is_locked(obj->resv);
+ /*
+ * Destroying the object is a special case.. msm_gem_free_object()
+ * calls many things that WARN_ON if the obj lock is not held. But
+ * acquiring the obj lock in msm_gem_free_object() can cause a
+ * locking order inversion between reservation_ww_class_mutex and
+ * fs_reclaim.
+ *
+ * This deadlock is not actually possible, because no one should
+ * be already holding the lock when msm_gem_free_object() is called.
+ * Unfortunately lockdep is not aware of this detail. So when the
+ * refcount drops to zero, we pretend it is already locked.
+ */
+ return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
}
static inline bool is_active(struct msm_gem_object *msm_obj)
@@ -377,10 +388,11 @@ struct msm_gem_submit {
} *cmd; /* array of size nr_cmds */
struct {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_ACTIVE 0x2000 /* active refcnt is held */
-#define BO_PINNED 0x1000 /* obj is pinned and on active list */
+#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
+#define BO_LOCKED 0x4000 /* obj lock is held */
+#define BO_ACTIVE 0x2000 /* active refcnt is held */
+#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
uint32_t flags;
union {
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 94ab705e9b8a..dcc8a573bc76 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -11,6 +11,21 @@
#include "msm_drv.h"
#include "msm_gem.h"
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+	/* Ensure the mmap offset is initialized.  We lazily initialize it,
+	 * so if the object has not been mmap'd directly as a GEM object
+	 * first, the mmap offset will not have been set up yet.
+ */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ return ret;
+
+ return drm_gem_prime_mmap(obj, vma);
+}
+
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 086dacf2f26a..6e39d959b9f0 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -15,7 +15,7 @@
/* Default disabled for now until it has some more testing on the different
* iommu combinations that can be paired with the driver:
*/
-bool enable_eviction = false;
+static bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 80975229b4de..c9e4aeb14f4a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
*/
submit->bos[i].flags &= ~cleanup_flags;
- if (flags & BO_PINNED)
- msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
+ if (flags & BO_VMA_PINNED)
+ msm_gem_unpin_vma(submit->bos[i].vma);
+
+ if (flags & BO_OBJ_PINNED)
+ msm_gem_unpin_locked(obj);
if (flags & BO_ACTIVE)
msm_gem_active_put(obj);
@@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
- submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
+ unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
+ BO_ACTIVE | BO_LOCKED;
+ submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- submit->bos[i].flags |= BO_PINNED;
+ submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
submit->bos[i].vma = vma;
if (vma->iova == submit->bos[i].iova) {
@@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
unsigned i;
if (error)
- cleanup_flags |= BO_PINNED | BO_ACTIVE;
+ cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
msm_gem_lock(obj);
- submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
+ /* Note, VMA already fence-unpinned before submit: */
+ submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
@@ -922,7 +928,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
INT_MAX, GFP_KERNEL);
}
if (submit->fence_id < 0) {
- ret = submit->fence_id = 0;
+ ret = submit->fence_id;
submit->fence_id = 0;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3c1dc9241831..c471aebcdbab 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
unsigned size = vma->node.size;
/* Print a message if we try to purge a vma in use */
- if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
- return;
+ GEM_WARN_ON(msm_gem_vma_inuse(vma));
/* Don't do anything if the memory isn't mapped */
if (!vma->mapped)
@@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
{
- if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
- return;
+ GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
spin_lock(&aspace->lock);
if (vma->iova)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index eb8a6663f309..c2bfcf3f1f40 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include "drm/drm_drv.h"
+
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -146,6 +148,16 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ struct drm_printer *p)
+{
+ drm_printf(p, "drm-driver:\t%s\n", gpu->dev->driver->name);
+ drm_printf(p, "drm-client-id:\t%u\n", ctx->seqno);
+ drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
+ drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
+ drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
+}
+
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
int ret;
@@ -164,24 +176,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
- uint32_t fence)
-{
- struct msm_gem_submit *submit;
- unsigned long flags;
-
- spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node) {
- if (fence_after(submit->seqno, fence))
- break;
-
- msm_update_fence(submit->ring->fctx,
- submit->hw_fence->seqno);
- dma_fence_signal(submit->hw_fence);
- }
- spin_unlock_irqrestore(&ring->submit_lock, flags);
-}
-
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
@@ -227,7 +221,7 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct msm_gem_object *obj, u64 iova, u32 flags)
+ struct msm_gem_object *obj, u64 iova, bool full)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
@@ -235,8 +229,11 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
state_bo->size = obj->base.size;
state_bo->iova = iova;
- /* Only store data for non imported buffer objects marked for read */
- if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
+ BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(obj->name));
+
+ memcpy(state_bo->name, obj->name, sizeof(state_bo->name));
+
+ if (full) {
void *ptr;
state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
@@ -282,34 +279,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->fault_info = gpu->fault_info;
if (submit) {
- int i, nr = 0;
-
- /* count # of buffers to dump: */
- for (i = 0; i < submit->nr_bos; i++)
- if (should_dump(submit, i))
- nr++;
- /* always dump cmd bo's, but don't double count them: */
- for (i = 0; i < submit->nr_cmds; i++)
- if (!should_dump(submit, submit->cmd[i].idx))
- nr++;
-
- state->bos = kcalloc(nr,
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
for (i = 0; state->bos && i < submit->nr_bos; i++) {
- if (should_dump(submit, i)) {
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova, submit->bos[i].flags);
- }
- }
-
- for (i = 0; state->bos && i < submit->nr_cmds; i++) {
- int idx = submit->cmd[i].idx;
-
- if (!should_dump(submit, submit->cmd[i].idx)) {
- msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
- submit->bos[idx].iova, submit->bos[idx].flags);
- }
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova,
+ should_dump(submit, i));
}
}
@@ -436,9 +414,9 @@ static void recover_worker(struct kthread_work *work)
* one more to clear the faulting submit
*/
if (ring == cur_ring)
- fence++;
+ ring->memptrs->fence = ++fence;
- update_fences(gpu, ring, fence);
+ msm_update_fence(ring->fctx, fence);
}
if (msm_gpu_active(gpu)) {
@@ -652,7 +630,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
{
int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
volatile struct msm_gpu_submit_stats *stats;
- u64 elapsed, clock = 0;
+ u64 elapsed, clock = 0, cycles;
unsigned long flags;
stats = &ring->memptrs->stats[index];
@@ -660,19 +638,23 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
do_div(elapsed, 192);
+ cycles = stats->cpcycles_end - stats->cpcycles_start;
+
/* Calculate the clock frequency from the number of CP cycles */
if (elapsed) {
- clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+ clock = cycles * 1000;
do_div(clock, elapsed);
}
+ submit->queue->ctx->elapsed_ns += elapsed;
+ submit->queue->ctx->cycles += cycles;
+
trace_msm_gpu_submit_retired(submit, elapsed, clock,
stats->alwayson_start, stats->alwayson_end);
msm_submit_retire(submit);
pm_runtime_mark_last_busy(&gpu->pdev->dev);
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
spin_lock_irqsave(&ring->submit_lock, flags);
list_del(&submit->node);
@@ -686,6 +668,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
msm_devfreq_idle(gpu);
mutex_unlock(&gpu->active_lock);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
msm_gem_submit_put(submit);
}
@@ -735,7 +719,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++)
- update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+ msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
kthread_queue_work(gpu->worker, &gpu->retire_work);
update_sw_cntrs(gpu);
@@ -934,7 +918,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6def00883046..4d935fedd2ac 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -64,11 +64,14 @@ struct msm_gpu_funcs {
/* for generation specific debugfs: */
void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
+ /* note: gpu_busy() can assume that we have been pm_resumed */
u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
- void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
+ /* note: gpu_set_freq() can assume that we have been pm_resumed */
+ void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended);
struct msm_gem_address_space *(*create_address_space)
(struct msm_gpu *gpu, struct platform_device *pdev);
struct msm_gem_address_space *(*create_private_address_space)
@@ -92,6 +95,9 @@ struct msm_gpu_devfreq {
/** devfreq: devfreq instance */
struct devfreq *devfreq;
+ /** lock: lock for "suspended", "busy_cycles", and "time" */
+ struct mutex lock;
+
/**
* idle_constraint:
*
@@ -135,6 +141,9 @@ struct msm_gpu_devfreq {
* elapsed
*/
struct msm_hrtimer_work boost_work;
+
+ /** suspended: tracks if we're suspended */
+ bool suspended;
};
struct msm_gpu {
@@ -362,6 +371,22 @@ struct msm_file_private {
char *cmdline;
/**
+ * elapsed:
+ *
+ * The total (cumulative) elapsed time GPU was busy with rendering
+ * from this context in ns.
+ */
+ uint64_t elapsed_ns;
+
+ /**
+ * cycles:
+ *
+ * The total (cumulative) GPU cycles elapsed attributed to this
+ * context.
+ */
+ uint64_t cycles;
+
+ /**
* entities:
*
* Table of per-priority-level sched entities used by submitqueues
@@ -464,6 +489,7 @@ struct msm_gpu_state_bo {
size_t size;
void *data;
bool encoded;
+ char name[32];
};
struct msm_gpu_state {
@@ -544,6 +570,9 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ struct drm_printer *p);
+
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d2539ca78c29..d1f70426f554 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -20,6 +20,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
struct dev_pm_opp *opp;
/*
@@ -32,10 +33,13 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
- if (gpu->funcs->gpu_set_freq)
- gpu->funcs->gpu_set_freq(gpu, opp);
- else
+ if (gpu->funcs->gpu_set_freq) {
+ mutex_lock(&df->lock);
+ gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);
+ mutex_unlock(&df->lock);
+ } else {
clk_set_rate(gpu->core_clk, *freq);
+ }
dev_pm_opp_put(opp);
@@ -58,18 +62,27 @@ static void get_raw_dev_status(struct msm_gpu *gpu,
unsigned long sample_rate;
ktime_t time;
+ mutex_lock(&df->lock);
+
status->current_frequency = get_freq(gpu);
- busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
time = ktime_get();
-
- busy_time = busy_cycles - df->busy_cycles;
status->total_time = ktime_us_delta(time, df->time);
+ df->time = time;
+ if (df->suspended) {
+ mutex_unlock(&df->lock);
+ status->busy_time = 0;
+ return;
+ }
+
+ busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
+ busy_time = busy_cycles - df->busy_cycles;
df->busy_cycles = busy_cycles;
- df->time = time;
+
+ mutex_unlock(&df->lock);
busy_time *= USEC_PER_SEC;
- do_div(busy_time, sample_rate);
+ busy_time = div64_ul(busy_time, sample_rate);
if (WARN_ON(busy_time > ~0LU))
busy_time = ~0LU;
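
div64_ul() divides a u64 by a full unsigned long, whereas do_div() truncates its divisor to 32 bits; that is the point of the busy-time change above. A hedged sketch (names hypothetical):

#include <linux/math64.h>
#include <linux/time64.h>

/* Hypothetical sketch: convert busy cycles to microseconds with a 64-bit divide. */
static u64 example_busy_us(u64 busy_cycles, unsigned long sample_rate_hz)
{
	busy_cycles *= USEC_PER_SEC;
	return div64_ul(busy_cycles, sample_rate_hz);
}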
@@ -175,6 +188,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (!gpu->funcs->gpu_busy)
return;
+ mutex_init(&df->lock);
+
dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
DEV_PM_QOS_MAX_FREQUENCY,
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
@@ -244,12 +259,16 @@ void msm_devfreq_cleanup(struct msm_gpu *gpu)
void msm_devfreq_resume(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
+ unsigned long sample_rate;
if (!has_devfreq(gpu))
return;
- df->busy_cycles = 0;
+ mutex_lock(&df->lock);
+ df->busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
df->time = ktime_get();
+ df->suspended = false;
+ mutex_unlock(&df->lock);
devfreq_resume_device(df->devfreq);
}
@@ -261,6 +280,10 @@ void msm_devfreq_suspend(struct msm_gpu *gpu)
if (!has_devfreq(gpu))
return;
+ mutex_lock(&df->lock);
+ df->suspended = true;
+ mutex_unlock(&df->lock);
+
devfreq_suspend_device(df->devfreq);
cancel_idle_work(df);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index bcaddbba564d..a54ed354578b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
u64 addr = iova;
unsigned int i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
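
for_each_sgtable_sg() walks the CPU-side entries (orig_nents) of a scatter-gather table, which is what a page-table mapping loop needs; sgt->nents only counts the segments produced by DMA mapping. A hedged sketch (names hypothetical):

#include <linux/scatterlist.h>

/* Hypothetical sketch: sum the CPU-side segment lengths of an sg table. */
static size_t example_total_size(struct sg_table *sgt)
{
	struct scatterlist *sg;
	size_t total = 0;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i)
		total += sg->length;

	return total;
}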
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 0454a571adf7..e13c5c12b775 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
@@ -25,6 +26,8 @@
#define UBWC_CTRL_2 0x150
#define UBWC_PREDICTION_MODE 0x154
+#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+
struct msm_mdss {
struct device *dev;
@@ -36,8 +39,47 @@ struct msm_mdss {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
+ struct icc_path *path[2];
+ u32 num_paths;
};
+static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+ struct msm_mdss *msm_mdss)
+{
+ struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ msm_mdss->path[0] = path0;
+ msm_mdss->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ msm_mdss->path[1] = path1;
+ msm_mdss->num_paths++;
+ }
+
+ return 0;
+}
+
+static void msm_mdss_put_icc_path(void *data)
+{
+ struct msm_mdss *msm_mdss = data;
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_put(msm_mdss->path[i]);
+}
+
+static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+{
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+}
+
static void msm_mdss_irq(struct irq_desc *desc)
{
struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
@@ -136,6 +178,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
int ret;
+ /*
+ * Several components have AXI clocks that can only be turned on if
+ * the interconnect is enabled (non-zero bandwidth). Make sure the
+ * interconnect votes carry at least a minimum bandwidth.
+ */
+ msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
@@ -178,6 +227,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+ msm_mdss_icc_request_bw(msm_mdss, 0);
return 0;
}
@@ -271,6 +321,13 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+ ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
else
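
The interconnect additions to msm_mdss boil down to three calls: of_icc_get() to look up the "mdp0-mem"/"mdp1-mem" paths from the device tree, icc_set_bw() to place a peak-only vote (the average bandwidth argument stays 0) around enable/disable, and icc_put() at teardown. The sketch below shows that minimal flow in isolation; the example_display structure and function names are hypothetical, and only the "mdp0-mem" string and the 400 MB/s figure are taken from the patch.

#include <linux/err.h>
#include <linux/interconnect.h>

#define EXAMPLE_MIN_IB_BW	400000000UL	/* bytes per second */

struct example_display {
	struct device *dev;
	struct icc_path *path;
};

static int example_icc_init(struct example_display *disp)
{
	/* of_icc_get() may return NULL when no interconnect is described in DT. */
	disp->path = of_icc_get(disp->dev, "mdp0-mem");
	return PTR_ERR_OR_ZERO(disp->path);
}

static int example_display_enable(struct example_display *disp)
{
	/* Peak-only vote; average bandwidth is left at zero, as in msm_mdss. */
	return icc_set_bw(disp->path, 0, Bps_to_icc(EXAMPLE_MIN_IB_BW));
}

static void example_display_disable(struct example_display *disp)
{
	icc_set_bw(disp->path, 0, 0);	/* drop the vote while the block is idle */
}

static void example_icc_cleanup(struct example_display *disp)
{
	icc_put(disp->path);
}
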
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 43066320ff8c..56eecb4a72dc 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_lock(obj);
msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
- submit->bos[i].flags &= ~BO_PINNED;
+ submit->bos[i].flags &= ~BO_VMA_PINNED;
msm_gem_unlock(obj);
}
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 987170e16ebd..873551b4552f 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -19,3 +19,19 @@ config DRM_MXSFB
i.MX28, i.MX6SX, i.MX7 and i.MX8M).
If M is selected the module will be called mxsfb.
+
+config DRM_IMX_LCDIF
+ tristate "i.MX LCDIFv3 LCD controller"
+ depends on DRM && OF
+ depends on COMMON_CLK
+ select DRM_MXS
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_PANEL
+ select DRM_PANEL_BRIDGE
+ help
+ Choose this option if you have an LCDIFv3 LCD controller.
+ Those controllers are found in various i.MX SoCs (i.MX8MP,
+ i.MXRT).
+
+ If M is selected the module will be called imx-lcdif.
diff --git a/drivers/gpu/drm/mxsfb/Makefile b/drivers/gpu/drm/mxsfb/Makefile
index 26d153896d72..3fa44059b9d8 100644
--- a/drivers/gpu/drm/mxsfb/Makefile
+++ b/drivers/gpu/drm/mxsfb/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
mxsfb-y := mxsfb_drv.o mxsfb_kms.o
obj-$(CONFIG_DRM_MXSFB) += mxsfb.o
+imx-lcdif-y := lcdif_drv.o lcdif_kms.o
+obj-$(CONFIG_DRM_IMX_LCDIF) += imx-lcdif.o
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
new file mode 100644
index 000000000000..befad33dcb95
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/gpu/drm/mxsfb/mxsfb*
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_module.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "lcdif_drv.h"
+#include "lcdif_regs.h"
+
+static const struct drm_mode_config_funcs lcdif_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs lcdif_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static int lcdif_attach_bridge(struct lcdif_drm_private *lcdif)
+{
+ struct drm_device *drm = lcdif->drm;
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel,
+ &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = devm_drm_panel_bridge_add_typed(drm->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ if (!bridge)
+ return -ENODEV;
+
+ ret = drm_bridge_attach(&lcdif->encoder, bridge, NULL, 0);
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n");
+
+ lcdif->bridge = bridge;
+
+ return 0;
+}
+
+static irqreturn_t lcdif_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct lcdif_drm_private *lcdif = drm->dev_private;
+ u32 reg, stat;
+
+ stat = readl(lcdif->base + LCDC_V8_INT_STATUS_D0);
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & INT_STATUS_D0_VS_BLANK) {
+ reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ if (!(reg & CTRLDESCL0_5_SHADOW_LOAD_EN))
+ drm_crtc_handle_vblank(&lcdif->crtc);
+ }
+
+ writel(stat, lcdif->base + LCDC_V8_INT_STATUS_D0);
+
+ return IRQ_HANDLED;
+}
+
+static int lcdif_load(struct drm_device *drm)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct lcdif_drm_private *lcdif;
+ struct resource *res;
+ int ret;
+
+ lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
+ if (!lcdif)
+ return -ENOMEM;
+
+ lcdif->drm = drm;
+ drm->dev_private = lcdif;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lcdif->base = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(lcdif->base))
+ return PTR_ERR(lcdif->base);
+
+ lcdif->clk = devm_clk_get(drm->dev, "pix");
+ if (IS_ERR(lcdif->clk))
+ return PTR_ERR(lcdif->clk);
+
+ lcdif->clk_axi = devm_clk_get(drm->dev, "axi");
+ if (IS_ERR(lcdif->clk_axi))
+ return PTR_ERR(lcdif->clk_axi);
+
+ lcdif->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
+ if (IS_ERR(lcdif->clk_disp_axi))
+ return PTR_ERR(lcdif->clk_disp_axi);
+
+ platform_set_drvdata(pdev, drm);
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(36));
+ if (ret)
+ return ret;
+
+ /* Modeset init */
+ drm_mode_config_init(drm);
+
+ ret = lcdif_kms_init(lcdif);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialize KMS pipeline\n");
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialise vblank\n");
+ return ret;
+ }
+
+ /* Start with vertical blanking interrupt reporting disabled. */
+ drm_crtc_vblank_off(&lcdif->crtc);
+
+ ret = lcdif_attach_bridge(lcdif);
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "Cannot connect bridge\n");
+
+ drm->mode_config.min_width = LCDIF_MIN_XRES;
+ drm->mode_config.min_height = LCDIF_MIN_YRES;
+ drm->mode_config.max_width = LCDIF_MAX_XRES;
+ drm->mode_config.max_height = LCDIF_MAX_YRES;
+ drm->mode_config.funcs = &lcdif_mode_config_funcs;
+ drm->mode_config.helper_private = &lcdif_mode_config_helpers;
+
+ drm_mode_config_reset(drm);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ lcdif->irq = ret;
+
+ ret = devm_request_irq(drm->dev, lcdif->irq, lcdif_irq_handler, 0,
+ drm->driver->name, drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to install IRQ handler\n");
+ return ret;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ drm_helper_hpd_irq_event(drm);
+
+ pm_runtime_enable(drm->dev);
+
+ return 0;
+}
+
+static void lcdif_unload(struct drm_device *drm)
+{
+ struct lcdif_drm_private *lcdif = drm->dev_private;
+
+ pm_runtime_get_sync(drm->dev);
+
+ drm_crtc_vblank_off(&lcdif->crtc);
+
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+
+ pm_runtime_put_sync(drm->dev);
+ pm_runtime_disable(drm->dev);
+
+ drm->dev_private = NULL;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(fops);
+
+static const struct drm_driver lcdif_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ DRM_GEM_CMA_DRIVER_OPS,
+ .fops = &fops,
+ .name = "imx-lcdif",
+ .desc = "i.MX LCDIF Controller DRM",
+ .date = "20220417",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id lcdif_dt_ids[] = {
+ { .compatible = "fsl,imx8mp-lcdif" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lcdif_dt_ids);
+
+static int lcdif_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&lcdif_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ ret = lcdif_load(drm);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ drm_fbdev_generic_setup(drm, 32);
+
+ return 0;
+
+err_unload:
+ lcdif_unload(drm);
+err_free:
+ drm_dev_put(drm);
+
+ return ret;
+}
+
+static int lcdif_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ lcdif_unload(drm);
+ drm_dev_put(drm);
+
+ return 0;
+}
+
+static void lcdif_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(drm);
+}
+
+static int __maybe_unused lcdif_rpm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct lcdif_drm_private *lcdif = drm->dev_private;
+
+ /* This clock supplies the DISPLAY CLOCK Domain */
+ clk_disable_unprepare(lcdif->clk);
+ /* This clock supplies the System Bus, AXI, Write Path, LFIFO */
+ clk_disable_unprepare(lcdif->clk_disp_axi);
+ /* This clock supplies the Control Bus, APB, APBH Ctrl Registers */
+ clk_disable_unprepare(lcdif->clk_axi);
+
+ return 0;
+}
+
+static int __maybe_unused lcdif_rpm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct lcdif_drm_private *lcdif = drm->dev_private;
+
+ /* This clock supplies the Control Bus, APB, APBH Ctrl Registers */
+ clk_prepare_enable(lcdif->clk_axi);
+ /* This clock supplies the System Bus, AXI, Write Path, LFIFO */
+ clk_prepare_enable(lcdif->clk_disp_axi);
+ /* This clock supplies the DISPLAY CLOCK Domain */
+ clk_prepare_enable(lcdif->clk);
+
+ return 0;
+}
+
+static int __maybe_unused lcdif_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ int ret;
+
+ ret = drm_mode_config_helper_suspend(drm);
+ if (ret)
+ return ret;
+
+ return lcdif_rpm_suspend(dev);
+}
+
+static int __maybe_unused lcdif_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ lcdif_rpm_resume(dev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+
+static const struct dev_pm_ops lcdif_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lcdif_suspend, lcdif_resume)
+ SET_RUNTIME_PM_OPS(lcdif_rpm_suspend, lcdif_rpm_resume, NULL)
+};
+
+static struct platform_driver lcdif_platform_driver = {
+ .probe = lcdif_probe,
+ .remove = lcdif_remove,
+ .shutdown = lcdif_shutdown,
+ .driver = {
+ .name = "imx-lcdif",
+ .of_match_table = lcdif_dt_ids,
+ .pm = &lcdif_pm_ops,
+ },
+};
+
+drm_module_platform_driver(lcdif_platform_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale LCDIF DRM/KMS driver");
+MODULE_LICENSE("GPL");
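
One detail of lcdif_load() worth calling out is the DMA_BIT_MASK(36): the scanout address is programmed as a 32-bit low word (LCDC_V8_CTRLDESCL_LOW0_4) plus a 4-bit high field (CTRLDESCL_HIGH0_4_ADDR_HIGH), so 36 bits is exactly what the hardware can express. The small user-space program below just demonstrates that split arithmetically; the sample address is arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr = 0x8ABCDE000ULL;	/* any address below 2^36 */
	uint32_t low  = (uint32_t)(paddr & 0xffffffffULL);	/* lower_32_bits() */
	uint32_t high = (uint32_t)(paddr >> 32) & 0xf;		/* ADDR_HIGH field */

	printf("low word  = 0x%08x\n", low);
	printf("high bits = 0x%x (4-bit field, hence DMA_BIT_MASK(36))\n", high);
	return 0;
}
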
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.h b/drivers/gpu/drm/mxsfb/lcdif_drv.h
new file mode 100644
index 000000000000..cb916341e845
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ *
+ * i.MX8MP/i.MXRT LCDIFv3 LCD controller driver.
+ */
+
+#ifndef __LCDIF_DRV_H__
+#define __LCDIF_DRV_H__
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_plane.h>
+
+struct clk;
+
+struct lcdif_drm_private {
+ void __iomem *base; /* registers */
+ struct clk *clk;
+ struct clk *clk_axi;
+ struct clk *clk_disp_axi;
+
+ unsigned int irq;
+
+ struct drm_device *drm;
+ struct {
+ struct drm_plane primary;
+ /* The i.MXRT variant also supports overlay planes; add them here when needed. */
+ } planes;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_bridge *bridge;
+};
+
+static inline struct lcdif_drm_private *
+to_lcdif_drm_private(struct drm_device *drm)
+{
+ return drm->dev_private;
+}
+
+int lcdif_kms_init(struct lcdif_drm_private *lcdif);
+
+#endif /* __LCDIF_DRV_H__ */
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
new file mode 100644
index 000000000000..1bec1279c8b5
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/gpu/drm/mxsfb/mxsfb*
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/media-bus-format.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "lcdif_drv.h"
+#include "lcdif_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * CRTC
+ */
+static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
+ const u32 bus_format)
+{
+ struct drm_device *drm = lcdif->drm;
+ const u32 format = lcdif->crtc.primary->state->fb->format->format;
+
+ writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ writel(DISP_PARA_LINE_PATTERN_RGB565,
+ lcdif->base + LCDC_V8_DISP_PARA);
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ writel(DISP_PARA_LINE_PATTERN_RGB888,
+ lcdif->base + LCDC_V8_DISP_PARA);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ writel(DISP_PARA_LINE_PATTERN_UYVY_H,
+ lcdif->base + LCDC_V8_DISP_PARA);
+
+ /* CSC: BT.601 Full Range RGB to YCbCr coefficients. */
+ writel(CSC0_COEF0_A2(0x096) | CSC0_COEF0_A1(0x04c),
+ lcdif->base + LCDC_V8_CSC0_COEF0);
+ writel(CSC0_COEF1_B1(0x7d5) | CSC0_COEF1_A3(0x01d),
+ lcdif->base + LCDC_V8_CSC0_COEF1);
+ writel(CSC0_COEF2_B3(0x080) | CSC0_COEF2_B2(0x7ac),
+ lcdif->base + LCDC_V8_CSC0_COEF2);
+ writel(CSC0_COEF3_C2(0x795) | CSC0_COEF3_C1(0x080),
+ lcdif->base + LCDC_V8_CSC0_COEF3);
+ writel(CSC0_COEF4_D1(0x000) | CSC0_COEF4_C3(0x7ec),
+ lcdif->base + LCDC_V8_CSC0_COEF4);
+ writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
+ lcdif->base + LCDC_V8_CSC0_COEF5);
+
+ writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr,
+ lcdif->base + LCDC_V8_CSC0_CTRL);
+
+ break;
+ default:
+ dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
+ break;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ writel(CTRLDESCL0_5_BPP_16_RGB565,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ break;
+ case DRM_FORMAT_RGB888:
+ writel(CTRLDESCL0_5_BPP_24_RGB888,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ break;
+ case DRM_FORMAT_XRGB1555:
+ writel(CTRLDESCL0_5_BPP_16_ARGB1555,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ break;
+ case DRM_FORMAT_XRGB4444:
+ writel(CTRLDESCL0_5_BPP_16_ARGB4444,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ break;
+ case DRM_FORMAT_XBGR8888:
+ writel(CTRLDESCL0_5_BPP_32_ABGR8888,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ break;
+ case DRM_FORMAT_XRGB8888:
+ writel(CTRLDESCL0_5_BPP_32_ARGB8888,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ break;
+ default:
+ dev_err(drm->dev, "Unknown pixel format 0x%x\n", format);
+ break;
+ }
+}
+
+static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
+{
+ struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
+ u32 ctrl = 0;
+
+ if (m->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= CTRL_INV_HS;
+ if (m->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= CTRL_INV_VS;
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ ctrl |= CTRL_INV_DE;
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ ctrl |= CTRL_INV_PXCK;
+
+ writel(ctrl, lcdif->base + LCDC_V8_CTRL);
+
+ writel(DISP_SIZE_DELTA_Y(m->crtc_vdisplay) |
+ DISP_SIZE_DELTA_X(m->crtc_hdisplay),
+ lcdif->base + LCDC_V8_DISP_SIZE);
+
+ writel(HSYN_PARA_BP_H(m->htotal - m->hsync_end) |
+ HSYN_PARA_FP_H(m->hsync_start - m->hdisplay),
+ lcdif->base + LCDC_V8_HSYN_PARA);
+
+ writel(VSYN_PARA_BP_V(m->vtotal - m->vsync_end) |
+ VSYN_PARA_FP_V(m->vsync_start - m->vdisplay),
+ lcdif->base + LCDC_V8_VSYN_PARA);
+
+ writel(VSYN_HSYN_WIDTH_PW_V(m->vsync_end - m->vsync_start) |
+ VSYN_HSYN_WIDTH_PW_H(m->hsync_end - m->hsync_start),
+ lcdif->base + LCDC_V8_VSYN_HSYN_WIDTH);
+
+ writel(CTRLDESCL0_1_HEIGHT(m->crtc_vdisplay) |
+ CTRLDESCL0_1_WIDTH(m->crtc_hdisplay),
+ lcdif->base + LCDC_V8_CTRLDESCL0_1);
+
+ writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
+ lcdif->base + LCDC_V8_CTRLDESCL0_3);
+}
+
+static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
+{
+ u32 reg;
+
+ reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
+ reg |= DISP_PARA_DISP_ON;
+ writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
+
+ reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ reg |= CTRLDESCL0_5_EN;
+ writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
+}
+
+static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
+{
+ u32 reg;
+ int ret;
+
+ reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ reg &= ~CTRLDESCL0_5_EN;
+ writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
+
+ ret = readl_poll_timeout(lcdif->base + LCDC_V8_CTRLDESCL0_5,
+ reg, !(reg & CTRLDESCL0_5_EN),
+ 0, 36000); /* Wait up to 36 ms, ~2 frame periods at 60 Hz */
+ if (ret)
+ drm_err(lcdif->drm, "Failed to disable controller!\n");
+
+ reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
+ reg &= ~DISP_PARA_DISP_ON;
+ writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
+}
+
+static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
+{
+ writel(CTRL_SW_RESET, lcdif->base + LCDC_V8_CTRL + REG_SET);
+ readl(lcdif->base + LCDC_V8_CTRL);
+ writel(CTRL_SW_RESET, lcdif->base + LCDC_V8_CTRL + REG_CLR);
+ readl(lcdif->base + LCDC_V8_CTRL);
+}
+
+static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
+ struct drm_bridge_state *bridge_state,
+ const u32 bus_format)
+{
+ struct drm_device *drm = lcdif->crtc.dev;
+ struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
+ u32 bus_flags = 0;
+
+ if (lcdif->bridge && lcdif->bridge->timings)
+ bus_flags = lcdif->bridge->timings->input_bus_flags;
+ else if (bridge_state)
+ bus_flags = bridge_state->input_bus_cfg.flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(lcdif->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
+ /* Mandatory eLCDIF reset as per the Reference Manual */
+ lcdif_reset_block(lcdif);
+
+ lcdif_set_formats(lcdif, bus_format);
+
+ lcdif_set_mode(lcdif, bus_flags);
+}
+
+static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ bool has_primary = crtc_state->plane_mask &
+ drm_plane_mask(crtc->primary);
+
+ /* The primary plane has to be enabled when the CRTC is active. */
+ if (crtc_state->active && !has_primary)
+ return -EINVAL;
+
+ return drm_atomic_add_affected_planes(state, crtc);
+}
+
+static void lcdif_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
+ struct drm_pending_vblank_event *event;
+ u32 reg;
+
+ reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ reg |= CTRLDESCL0_5_SHADOW_LOAD_EN;
+ writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
+
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ if (!event)
+ return;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
+ struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
+ crtc->primary);
+ struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
+ struct drm_bridge_state *bridge_state = NULL;
+ struct drm_device *drm = lcdif->drm;
+ u32 bus_format = 0;
+ dma_addr_t paddr;
+
+ /* If there is a bridge attached to the LCDIF, use its bus format */
+ if (lcdif->bridge) {
+ bridge_state =
+ drm_atomic_get_new_bridge_state(state,
+ lcdif->bridge);
+ if (!bridge_state)
+ bus_format = MEDIA_BUS_FMT_FIXED;
+ else
+ bus_format = bridge_state->input_bus_cfg.format;
+
+ if (bus_format == MEDIA_BUS_FMT_FIXED) {
+ dev_warn_once(drm->dev,
+ "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
+ "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n");
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ }
+ }
+
+ /* If all else fails, default to RGB888_1X24 */
+ if (!bus_format)
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ clk_set_rate(lcdif->clk, m->crtc_clock * 1000);
+
+ pm_runtime_get_sync(drm->dev);
+
+ lcdif_crtc_mode_set_nofb(lcdif, bridge_state, bus_format);
+
+ /* Write cur_buf as well to avoid an initial corrupt frame */
+ paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (paddr) {
+ writel(lower_32_bits(paddr),
+ lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
+ writel(CTRLDESCL_HIGH0_4_ADDR_HIGH(upper_32_bits(paddr)),
+ lcdif->base + LCDC_V8_CTRLDESCL_HIGH0_4);
+ }
+ lcdif_enable_controller(lcdif);
+
+ drm_crtc_vblank_on(crtc);
+}
+
+static void lcdif_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
+ struct drm_device *drm = lcdif->drm;
+ struct drm_pending_vblank_event *event;
+
+ drm_crtc_vblank_off(crtc);
+
+ lcdif_disable_controller(lcdif);
+
+ spin_lock_irq(&drm->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&drm->event_lock);
+
+ pm_runtime_put_sync(drm->dev);
+}
+
+static int lcdif_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
+
+ /* Clear and enable VBLANK IRQ */
+ writel(INT_STATUS_D0_VS_BLANK, lcdif->base + LCDC_V8_INT_STATUS_D0);
+ writel(INT_ENABLE_D0_VS_BLANK_EN, lcdif->base + LCDC_V8_INT_ENABLE_D0);
+
+ return 0;
+}
+
+static void lcdif_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
+
+ /* Disable and clear VBLANK IRQ */
+ writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D0);
+ writel(INT_STATUS_D0_VS_BLANK, lcdif->base + LCDC_V8_INT_STATUS_D0);
+}
+
+static const struct drm_crtc_helper_funcs lcdif_crtc_helper_funcs = {
+ .atomic_check = lcdif_crtc_atomic_check,
+ .atomic_flush = lcdif_crtc_atomic_flush,
+ .atomic_enable = lcdif_crtc_atomic_enable,
+ .atomic_disable = lcdif_crtc_atomic_disable,
+};
+
+static const struct drm_crtc_funcs lcdif_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = lcdif_crtc_enable_vblank,
+ .disable_vblank = lcdif_crtc_disable_vblank,
+};
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs lcdif_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/* -----------------------------------------------------------------------------
+ * Planes
+ */
+
+static int lcdif_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(plane->dev);
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ &lcdif->crtc);
+
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static void lcdif_plane_primary_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lcdif_drm_private *lcdif = to_lcdif_drm_private(plane->dev);
+ struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
+ plane);
+ dma_addr_t paddr;
+
+ paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (paddr) {
+ writel(lower_32_bits(paddr),
+ lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
+ writel(CTRLDESCL_HIGH0_4_ADDR_HIGH(upper_32_bits(paddr)),
+ lcdif->base + LCDC_V8_CTRLDESCL_HIGH0_4);
+ }
+}
+
+static bool lcdif_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
+static const struct drm_plane_helper_funcs lcdif_plane_primary_helper_funcs = {
+ .atomic_check = lcdif_plane_atomic_check,
+ .atomic_update = lcdif_plane_primary_atomic_update,
+};
+
+static const struct drm_plane_funcs lcdif_plane_funcs = {
+ .format_mod_supported = lcdif_format_mod_supported,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const u32 lcdif_primary_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u64 lcdif_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int lcdif_kms_init(struct lcdif_drm_private *lcdif)
+{
+ struct drm_encoder *encoder = &lcdif->encoder;
+ struct drm_crtc *crtc = &lcdif->crtc;
+ int ret;
+
+ drm_plane_helper_add(&lcdif->planes.primary,
+ &lcdif_plane_primary_helper_funcs);
+ ret = drm_universal_plane_init(lcdif->drm, &lcdif->planes.primary, 1,
+ &lcdif_plane_funcs,
+ lcdif_primary_plane_formats,
+ ARRAY_SIZE(lcdif_primary_plane_formats),
+ lcdif_modifiers, DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &lcdif_crtc_helper_funcs);
+ ret = drm_crtc_init_with_planes(lcdif->drm, crtc,
+ &lcdif->planes.primary, NULL,
+ &lcdif_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ return drm_encoder_init(lcdif->drm, encoder, &lcdif_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+}
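
The CSC0 coefficient constants written by lcdif_set_formats() for UYVY output look like the BT.601 full-range RGB-to-YCbCr matrix in fixed point. Assuming the 11-bit coefficient fields are two's-complement values with 8 fractional bits (an assumption inferred from the register masks, not stated in the patch), they decode to within one LSB of the textbook values. The throwaway program below performs that decode as a sanity check; it is not driver code.

#include <stdio.h>

/* Decode an 11-bit two's-complement value with 8 fractional bits. */
static double q8(unsigned int v)
{
	int s = (v & 0x400) ? (int)v - 0x800 : (int)v;	/* sign-extend 11 bits */
	return s / 256.0;
}

int main(void)
{
	/* A1..A3: Y row, B1..B3: Cb row, C1..C3: Cr row, in R, G, B order. */
	printf("Y : %6.3f %6.3f %6.3f  (BT.601:  0.299  0.587  0.114)\n",
	       q8(0x04c), q8(0x096), q8(0x01d));
	printf("Cb: %6.3f %6.3f %6.3f  (BT.601: -0.169 -0.331  0.500)\n",
	       q8(0x7d5), q8(0x7ac), q8(0x080));
	printf("Cr: %6.3f %6.3f %6.3f  (BT.601:  0.500 -0.419 -0.081)\n",
	       q8(0x080), q8(0x795), q8(0x7ec));
	return 0;
}
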
diff --git a/drivers/gpu/drm/mxsfb/lcdif_regs.h b/drivers/gpu/drm/mxsfb/lcdif_regs.h
new file mode 100644
index 000000000000..c70220651e3a
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/lcdif_regs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ *
+ * i.MX8MP/i.MXRT LCDIFv3 LCD controller driver.
+ */
+
+#ifndef __LCDIF_REGS_H__
+#define __LCDIF_REGS_H__
+
+#define REG_SET 4
+#define REG_CLR 8
+
+/* V8 register set */
+#define LCDC_V8_CTRL 0x00
+#define LCDC_V8_DISP_PARA 0x10
+#define LCDC_V8_DISP_SIZE 0x14
+#define LCDC_V8_HSYN_PARA 0x18
+#define LCDC_V8_VSYN_PARA 0x1c
+#define LCDC_V8_VSYN_HSYN_WIDTH 0x20
+#define LCDC_V8_INT_STATUS_D0 0x24
+#define LCDC_V8_INT_ENABLE_D0 0x28
+#define LCDC_V8_INT_STATUS_D1 0x30
+#define LCDC_V8_INT_ENABLE_D1 0x34
+#define LCDC_V8_CTRLDESCL0_1 0x200
+#define LCDC_V8_CTRLDESCL0_3 0x208
+#define LCDC_V8_CTRLDESCL_LOW0_4 0x20c
+#define LCDC_V8_CTRLDESCL_HIGH0_4 0x210
+#define LCDC_V8_CTRLDESCL0_5 0x214
+#define LCDC_V8_CSC0_CTRL 0x21c
+#define LCDC_V8_CSC0_COEF0 0x220
+#define LCDC_V8_CSC0_COEF1 0x224
+#define LCDC_V8_CSC0_COEF2 0x228
+#define LCDC_V8_CSC0_COEF3 0x22c
+#define LCDC_V8_CSC0_COEF4 0x230
+#define LCDC_V8_CSC0_COEF5 0x234
+#define LCDC_V8_PANIC0_THRES 0x238
+
+#define CTRL_SFTRST BIT(31)
+#define CTRL_CLKGATE BIT(30)
+#define CTRL_BYPASS_COUNT BIT(19)
+#define CTRL_VSYNC_MODE BIT(18)
+#define CTRL_DOTCLK_MODE BIT(17)
+#define CTRL_DATA_SELECT BIT(16)
+#define CTRL_BUS_WIDTH_16 (0 << 10)
+#define CTRL_BUS_WIDTH_8 (1 << 10)
+#define CTRL_BUS_WIDTH_18 (2 << 10)
+#define CTRL_BUS_WIDTH_24 (3 << 10)
+#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
+#define CTRL_WORD_LENGTH_16 (0 << 8)
+#define CTRL_WORD_LENGTH_8 (1 << 8)
+#define CTRL_WORD_LENGTH_18 (2 << 8)
+#define CTRL_WORD_LENGTH_24 (3 << 8)
+#define CTRL_MASTER BIT(5)
+#define CTRL_DF16 BIT(3)
+#define CTRL_DF18 BIT(2)
+#define CTRL_DF24 BIT(1)
+#define CTRL_RUN BIT(0)
+
+#define CTRL1_RECOVER_ON_UNDERFLOW BIT(24)
+#define CTRL1_FIFO_CLEAR BIT(21)
+#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+#define CTRL1_CUR_FRAME_DONE_IRQ_EN BIT(13)
+#define CTRL1_CUR_FRAME_DONE_IRQ BIT(9)
+
+#define CTRL2_SET_OUTSTANDING_REQS_1 0
+#define CTRL2_SET_OUTSTANDING_REQS_2 (0x1 << 21)
+#define CTRL2_SET_OUTSTANDING_REQS_4 (0x2 << 21)
+#define CTRL2_SET_OUTSTANDING_REQS_8 (0x3 << 21)
+#define CTRL2_SET_OUTSTANDING_REQS_16 (0x4 << 21)
+#define CTRL2_SET_OUTSTANDING_REQS_MASK (0x7 << 21)
+
+#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
+#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
+
+#define VDCTRL0_ENABLE_PRESENT BIT(28)
+#define VDCTRL0_VSYNC_ACT_HIGH BIT(27)
+#define VDCTRL0_HSYNC_ACT_HIGH BIT(26)
+#define VDCTRL0_DOTCLK_ACT_FALLING BIT(25)
+#define VDCTRL0_ENABLE_ACT_HIGH BIT(24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT BIT(21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT BIT(20)
+#define VDCTRL0_HALF_LINE BIT(19)
+#define VDCTRL0_HALF_LINE_MODE BIT(18)
+#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+
+#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+
+#define VDCTRL3_MUX_SYNC_SIGNALS BIT(29)
+#define VDCTRL3_VSYNC_ONLY BIT(28)
+#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
+#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
+#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+
+#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
+#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
+#define VDCTRL4_SYNC_SIGNALS_ON BIT(18)
+#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
+
+#define DEBUG0_HSYNC BIT(26)
+#define DEBUG0_VSYNC BIT(25)
+
+#define AS_CTRL_PS_DISABLE BIT(23)
+#define AS_CTRL_ALPHA_INVERT BIT(20)
+#define AS_CTRL_ALPHA(a) (((a) & 0xff) << 8)
+#define AS_CTRL_FORMAT_RGB565 (0xe << 4)
+#define AS_CTRL_FORMAT_RGB444 (0xd << 4)
+#define AS_CTRL_FORMAT_RGB555 (0xc << 4)
+#define AS_CTRL_FORMAT_ARGB4444 (0x9 << 4)
+#define AS_CTRL_FORMAT_ARGB1555 (0x8 << 4)
+#define AS_CTRL_FORMAT_RGB888 (0x4 << 4)
+#define AS_CTRL_FORMAT_ARGB8888 (0x0 << 4)
+#define AS_CTRL_ENABLE_COLORKEY BIT(3)
+#define AS_CTRL_ALPHA_CTRL_ROP (3 << 1)
+#define AS_CTRL_ALPHA_CTRL_MULTIPLY (2 << 1)
+#define AS_CTRL_ALPHA_CTRL_OVERRIDE (1 << 1)
+#define AS_CTRL_ALPHA_CTRL_EMBEDDED (0 << 1)
+#define AS_CTRL_AS_ENABLE BIT(0)
+
+/* V8 register set */
+#define CTRL_SW_RESET BIT(31)
+#define CTRL_FETCH_START_OPTION_FPV 0
+#define CTRL_FETCH_START_OPTION_PWV BIT(8)
+#define CTRL_FETCH_START_OPTION_BPV BIT(9)
+#define CTRL_FETCH_START_OPTION_RESV GENMASK(9, 8)
+#define CTRL_FETCH_START_OPTION_MASK GENMASK(9, 8)
+#define CTRL_NEG BIT(4)
+#define CTRL_INV_PXCK BIT(3)
+#define CTRL_INV_DE BIT(2)
+#define CTRL_INV_VS BIT(1)
+#define CTRL_INV_HS BIT(0)
+
+#define DISP_PARA_DISP_ON BIT(31)
+#define DISP_PARA_SWAP_EN BIT(30)
+#define DISP_PARA_LINE_PATTERN_UYVY_H (GENMASK(29, 28) | BIT(26))
+#define DISP_PARA_LINE_PATTERN_RGB565 GENMASK(28, 26)
+#define DISP_PARA_LINE_PATTERN_RGB888 0
+#define DISP_PARA_LINE_PATTERN_MASK GENMASK(29, 26)
+#define DISP_PARA_DISP_MODE_MASK GENMASK(25, 24)
+#define DISP_PARA_BGND_R_MASK GENMASK(23, 16)
+#define DISP_PARA_BGND_G_MASK GENMASK(15, 8)
+#define DISP_PARA_BGND_B_MASK GENMASK(7, 0)
+
+#define DISP_SIZE_DELTA_Y(n) (((n) & 0xffff) << 16)
+#define DISP_SIZE_DELTA_Y_MASK GENMASK(31, 16)
+#define DISP_SIZE_DELTA_X(n) ((n) & 0xffff)
+#define DISP_SIZE_DELTA_X_MASK GENMASK(15, 0)
+
+#define HSYN_PARA_BP_H(n) (((n) & 0xffff) << 16)
+#define HSYN_PARA_BP_H_MASK GENMASK(31, 16)
+#define HSYN_PARA_FP_H(n) ((n) & 0xffff)
+#define HSYN_PARA_FP_H_MASK GENMASK(15, 0)
+
+#define VSYN_PARA_BP_V(n) (((n) & 0xffff) << 16)
+#define VSYN_PARA_BP_V_MASK GENMASK(31, 16)
+#define VSYN_PARA_FP_V(n) ((n) & 0xffff)
+#define VSYN_PARA_FP_V_MASK GENMASK(15, 0)
+
+#define VSYN_HSYN_WIDTH_PW_V(n) (((n) & 0xffff) << 16)
+#define VSYN_HSYN_WIDTH_PW_V_MASK GENMASK(31, 16)
+#define VSYN_HSYN_WIDTH_PW_H(n) ((n) & 0xffff)
+#define VSYN_HSYN_WIDTH_PW_H_MASK GENMASK(15, 0)
+
+#define INT_STATUS_D0_FIFO_EMPTY BIT(24)
+#define INT_STATUS_D0_DMA_DONE BIT(16)
+#define INT_STATUS_D0_DMA_ERR BIT(8)
+#define INT_STATUS_D0_VS_BLANK BIT(2)
+#define INT_STATUS_D0_UNDERRUN BIT(1)
+#define INT_STATUS_D0_VSYNC BIT(0)
+
+#define INT_ENABLE_D0_FIFO_EMPTY_EN BIT(24)
+#define INT_ENABLE_D0_DMA_DONE_EN BIT(16)
+#define INT_ENABLE_D0_DMA_ERR_EN BIT(8)
+#define INT_ENABLE_D0_VS_BLANK_EN BIT(2)
+#define INT_ENABLE_D0_UNDERRUN_EN BIT(1)
+#define INT_ENABLE_D0_VSYNC_EN BIT(0)
+
+#define INT_STATUS_D1_PLANE_PANIC BIT(0)
+
+#define INT_ENABLE_D1_PLANE_PANIC_EN BIT(0)
+
+#define CTRLDESCL0_1_HEIGHT(n) (((n) & 0xffff) << 16)
+#define CTRLDESCL0_1_HEIGHT_MASK GENMASK(31, 16)
+#define CTRLDESCL0_1_WIDTH(n) ((n) & 0xffff)
+#define CTRLDESCL0_1_WIDTH_MASK GENMASK(15, 0)
+
+#define CTRLDESCL0_3_PITCH(n) ((n) & 0xffff)
+#define CTRLDESCL0_3_PITCH_MASK GENMASK(15, 0)
+
+#define CTRLDESCL_HIGH0_4_ADDR_HIGH(n) ((n) & 0xf)
+#define CTRLDESCL_HIGH0_4_ADDR_HIGH_MASK GENMASK(3, 0)
+
+#define CTRLDESCL0_5_EN BIT(31)
+#define CTRLDESCL0_5_SHADOW_LOAD_EN BIT(30)
+#define CTRLDESCL0_5_BPP_16_RGB565 BIT(26)
+#define CTRLDESCL0_5_BPP_16_ARGB1555 (BIT(26) | BIT(24))
+#define CTRLDESCL0_5_BPP_16_ARGB4444 (BIT(26) | BIT(25))
+#define CTRLDESCL0_5_BPP_YCbCr422 (BIT(26) | BIT(25) | BIT(24))
+#define CTRLDESCL0_5_BPP_24_RGB888 BIT(27)
+#define CTRLDESCL0_5_BPP_32_ARGB8888 (BIT(27) | BIT(24))
+#define CTRLDESCL0_5_BPP_32_ABGR8888 (BIT(27) | BIT(25))
+#define CTRLDESCL0_5_BPP_MASK GENMASK(27, 24)
+#define CTRLDESCL0_5_YUV_FORMAT_Y2VY1U 0
+#define CTRLDESCL0_5_YUV_FORMAT_Y2UY1V BIT(14)
+#define CTRLDESCL0_5_YUV_FORMAT_VY2UY1 BIT(15)
+#define CTRLDESCL0_5_YUV_FORMAT_UY2VY1 (BIT(15) | BIT(14))
+#define CTRLDESCL0_5_YUV_FORMAT_MASK GENMASK(15, 14)
+
+#define CSC0_CTRL_CSC_MODE_RGB2YCbCr GENMASK(2, 1)
+#define CSC0_CTRL_CSC_MODE_MASK GENMASK(2, 1)
+#define CSC0_CTRL_BYPASS BIT(0)
+
+#define CSC0_COEF0_A2(n) (((n) << 16) & CSC0_COEF0_A2_MASK)
+#define CSC0_COEF0_A2_MASK GENMASK(26, 16)
+#define CSC0_COEF0_A1(n) ((n) & CSC0_COEF0_A1_MASK)
+#define CSC0_COEF0_A1_MASK GENMASK(10, 0)
+
+#define CSC0_COEF1_B1(n) (((n) << 16) & CSC0_COEF1_B1_MASK)
+#define CSC0_COEF1_B1_MASK GENMASK(26, 16)
+#define CSC0_COEF1_A3(n) ((n) & CSC0_COEF1_A3_MASK)
+#define CSC0_COEF1_A3_MASK GENMASK(10, 0)
+
+#define CSC0_COEF2_B3(n) (((n) << 16) & CSC0_COEF2_B3_MASK)
+#define CSC0_COEF2_B3_MASK GENMASK(26, 16)
+#define CSC0_COEF2_B2(n) ((n) & CSC0_COEF2_B2_MASK)
+#define CSC0_COEF2_B2_MASK GENMASK(10, 0)
+
+#define CSC0_COEF3_C2(n) (((n) << 16) & CSC0_COEF3_C2_MASK)
+#define CSC0_COEF3_C2_MASK GENMASK(26, 16)
+#define CSC0_COEF3_C1(n) ((n) & CSC0_COEF3_C1_MASK)
+#define CSC0_COEF3_C1_MASK GENMASK(10, 0)
+
+#define CSC0_COEF4_D1(n) (((n) << 16) & CSC0_COEF4_D1_MASK)
+#define CSC0_COEF4_D1_MASK GENMASK(24, 16)
+#define CSC0_COEF4_C3(n) ((n) & CSC0_COEF4_C3_MASK)
+#define CSC0_COEF4_C3_MASK GENMASK(10, 0)
+
+#define CSC0_COEF5_D3(n) (((n) << 16) & CSC0_COEF5_D3_MASK)
+#define CSC0_COEF5_D3_MASK GENMASK(24, 16)
+#define CSC0_COEF5_D2(n) ((n) & CSC0_COEF5_D2_MASK)
+#define CSC0_COEF5_D2_MASK GENMASK(8, 0)
+
+#define PANIC0_THRES_LOW_MASK GENMASK(24, 16)
+#define PANIC0_THRES_HIGH_MASK GENMASK(8, 0)
+
+#define LCDIF_MIN_XRES 120
+#define LCDIF_MIN_YRES 120
+#define LCDIF_MAX_XRES 0xffff
+#define LCDIF_MAX_YRES 0xffff
+
+#endif /* __LCDIF_REGS_H__ */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index f021ab2c4520..e38ce5737a5f 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/media-bus-format.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
@@ -21,6 +22,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane.h>
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 34760164c271..03d12caf9e26 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,7 +11,6 @@ config DRM_NOUVEAU
select DRM_TTM
select DRM_TTM_HELPER
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
- select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
select MXM_WMI if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 4347f0b61797..ade2988e85f3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2623,14 +2623,6 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_encoder *encoder;
- struct drm_plane *plane;
-
- drm_for_each_plane(plane, dev) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (plane->funcs != &nv50_wndw)
- continue;
- nv50_wndw_fini(wndw);
- }
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
@@ -2646,7 +2638,6 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_encoder *encoder;
- struct drm_plane *plane;
if (resume || runtime)
core->func->init(core);
@@ -2659,13 +2650,6 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
}
}
- drm_for_each_plane(plane, dev) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (plane->funcs != &nv50_wndw)
- continue;
- nv50_wndw_init(wndw);
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index bb8a4601e0d9..b21f49f0eae5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -32,6 +32,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_fourcc.h>
@@ -693,18 +694,6 @@ nv50_wndw_notify(struct nvif_notify *notify)
return NVIF_NOTIFY_KEEP;
}
-void
-nv50_wndw_fini(struct nv50_wndw *wndw)
-{
- nvif_notify_put(&wndw->notify);
-}
-
-void
-nv50_wndw_init(struct nv50_wndw *wndw)
-{
- nvif_notify_get(&wndw->notify);
-}
-
static const u64 nv50_cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 9c9f2c2a71a5..96542ce666fc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -40,8 +40,6 @@ int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
enum drm_plane_type, const char *name, int index,
const u32 *format, enum nv50_disp_interlock_type,
u32 interlock_data, u32 heads, struct nv50_wndw **);
-void nv50_wndw_init(struct nv50_wndw *);
-void nv50_wndw_fini(struct nv50_wndw *);
void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
struct nv50_wndw_atom *);
void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/drf.h b/drivers/gpu/drm/nouveau/include/nvhw/drf.h
index bd0fc41446e2..d6969c0e2f29 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/drf.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/drf.h
@@ -190,7 +190,7 @@
#define DRF_MD_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
#define DRF_MD(A...) DRF_MD_(X, ##A, DRF_MD_I, DRF_MD_N)(X, ##A)
-/* Helper for testing against field value in aribtrary object */
+/* Helper for testing against field value in arbitrary object */
#define DRF_TV_N(X,e,p,o,d,r, f,cmp,v) \
NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r ), d##_##r##_##f, cmp, (v))
#define DRF_TV_I(X,e,p,o,d,r,i,f,cmp,v) \
@@ -198,7 +198,7 @@
#define DRF_TV_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,IMPL,...) IMPL
#define DRF_TV(A...) DRF_TV_(X, ##A, DRF_TV_I, DRF_TV_N)(X, ##A)
-/* Helper for testing against field definition in aribtrary object */
+/* Helper for testing against field definition in arbitrary object */
#define DRF_TD_N(X,e,p,o,d,r, f,cmp,v) \
NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r ), d##_##r##_##f, cmp, d##_##r##_##f##_##v)
#define DRF_TD_I(X,e,p,o,d,r,i,f,cmp,v) \
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 1e4c158d20fa..f52399caee82 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -22,6 +22,12 @@ struct nvif_object {
} map;
};
+static inline bool
+nvif_object_constructed(struct nvif_object *object)
+{
+ return object->client != NULL;
+}
+
int nvif_object_ctor(struct nvif_object *, const char *name, u32 handle,
s32 oclass, void *, u32, struct nvif_object *);
void nvif_object_dtor(struct nvif_object *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index a18b6cfda07e..efede1f11e1d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -2,7 +2,6 @@
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
-#include <core/event.h>
enum nvkm_subdev_type;
enum nvkm_device_type {
@@ -28,8 +27,6 @@ struct nvkm_device {
void __iomem *pri;
- struct nvkm_event event;
-
u32 debug;
const struct nvkm_device_chip *chip;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 306125d17ece..b593407b9e36 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -4,7 +4,6 @@
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
-struct nvkm_gpuobj;
enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_UCODE = 0,
@@ -51,15 +50,6 @@ struct nvkm_falcon {
struct nvkm_engine engine;
};
-/* This constructor must be called from the owner's oneinit() hook and
- * *not* its constructor. This is to ensure that DEVINIT has been
- * completed, and that the device is correctly enabled before we touch
- * falcon registers.
- */
-int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
- struct nvkm_falcon **);
-
-void nvkm_falcon_del(struct nvkm_falcon **);
int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 64ee82c7c1be..15099913504d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -26,7 +26,6 @@ struct nvkm_fifo_chan {
struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *push;
struct nvkm_vmm *vmm;
- void __iomem *user;
u64 addr;
u32 size;
@@ -44,7 +43,6 @@ struct nvkm_fifo {
struct mutex mutex;
struct nvkm_event uevent; /* async user trigger */
- struct nvkm_event cevent; /* channel creation event */
struct nvkm_event kevent; /* channel killed */
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index 05b99c9e9a26..d5d8877064a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -2,7 +2,6 @@
#ifndef __NVKM_CLK_H__
#define __NVKM_CLK_H__
#include <core/subdev.h>
-#include <core/notify.h>
#include <subdev/pci.h>
struct nvbios_pll;
struct nvkm_pll_vals;
@@ -94,7 +93,6 @@ struct nvkm_clk {
wait_queue_head_t wait;
atomic_t waiting;
- struct nvkm_notify pwrsrc_ntfy;
int pwrsrc;
int pstate; /* current */
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
@@ -124,6 +122,7 @@ int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
+int nvkm_clk_pwrsrc(struct nvkm_device *);
int nv04_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 581458ad38e0..9c78f072d62b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -1,6 +1,7 @@
#ifndef __NVKM_FAULT_H__
#define __NVKM_FAULT_H__
#include <core/subdev.h>
+#include <core/event.h>
#include <core/notify.h>
struct nvkm_fault {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
deleted file mode 100644
index b57fe4ae93ba..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECURE_BOOT_H__
-#define __NVKM_SECURE_BOOT_H__
-
-#include <core/subdev.h>
-
-enum nvkm_secboot_falcon {
- NVKM_SECBOOT_FALCON_PMU = 0,
- NVKM_SECBOOT_FALCON_RESERVED = 1,
- NVKM_SECBOOT_FALCON_FECS = 2,
- NVKM_SECBOOT_FALCON_GPCCS = 3,
- NVKM_SECBOOT_FALCON_SEC2 = 7,
- NVKM_SECBOOT_FALCON_END = 8,
- NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
-};
-
-extern const char *nvkm_secboot_falcon_name[];
-
-/**
- * @wpr_set: whether the WPR region is currently set
-*/
-struct nvkm_secboot {
- const struct nvkm_secboot_func *func;
- struct nvkm_acr *acr;
- struct nvkm_subdev subdev;
- struct nvkm_falcon *boot_falcon;
- struct nvkm_falcon *halt_falcon;
-
- u64 wpr_addr;
- u32 wpr_size;
-
- bool wpr_set;
-};
-#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
-
-bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
-int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
-
-int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
-int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
-int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
-int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
-int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 4107b7006539..5bee655e7e63 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -126,9 +126,8 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
- /* wait for all activity to stop before releasing notify object, which
- * may be still in use */
- if (chan->chan && chan->ntfy)
+ /* wait for all activity to stop before cleaning up */
+ if (chan->chan)
nouveau_channel_idle(chan->chan);
/* cleanup notifier state */
@@ -147,7 +146,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
/* destroy channel object, all children will be killed too */
if (chan->chan) {
- nouveau_channel_idle(chan->chan);
+ nvif_object_dtor(&chan->ce);
nouveau_channel_del(&chan->chan);
}
@@ -325,6 +324,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
init->nr_subchan = 2;
}
+ /* Workaround "nvc0" gallium driver using classes it doesn't allocate on
+ * Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
+ * channel init, now that we know what that state actually is.
+ *
+ * Doesn't matter on Kepler/Pascal, where the CE context is stored in NV_RAMIN.
+ *
+ * Userspace was fixed prior to adding Ampere support.
+ */
+ switch (device->info.family) {
+ case NV_DEVICE_INFO_V0_VOLTA:
+ ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
+ NULL, 0, &chan->ce);
+ if (ret)
+ goto done;
+ break;
+ case NV_DEVICE_INFO_V0_TURING:
+ ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
+ NULL, 0, &chan->ce);
+ if (ret)
+ goto done;
+ break;
+ default:
+ break;
+ }
+
/* Named memory object area */
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 70f6aa5c9dd1..27eae85f33e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -21,6 +21,7 @@ struct nouveau_abi16_ntfy {
struct nouveau_abi16_chan {
struct list_head head;
struct nouveau_channel *chan;
+ struct nvif_object ce;
struct list_head notifiers;
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index daf9f87477ba..a2141d3d9b1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -46,8 +46,9 @@ static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
struct nouveau_backlight *bl)
{
- const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
- if (nb < 0 || nb >= 100)
+ const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+ if (nb < 0)
return false;
if (nb > 0)
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
ret = PTR_ERR(bl->dev);
goto fail_alloc;
}
@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
return;
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
backlight_device_unregister(bl->dev);
nv_conn->backlight = NULL;
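
The backlight change above swaps the deprecated ida_simple_get()/ida_simple_remove() pair for ida_alloc_max()/ida_free(); since ida_alloc_max(&bl_ida, 99, ...) can only hand out IDs in the range [0, 99], the separate "nb >= 100" bound check becomes unnecessary. A minimal, hedged sketch of that pairing in isolation (example_ida and the helpers are invented names):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Returns an ID in [0, 99], or a negative errno (e.g. -ENOSPC) when exhausted. */
static int example_get_id(void)
{
	return ida_alloc_max(&example_ida, 99, GFP_KERNEL);
}

static void example_put_id(int id)
{
	if (id >= 0)
		ida_free(&example_ida, id);
}
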
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index ea7769135b0d..48dea5d0c580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -385,7 +385,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nv_dma_v0 args = {};
int ret, i;
- nvif_object_map(&chan->user, NULL, 0);
+ ret = nvif_object_map(&chan->user, NULL, 0);
+ if (ret)
+ return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b2a970aa9bf4..84df5ddae4d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -102,7 +102,6 @@ struct nouveau_cli {
struct list_head head;
void *abi16;
struct list_head objects;
- struct list_head notifys;
char name[32];
struct work_struct work;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 4f9b3aa5deda..5226323e55d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -39,6 +39,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_atomic.h>
@@ -605,6 +606,7 @@ nouveau_fbcon_fini(struct drm_device *dev)
if (!drm->fbcon)
return;
+ drm_kms_helper_poll_fini(dev);
nouveau_fbcon_accel_fini(dev);
nouveau_fbcon_destroy(dev, drm->fbcon);
kfree(drm->fbcon);
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 52f5793b7274..df0fe58ca3ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -72,39 +72,10 @@ nvkm_client_suspend(void *priv)
}
static int
-nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size)
-{
- const union {
- struct nvif_notify_req_v0 v0;
- } *args = header;
- u8 route;
-
- if (length == sizeof(args->v0) && args->v0.version == 0) {
- route = args->v0.route;
- } else {
- WARN_ON(1);
- return NVKM_NOTIFY_DROP;
- }
-
- switch (route) {
- case NVDRM_NOTIFY_NVIF:
- return nvif_notify(header, length, data, size);
- case NVDRM_NOTIFY_USIF:
- return usif_notify(header, length, data, size);
- default:
- WARN_ON(1);
- break;
- }
-
- return NVKM_NOTIFY_DROP;
-}
-
-static int
nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
const char *dbg, void **ppriv)
{
- return nvkm_client_new(name, device, cfg, dbg, nvkm_client_ntfy,
- (struct nvkm_client **)ppriv);
+ return nvkm_client_new(name, device, cfg, dbg, nvif_notify, (struct nvkm_client **)ppriv);
}
const struct nvif_driver
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 5da1f4d223d7..36df6840c099 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -26,232 +26,15 @@
#include "nouveau_usif.h"
#include "nouveau_abi16.h"
-#include <nvif/notify.h>
#include <nvif/unpack.h>
#include <nvif/client.h>
-#include <nvif/event.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0080.h>
-struct usif_notify_p {
- struct drm_pending_event base;
- struct {
- struct drm_event base;
- u8 data[];
- } e;
-};
-
-struct usif_notify {
- struct list_head head;
- atomic_t enabled;
- u32 handle;
- u16 reply;
- u8 route;
- u64 token;
- struct usif_notify_p *p;
-};
-
-static inline struct usif_notify *
-usif_notify_find(struct drm_file *filp, u32 handle)
-{
- struct nouveau_cli *cli = nouveau_cli(filp);
- struct usif_notify *ntfy;
- list_for_each_entry(ntfy, &cli->notifys, head) {
- if (ntfy->handle == handle)
- return ntfy;
- }
- return NULL;
-}
-
-static inline void
-usif_notify_dtor(struct usif_notify *ntfy)
-{
- list_del(&ntfy->head);
- kfree(ntfy);
-}
-
-int
-usif_notify(const void *header, u32 length, const void *data, u32 size)
-{
- struct usif_notify *ntfy = NULL;
- const union {
- struct nvif_notify_rep_v0 v0;
- } *rep = header;
- struct drm_device *dev;
- struct drm_file *filp;
- unsigned long flags;
-
- if (length == sizeof(rep->v0) && rep->v0.version == 0) {
- if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token)))
- return NVIF_NOTIFY_DROP;
- BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF);
- } else
- if (WARN_ON(1))
- return NVIF_NOTIFY_DROP;
-
- if (WARN_ON(!ntfy->p || ntfy->reply != (length + size)))
- return NVIF_NOTIFY_DROP;
- filp = ntfy->p->base.file_priv;
- dev = filp->minor->dev;
-
- memcpy(&ntfy->p->e.data[0], header, length);
- memcpy(&ntfy->p->e.data[length], data, size);
- switch (rep->v0.version) {
- case 0: {
- struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
- rep->route = ntfy->route;
- rep->token = ntfy->token;
- }
- break;
- default:
- BUG();
- break;
- }
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
- list_add_tail(&ntfy->p->base.link, &filp->event_list);
- filp->event_space -= ntfy->p->e.base.length;
- }
- wake_up_interruptible(&filp->event_wait);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- atomic_set(&ntfy->enabled, 0);
- return NVIF_NOTIFY_DROP;
-}
-
-static int
-usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_ntfy_new_v0 v0;
- } *args = data;
- union {
- struct nvif_notify_req_v0 v0;
- } *req;
- struct usif_notify *ntfy;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- if (usif_notify_find(f, args->v0.index))
- return -EEXIST;
- } else
- return ret;
- req = data;
- ret = -ENOSYS;
-
- if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL)))
- return -ENOMEM;
- atomic_set(&ntfy->enabled, 0);
-
- if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) {
- ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
- ntfy->route = req->v0.route;
- ntfy->token = req->v0.token;
- req->v0.route = NVDRM_NOTIFY_USIF;
- req->v0.token = (unsigned long)(void *)ntfy;
- ret = nvif_client_ioctl(client, argv, argc);
- req->v0.token = ntfy->token;
- req->v0.route = ntfy->route;
- ntfy->handle = args->v0.index;
- }
-
- if (ret == 0)
- list_add(&ntfy->head, &cli->notifys);
- if (ret)
- kfree(ntfy);
- return ret;
-}
-
-static int
-usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_ntfy_del_v0 v0;
- } *args = data;
- struct usif_notify *ntfy;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- if (!(ntfy = usif_notify_find(f, args->v0.index)))
- return -ENOENT;
- } else
- return ret;
-
- ret = nvif_client_ioctl(client, argv, argc);
- if (ret == 0)
- usif_notify_dtor(ntfy);
- return ret;
-}
-
-static int
-usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_ntfy_del_v0 v0;
- } *args = data;
- struct usif_notify *ntfy;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- if (!(ntfy = usif_notify_find(f, args->v0.index)))
- return -ENOENT;
- } else
- return ret;
-
- if (atomic_xchg(&ntfy->enabled, 1))
- return 0;
-
- ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL);
- if (ret = -ENOMEM, !ntfy->p)
- goto done;
- ntfy->p->base.event = &ntfy->p->e.base;
- ntfy->p->base.file_priv = f;
- ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
- ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
-
- ret = nvif_client_ioctl(client, argv, argc);
-done:
- if (ret) {
- atomic_set(&ntfy->enabled, 0);
- kfree(ntfy->p);
- }
- return ret;
-}
-
-static int
-usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_ntfy_put_v0 v0;
- } *args = data;
- struct usif_notify *ntfy;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- if (!(ntfy = usif_notify_find(f, args->v0.index)))
- return -ENOENT;
- } else
- return ret;
-
- ret = nvif_client_ioctl(client, argv, argc);
- if (ret == 0 && atomic_xchg(&ntfy->enabled, 0))
- kfree(ntfy->p);
- return ret;
-}
-
struct usif_object {
struct list_head head;
- struct list_head ntfy;
u8 route;
u64 token;
};
@@ -369,16 +152,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
ret = usif_object_new(filp, data, size, argv, argc, abi16);
break;
case NVIF_IOCTL_V0_NTFY_NEW:
- ret = usif_notify_new(filp, data, size, argv, argc);
- break;
case NVIF_IOCTL_V0_NTFY_DEL:
- ret = usif_notify_del(filp, data, size, argv, argc);
- break;
case NVIF_IOCTL_V0_NTFY_GET:
- ret = usif_notify_get(filp, data, size, argv, argc);
- break;
case NVIF_IOCTL_V0_NTFY_PUT:
- ret = usif_notify_put(filp, data, size, argv, argc);
+ ret = -ENOSYS;
break;
default:
ret = nvif_client_ioctl(client, argv, argc);
@@ -410,11 +187,6 @@ void
usif_client_fini(struct nouveau_cli *cli)
{
struct usif_object *object, *otemp;
- struct usif_notify *notify, *ntemp;
-
- list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) {
- usif_notify_dtor(notify);
- }
list_for_each_entry_safe(object, otemp, &cli->objects, head) {
usif_object_dtor(object);
@@ -425,5 +197,4 @@ void
usif_client_init(struct nouveau_cli *cli)
{
INIT_LIST_HEAD(&cli->objects);
- INIT_LIST_HEAD(&cli->notifys);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index dce1ecee2af5..4d1aaee8fe15 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -250,7 +250,7 @@ nvif_object_dtor(struct nvif_object *object)
.ioctl.type = NVIF_IOCTL_V0_DEL,
};
- if (!object->client)
+ if (!nvif_object_constructed(object))
return;
nvif_object_unmap(object);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 735cb6816f10..45f920da89af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -24,6 +24,7 @@
#include <core/ioctl.h>
#include <core/client.h>
#include <core/engine.h>
+#include <core/event.h>
#include <nvif/unpack.h>
#include <nvif/ioctl.h>
@@ -128,7 +129,7 @@ nvkm_ioctl_new(struct nvkm_client *client,
if (ret == 0) {
ret = nvkm_object_init(object);
if (ret == 0) {
- list_add(&object->head, &parent->tree);
+ list_add_tail(&object->head, &parent->tree);
if (nvkm_object_insert(object)) {
client->data = object;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
index cd5e9cdca1cf..44021d1395d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
@@ -21,11 +21,35 @@
*/
#include "priv.h"
+#include <core/gpuobj.h>
+#include <core/object.h>
+
#include <nvif/class.h>
+static int
+gv100_ce_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, int align,
+ struct nvkm_gpuobj **pgpuobj)
+{
+ struct nvkm_device *device = object->engine->subdev.device;
+ u32 size;
+
+ /* Allocate fault method buffer (magics come from nvgpu). */
+ size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+ size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+ size = roundup(size, PAGE_SIZE);
+
+ return nvkm_gpuobj_new(device, size, align, true, parent, pgpuobj);
+}
+
+const struct nvkm_object_func
+gv100_ce_cclass = {
+ .bind = gv100_ce_cclass_bind,
+};
+
static const struct nvkm_engine_func
gv100_ce = {
.intr = gp100_ce_intr,
+ .cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, VOLTA_DMA_COPY_A },
{}
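For context, the gv100_ce_cclass_bind() hunk above sizes the fault method buffer from the PCE map register using constants taken from nvgpu. A standalone worked example of that arithmetic follows; the PCE map value and PAGE_SIZE are assumptions for illustration, not nouveau code:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers used in the hunk above (illustration only). */
static unsigned int hweight32(uint32_t v) { return __builtin_popcount(v); }
static uint32_t round_up_to(uint32_t v, uint32_t align) { return (v + align - 1) / align * align; }

int main(void)
{
	uint32_t pce_map = 0x3;      /* hypothetical NV_PCE_PCE_MAP value: two PCEs enabled */
	uint32_t page_size = 4096;   /* assumed PAGE_SIZE */
	uint32_t size;

	size = 27 * 5 * (((9 + 1 + 3) * hweight32(pce_map)) + 2); /* 135 * 28 = 3780 */
	size = round_up_to(size, page_size);                      /* rounds up to 4096 */
	printf("fault method buffer: %u bytes\n", size);
	return 0;
}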
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index b0c8342db15f..cd53b93664d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -6,4 +6,6 @@
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
void gp100_ce_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func gv100_ce_cclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index e5ff92d9364c..9563c0175142 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -26,6 +26,7 @@
static const struct nvkm_engine_func
tu102_ce = {
.intr = gp100_ce_intr,
+ .cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, TURING_DMA_COPY_A },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index fdca90bc8f0e..c948a0dc9e62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -24,17 +24,17 @@
#include "acpi.h"
#include <core/device.h>
+#include <subdev/clk.h>
#ifdef CONFIG_ACPI
static int
nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
{
- struct nvkm_device *device =
- container_of(nb, typeof(*device), acpi.nb);
+ struct nvkm_device *device = container_of(nb, typeof(*device), acpi.nb);
struct acpi_bus_event *info = data;
if (!strcmp(info->device_class, "ac_adapter"))
- nvkm_event_send(&device->event, 1, 0, NULL, 0);
+ nvkm_clk_pwrsrc(device);
return NOTIFY_DONE;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 62efbd0f3846..568182e68dd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -24,7 +24,6 @@
#include "priv.h"
#include "acpi.h"
-#include <core/notify.h>
#include <core/option.h>
#include <subdev/bios.h>
@@ -2668,24 +2667,6 @@ nv177_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
};
-static int
-nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
-{
- if (!WARN_ON(size != 0)) {
- notify->size = 0;
- notify->types = 1;
- notify->index = 0;
- return 0;
- }
- return -EINVAL;
-}
-
-static const struct nvkm_event_func
-nvkm_device_event_func = {
- .ctor = nvkm_device_event_ctor,
-};
-
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
@@ -2838,8 +2819,6 @@ nvkm_device_del(struct nvkm_device **pdevice)
list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
nvkm_subdev_del(&subdev);
- nvkm_event_fini(&device->event);
-
if (device->pri)
iounmap(device->pri);
list_del(&device->head);
@@ -2914,10 +2893,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);
- ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
- if (ret)
- goto done;
-
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 992cc285f2fe..ac9e122586bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -41,11 +41,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
ret = clk_prepare_enable(tdev->clk);
if (ret)
goto err_clk;
- if (tdev->clk_ref) {
- ret = clk_prepare_enable(tdev->clk_ref);
- if (ret)
- goto err_clk_ref;
- }
+ ret = clk_prepare_enable(tdev->clk_ref);
+ if (ret)
+ goto err_clk_ref;
ret = clk_prepare_enable(tdev->clk_pwr);
if (ret)
goto err_clk_pwr;
@@ -70,8 +68,7 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
err_clamp:
clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
- if (tdev->clk_ref)
- clk_disable_unprepare(tdev->clk_ref);
+ clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
@@ -87,8 +84,7 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
int ret;
clk_disable_unprepare(tdev->clk_pwr);
- if (tdev->clk_ref)
- clk_disable_unprepare(tdev->clk_ref);
+ clk_disable_unprepare(tdev->clk_ref);
clk_disable_unprepare(tdev->clk);
udelay(10);
@@ -123,7 +119,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
mutex_init(&tdev->iommu.mutex);
- if (iommu_present(&platform_bus_type)) {
+ if (device_iommu_mapped(dev)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (!tdev->iommu.domain)
goto error;
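The nvkm_device_tegra_power_up()/power_down() hunks above drop the NULL checks around tdev->clk_ref, which presumably relies on the common clock framework treating a NULL struct clk * as a dummy clock. A minimal kernel-style sketch of that pattern; the function names here are illustrative only:

#include <linux/clk.h>

/* clk may be NULL (e.g. when obtained with devm_clk_get_optional()):
 * clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a
 * no-op, so no explicit guard is needed around an optional clock.
 */
static int example_enable_optional_clk(struct clk *clk)
{
	return clk_prepare_enable(clk);
}

static void example_disable_optional_clk(struct clk *clk)
{
	clk_disable_unprepare(clk);
}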
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 113ddc103ac2..45f509c11c36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -346,6 +346,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
return -EINVAL;
oclass->base = sclass->base;
+ oclass->engine = NULL;
}
oclass->ctor = nvkm_udevice_child_new;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 2ed4ff05d207..58b8df75fc40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -144,30 +144,6 @@ nvkm_fifo_kevent_func = {
.ctor = nvkm_fifo_kevent_ctor,
};
-static int
-nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
-{
- if (size == 0) {
- notify->size = 0;
- notify->types = 1;
- notify->index = 0;
- return 0;
- }
- return -ENOSYS;
-}
-
-static const struct nvkm_event_func
-nvkm_fifo_cevent_func = {
- .ctor = nvkm_fifo_cevent_ctor,
-};
-
-void
-nvkm_fifo_cevent(struct nvkm_fifo *fifo)
-{
- nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
-}
-
static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
@@ -332,7 +308,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
if (fifo->func->dtor)
data = fifo->func->dtor(fifo);
nvkm_event_fini(&fifo->kevent);
- nvkm_event_fini(&fifo->cevent);
nvkm_event_fini(&fifo->uevent);
mutex_destroy(&fifo->mutex);
return data;
@@ -378,9 +353,5 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
return ret;
}
- ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
- if (ret)
- return ret;
-
return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 8d957643940a..2e7f32cebf2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -272,36 +272,6 @@ nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
}
static int
-nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
- if (unlikely(!chan->user)) {
- chan->user = ioremap(chan->addr, chan->size);
- if (!chan->user)
- return -ENOMEM;
- }
- if (unlikely(addr + 4 > chan->size))
- return -EINVAL;
- *data = ioread32_native(chan->user + addr);
- return 0;
-}
-
-static int
-nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
- if (unlikely(!chan->user)) {
- chan->user = ioremap(chan->addr, chan->size);
- if (!chan->user)
- return -ENOMEM;
- }
- if (unlikely(addr + 4 > chan->size))
- return -EINVAL;
- iowrite32_native(data, chan->user + addr);
- return 0;
-}
-
-static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
@@ -332,9 +302,6 @@ nvkm_fifo_chan_dtor(struct nvkm_object *object)
}
spin_unlock_irqrestore(&fifo->lock, flags);
- if (chan->user)
- iounmap(chan->user);
-
if (chan->vmm) {
nvkm_vmm_part(chan->vmm, chan->inst->memory);
nvkm_vmm_unref(&chan->vmm);
@@ -352,8 +319,6 @@ nvkm_fifo_chan_func = {
.fini = nvkm_fifo_chan_fini,
.ntfy = nvkm_fifo_chan_ntfy,
.map = nvkm_fifo_chan_map,
- .rd32 = nvkm_fifo_chan_rd32,
- .wr32 = nvkm_fifo_chan_wr32,
.sclass = nvkm_fifo_chan_child_get,
};
@@ -424,7 +389,5 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
chan->addr = device->func->resource_addr(device, bar) +
base + user * chan->chid;
chan->size = user;
-
- nvkm_fifo_cevent(fifo);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index cfbe096e604f..9713daee6c76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -14,8 +14,6 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
- struct nvkm_memory *mthd;
-
#define GK104_FIFO_ENGN_SW 15
struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index ae6c4d846eb5..80456ec70e8a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -175,13 +175,19 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
int ret;
- if (!gk104_fifo_gpfifo_engine_addr(engine))
- return 0;
+ if (!gk104_fifo_gpfifo_engine_addr(engine)) {
+ if (engine->subdev.type != NVKM_ENGINE_CE ||
+ engine->subdev.device->card_type < GV100)
+ return 0;
+ }
ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
+ if (!gk104_fifo_gpfifo_engine_addr(engine))
+ return 0;
+
ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
@@ -231,7 +237,6 @@ void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_memory_unref(&chan->mthd);
kfree(chan->cgrp);
return chan;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index 743791c514fe..428f9b41165c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -70,8 +70,17 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret;
- if (engine->subdev.type == NVKM_ENGINE_CE)
- return gk104_fifo_gpfifo_kick(chan);
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_kmap(inst);
+ nvkm_wo32(chan->base.inst, 0x220, 0x00000000);
+ nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+ nvkm_done(inst);
+ return ret;
+ }
ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
if (ret && suspend)
@@ -92,8 +101,16 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
- if (engine->subdev.type == NVKM_ENGINE_CE)
- return 0;
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);
+
+ nvkm_kmap(inst);
+ nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(bar2));
+ nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(bar2));
+ nvkm_done(inst);
+
+ return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+ }
nvkm_kmap(inst);
nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
@@ -123,11 +140,9 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
u32 *token, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
- u64 usermem, mthd;
- u32 size;
+ u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
@@ -173,20 +188,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
- /* Allocate fault method buffer (magics come from nvgpu). */
- size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
- size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
- size = roundup(size, PAGE_SIZE);
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
- &chan->mthd);
- if (ret)
- return ret;
-
- mthd = nvkm_memory_bar2(chan->mthd);
- if (mthd == ~0ULL)
- return -EFAULT;
-
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -203,10 +204,8 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
- nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
- nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
nvkm_done(chan->base.inst);
- return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+ return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 70e16a91ac12..faf0fe9f704c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -52,7 +52,7 @@ gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
nvkm_wo32(memory, offset + 0xc, 0x00000000);
}
-const struct gk104_fifo_runlist_func
+static const struct gk104_fifo_runlist_func
gv100_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 899272801a8b..79cec57647f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -7,7 +7,6 @@
int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
-void nvkm_fifo_cevent(struct nvkm_fifo *);
void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 397ff4fe9df8..f16eabf4f642 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1922,8 +1922,8 @@ gf100_gr_oneinit_tiles(struct gf100_gr *gr)
for (i = 0; i < gr->gpc_nr; i++) {
init_frac[i] = gr->tpc_nr[gpc_map[i]] * gr->gpc_nr * mul_factor;
- init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2;
- run_err[i] = init_frac[i] + init_err[i];
+ init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2;
+ run_err[i] = init_frac[i] + init_err[i];
}
for (i = 0; i < gr->tpc_total;) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 5c38ff0fe7f9..385cfd91b266 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -26,7 +26,6 @@
#include <core/firmware.h>
#include <subdev/acr.h>
-#include <subdev/secboot.h>
#include <nvfw/flcn.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index c91130a6be2a..f3f90c1063dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -221,13 +221,3 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
mutex_init(&falcon->dmem_mutex);
return 0;
}
-
-void
-nvkm_falcon_del(struct nvkm_falcon **pfalcon)
-{
- if (*pfalcon) {
- nvkm_falcon_dtor(*pfalcon);
- kfree(*pfalcon);
- *pfalcon = NULL;
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 1ff9b9c2e651..b0ee4c31414c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -309,28 +309,3 @@ nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
nvkm_falcon_wr32(falcon, 0x014, 0xff);
falcon_v1_wait_idle(falcon);
}
-
-static const struct nvkm_falcon_func
-nvkm_falcon_v1 = {
- .load_imem = nvkm_falcon_v1_load_imem,
- .load_dmem = nvkm_falcon_v1_load_dmem,
- .read_dmem = nvkm_falcon_v1_read_dmem,
- .bind_context = nvkm_falcon_v1_bind_context,
- .start = nvkm_falcon_v1_start,
- .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
- .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
- .enable = nvkm_falcon_v1_enable,
- .disable = nvkm_falcon_v1_disable,
- .set_start_addr = nvkm_falcon_v1_set_start_addr,
-};
-
-int
-nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
- struct nvkm_falcon **pfalcon)
-{
- struct nvkm_falcon *falcon;
- if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 64e423dddd9e..6c318e41bde0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -33,7 +33,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
{
u32 p = *addr;
- if (*addr > bios->image0_size && bios->imaged_addr) {
+ if (*addr >= bios->image0_size && bios->imaged_addr) {
*addr -= bios->image0_size;
*addr += bios->imaged_addr;
}
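The '>' to '>=' change in nvbios_addr() above is a boundary fix: an address equal to image0_size is already the first byte of the shadowed second image and must be redirected. A standalone sketch of that boundary case, with made-up sizes and addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t image0_size = 0x10000, imaged_addr = 0x80000; /* hypothetical values */
	uint32_t addr = 0x10000;                /* first byte past image 0 */

	if (addr >= image0_size && imaged_addr) /* the old '>' test missed exactly this byte */
		addr = addr - image0_size + imaged_addr;

	printf("redirected to 0x%x\n", addr);   /* prints 0x80000 */
	return 0;
}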
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index fac1bff1311b..cfa8a0c356dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -19,7 +19,7 @@ struct nvbios_source {
int nvbios_extend(struct nvkm_bios *, u32 length);
int nvbios_shadow(struct nvkm_bios *);
-extern const struct nvbios_source nvbios_rom;
+extern const struct nvbios_source nvbios_prom;
extern const struct nvbios_source nvbios_ramin;
extern const struct nvbios_source nvbios_acpi_fast;
extern const struct nvbios_source nvbios_acpi_slow;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 4b571cc6bc70..19188683c8fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -171,7 +171,7 @@ nvbios_shadow(struct nvkm_bios *bios)
struct shadow mthds[] = {
{ 0, &nvbios_of },
{ 0, &nvbios_ramin },
- { 0, &nvbios_rom },
+ { 0, &nvbios_prom },
{ 0, &nvbios_acpi_fast },
{ 4, &nvbios_acpi_slow },
{ 1, &nvbios_pcirom },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index ffa4b395220a..39144ceb117b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -25,7 +25,7 @@
#include <subdev/pci.h>
static u32
-prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
+nvbios_prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
struct nvkm_device *device = data;
u32 i;
@@ -38,14 +38,14 @@ prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
}
static void
-prom_fini(void *data)
+nvbios_prom_fini(void *data)
{
struct nvkm_device *device = data;
nvkm_pci_rom_shadow(device->pci, true);
}
static void *
-prom_init(struct nvkm_bios *bios, const char *name)
+nvbios_prom_init(struct nvkm_bios *bios, const char *name)
{
struct nvkm_device *device = bios->subdev.device;
if (device->card_type == NV_40 && device->chipset >= 0x4c)
@@ -55,10 +55,10 @@ prom_init(struct nvkm_bios *bios, const char *name)
}
const struct nvbios_source
-nvbios_rom = {
+nvbios_prom = {
.name = "PROM",
- .init = prom_init,
- .fini = prom_fini,
- .read = prom_read,
+ .init = nvbios_prom_init,
+ .fini = nvbios_prom_fini,
+ .read = nvbios_prom_read,
.rw = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index c2b5cc5f97ed..da07a2fbef06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -330,7 +330,6 @@ nvkm_pstate_work(struct work_struct *work)
}
wake_up_all(&clk->wait);
- nvkm_notify_get(&clk->pwrsrc_ntfy);
}
static int
@@ -559,13 +558,12 @@ nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
return nvkm_pstate_calc(clk, true);
}
-static int
-nvkm_clk_pwrsrc(struct nvkm_notify *notify)
+int
+nvkm_clk_pwrsrc(struct nvkm_device *device)
{
- struct nvkm_clk *clk =
- container_of(notify, typeof(*clk), pwrsrc_ntfy);
- nvkm_pstate_calc(clk, false);
- return NVKM_NOTIFY_DROP;
+ if (device->clk)
+ return nvkm_pstate_calc(device->clk, false);
+ return 0;
}
/******************************************************************************
@@ -582,7 +580,6 @@ static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_clk *clk = nvkm_clk(subdev);
- nvkm_notify_put(&clk->pwrsrc_ntfy);
flush_work(&clk->work);
if (clk->func->fini)
clk->func->fini(clk);
@@ -629,8 +626,6 @@ nvkm_clk_dtor(struct nvkm_subdev *subdev)
struct nvkm_clk *clk = nvkm_clk(subdev);
struct nvkm_pstate *pstate, *temp;
- nvkm_notify_fini(&clk->pwrsrc_ntfy);
-
/* Early return if the pstates have been provided statically */
if (clk->func->pstates)
return clk;
@@ -692,11 +687,6 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
clk->state_nr = func->nr_pstates;
}
- ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
- NULL, 0, 0, &clk->pwrsrc_ntfy);
- if (ret)
- return ret;
-
mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
if (mode) {
clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 8bf00b396ec1..ae793f400ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -280,7 +280,7 @@ nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
- /* PT no longer neeed? Destroy it. */
+ /* PT no longer needed? Destroy it. */
if (!pgt->refs[type]) {
it->lvl++;
TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
@@ -1048,7 +1048,7 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
/* Locate the smallest page size supported by the backend, it will
- * have the the deepest nesting of page tables.
+ * have the deepest nesting of page tables.
*/
while (page[1].shift)
page++;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 35b750cebaeb..a8a75dc24751 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
#include "omapdss.h"
#include "hdmi4_core.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 65085d886da5..868712cd8a3a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -32,6 +32,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
#include "omapdss.h"
#include "hdmi5_core.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2d3909a37f51..bfb2ccb40bd1 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -10,6 +10,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 852e78a5f142..ac869acf80ea 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 895e66b08a81..1d414b33fee3 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -6,8 +6,10 @@
#include <linux/dma-mapping.h>
+#include <drm/drm_blend.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 42eac6ad12bd..40706c5aad7b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -9,6 +9,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index b83d91ec030a..b6cb537f7689 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -6,9 +6,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 38799effd00a..a9043eacce97 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -115,6 +115,17 @@ config DRM_PANEL_EDP
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_EBBG_FT8719
+ tristate "EBBG FT8719 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the EBBG FT8719
+ video mode panel. Mainly found on Xiaomi Poco F1 mobile phone.
+ The panel has a resolution of 1080x2246. It provides a MIPI DSI
+ interface to the host.
+
config DRM_PANEL_ELIDA_KD35T133
tristate "Elida KD35T133 panel driver"
depends on OF
@@ -438,6 +449,8 @@ config DRM_PANEL_SAMSUNG_ATNA33XC20
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
depends on PM
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
help
DRM panel driver for the Samsung ATNA33XC20 panel. This panel can't
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 42a7ab54234b..34e717382dbb 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_EDP) += panel-edp.o
+obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index 44674ebedf59..174ff434bd71 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -215,14 +215,9 @@ static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
- u16 brightness = bl->props.brightness;
+ u16 brightness = backlight_get_brightness(bl);
int ret;
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
-
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 1be150ac758f..07f722f33fc5 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1511,16 +1511,28 @@ static int boe_panel_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = boe->desc->size.width_mm;
connector->display_info.height_mm = boe->desc->size.height_mm;
connector->display_info.bpc = boe->desc->bpc;
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
drm_connector_set_panel_orientation(connector, boe->orientation);
return 1;
}
+static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+
+ return boe->orientation;
+}
+
static const struct drm_panel_funcs boe_panel_funcs = {
.unprepare = boe_panel_unprepare,
.prepare = boe_panel_prepare,
.enable = boe_panel_enable,
.get_modes = boe_panel_get_modes,
+ .get_orientation = boe_panel_get_orientation,
};
static int boe_panel_add(struct boe_panel *boe)
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index b58cb064975f..b0213a518f9d 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -85,17 +85,10 @@ static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
else
return;
- if (enable) {
- backlight->props.fb_blank = FB_BLANK_UNBLANK;
- backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
- backlight->props.power = FB_BLANK_UNBLANK;
- } else {
- backlight->props.fb_blank = FB_BLANK_NORMAL;
- backlight->props.power = FB_BLANK_POWERDOWN;
- backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
- }
-
- backlight_update_status(backlight);
+ if (enable)
+ backlight_enable(backlight);
+ else
+ backlight_disable(backlight);
}
static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
@@ -196,13 +189,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
int r = 0;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
+ int level = backlight_get_brightness(dev);
dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level);
@@ -219,11 +206,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
static int dsicm_bl_get_intensity(struct backlight_device *dev)
{
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- return dev->props.brightness;
-
- return 0;
+ return backlight_get_brightness(dev);
}
static const struct backlight_ops dsicm_bl_ops = {
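The backlight hunks above (here and in panel-asus-z00t-tm5p5-n35596.c) replace open-coded power/fb_blank checks with the backlight_get_brightness() helper, which already reports 0 while the backlight is blanked. Roughly, paraphrased from include/linux/backlight.h rather than copied verbatim:

static inline int backlight_get_brightness(const struct backlight_device *bd)
{
	if (backlight_is_blank(bd))
		return 0;

	return bd->props.brightness;
}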
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
new file mode 100644
index 000000000000..386f8321b930
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Joel Selvaraj <jo@jsfamily.in>
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+static const char * const regulator_names[] = {
+ "vddio",
+ "vddpos",
+ "vddneg",
+};
+
+static const unsigned long regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000
+};
+
+struct ebbg_ft8719 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+ struct gpio_desc *reset_gpio;
+};
+
+static inline
+struct ebbg_ft8719 *to_ebbg_ft8719(struct drm_panel *panel)
+{
+ return container_of(panel, struct ebbg_ft8719, panel);
+}
+
+static void ebbg_ft8719_reset(struct ebbg_ft8719 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(4000, 5000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(15000, 16000);
+}
+
+static int ebbg_ft8719_on(struct ebbg_ft8719 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display brightness: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(90);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ebbg_ft8719_off(struct ebbg_ft8719 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ usleep_range(10000, 11000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(90);
+
+ return 0;
+}
+
+static int ebbg_ft8719_prepare(struct drm_panel *panel)
+{
+ struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ebbg_ft8719_reset(ctx);
+
+ ret = ebbg_ft8719_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ebbg_ft8719_unprepare(struct drm_panel *panel)
+{
+ struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = ebbg_ft8719_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ dev_err(panel->dev, "Failed to disable regulators: %d\n", ret);
+
+ return 0;
+}
+
+static const struct drm_display_mode ebbg_ft8719_mode = {
+ .clock = (1080 + 28 + 4 + 16) * (2246 + 120 + 4 + 12) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 4,
+ .htotal = 1080 + 28 + 4 + 16,
+ .vdisplay = 2246,
+ .vsync_start = 2246 + 120,
+ .vsync_end = 2246 + 120 + 4,
+ .vtotal = 2246 + 120 + 4 + 12,
+ .width_mm = 68,
+ .height_mm = 141,
+};
+
+static int ebbg_ft8719_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &ebbg_ft8719_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ebbg_ft8719_panel_funcs = {
+ .prepare = ebbg_ft8719_prepare,
+ .unprepare = ebbg_ft8719_unprepare,
+ .get_modes = ebbg_ft8719_get_modes,
+};
+
+static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct ebbg_ft8719 *ctx;
+ int i, ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_enable_loads[i]);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set regulator load\n");
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
+{
+ struct ebbg_ft8719 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ebbg_ft8719_of_match[] = {
+ { .compatible = "ebbg,ft8719" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ebbg_ft8719_of_match);
+
+static struct mipi_dsi_driver ebbg_ft8719_driver = {
+ .probe = ebbg_ft8719_probe,
+ .remove = ebbg_ft8719_remove,
+ .driver = {
+ .name = "panel-ebbg-ft8719",
+ .of_match_table = ebbg_ft8719_of_match,
+ },
+};
+module_mipi_dsi_driver(ebbg_ft8719_driver);
+
+MODULE_AUTHOR("Joel Selvaraj <jo@jsfamily.in>");
+MODULE_DESCRIPTION("DRM driver for EBBG FT8719 video dsi panel");
+MODULE_LICENSE("GPL v2");
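For reference, the .clock value in ebbg_ft8719_mode above is the pixel clock in kHz, i.e. htotal * vtotal * refresh / 1000. A standalone check of that arithmetic (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	const int htotal = 1080 + 28 + 4 + 16;   /* 1128 */
	const int vtotal = 2246 + 120 + 4 + 12;  /* 2382 */
	const int refresh = 60;

	/* 1128 * 2382 * 60 / 1000 = 161213 kHz, roughly a 161.2 MHz pixel clock */
	printf("pixel clock: %d kHz\n", htotal * vtotal * refresh / 1000);
	return 0;
}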
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index c96014464355..3626469c4cc2 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -39,6 +39,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
/**
@@ -417,6 +418,11 @@ static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
return 0;
}
+static bool panel_edp_can_read_hpd(struct panel_edp *p)
+{
+ return !p->no_hpd && (p->hpd_gpio || (p->aux && p->aux->wait_hpd_asserted));
+}
+
static int panel_edp_prepare_once(struct panel_edp *p)
{
struct device *dev = p->base.dev;
@@ -441,17 +447,21 @@ static int panel_edp_prepare_once(struct panel_edp *p)
if (delay)
msleep(delay);
- if (p->hpd_gpio) {
+ if (panel_edp_can_read_hpd(p)) {
if (p->desc->delay.hpd_absent)
hpd_wait_us = p->desc->delay.hpd_absent * 1000UL;
else
hpd_wait_us = 2000000;
- err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
- hpd_asserted, hpd_asserted,
- 1000, hpd_wait_us);
- if (hpd_asserted < 0)
- err = hpd_asserted;
+ if (p->hpd_gpio) {
+ err = readx_poll_timeout(gpiod_get_value_cansleep,
+ p->hpd_gpio, hpd_asserted,
+ hpd_asserted, 1000, hpd_wait_us);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+ } else {
+ err = p->aux->wait_hpd_asserted(p->aux, hpd_wait_us);
+ }
if (err) {
if (err != -ETIMEDOUT)
@@ -532,18 +542,22 @@ static int panel_edp_enable(struct drm_panel *panel)
/*
* If there is a "prepare_to_enable" delay then that's supposed to be
* the delay from HPD going high until we can turn the backlight on.
- * However, we can only count this if HPD is handled by the panel
- * driver, not if it goes to a dedicated pin on the controller.
+ * However, we can only count this if HPD is readable by the panel
+ * driver.
+ *
* If we aren't handling the HPD pin ourselves then the best we
* can do is assume that HPD went high immediately before we were
- * called (and link training took zero time).
+ * called (and link training took zero time). Note that "no-hpd"
+ * actually counts as handling HPD ourselves since we're doing the
+ * worst case delay (in prepare) ourselves.
*
* NOTE: if we ever end up in this "if" statement then we're
* guaranteed that the panel_edp_wait() call below will do no delay.
* It already handles that case, though, so we don't need any special
* code for it.
*/
- if (p->desc->delay.prepare_to_enable && !p->hpd_gpio && !p->no_hpd)
+ if (p->desc->delay.prepare_to_enable &&
+ !panel_edp_can_read_hpd(p) && !p->no_hpd)
delay = max(delay, p->desc->delay.prepare_to_enable);
if (delay)
@@ -586,7 +600,10 @@ static int panel_edp_get_modes(struct drm_panel *panel,
else if (!num)
dev_warn(p->base.dev, "No display modes\n");
- /* set up connector's "panel orientation" property */
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
drm_connector_set_panel_orientation(connector, p->orientation);
return num;
@@ -609,6 +626,13 @@ static int panel_edp_get_timings(struct drm_panel *panel,
return p->desc->num_timings;
}
+static enum drm_panel_orientation panel_edp_get_orientation(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+
+ return p->orientation;
+}
+
static int detected_panel_show(struct seq_file *s, void *data)
{
struct drm_panel *panel = s->private;
@@ -637,6 +661,7 @@ static const struct drm_panel_funcs panel_edp_funcs = {
.prepare = panel_edp_prepare,
.enable = panel_edp_enable,
.get_modes = panel_edp_get_modes,
+ .get_orientation = panel_edp_get_orientation,
.get_timings = panel_edp_get_timings,
.debugfs_init = panel_edp_debugfs_init,
};
@@ -1860,6 +1885,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index 80227617a4d6..01dd555a7f26 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -217,15 +217,27 @@ static int kd35t133_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
+static enum drm_panel_orientation kd35t133_get_orientation(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+
+ return ctx->orientation;
+}
+
static const struct drm_panel_funcs kd35t133_funcs = {
.unprepare = kd35t133_unprepare,
.prepare = kd35t133_prepare,
.get_modes = kd35t133_get_modes,
+ .get_orientation = kd35t133_get_orientation,
};
static int kd35t133_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index a9cd7135cb51..ee61d60eceae 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -209,7 +209,7 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->avdd),
"Couldn't get avdd regulator\n");
- ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Couldn't get our reset GPIO\n");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index ba30d11547ad..596861269774 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -853,17 +853,29 @@ static int ili9881c_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
+static enum drm_panel_orientation ili9881c_get_orientation(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ return ctx->orientation;
+}
+
static const struct drm_panel_funcs ili9881c_funcs = {
.prepare = ili9881c_prepare,
.unprepare = ili9881c_unprepare,
.enable = ili9881c_enable,
.disable = ili9881c_disable,
.get_modes = ili9881c_get_modes,
+ .get_orientation = ili9881c_get_orientation,
};
static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index f11252fb00fe..de8758c30e6e 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -99,15 +99,28 @@ static int panel_lvds_get_modes(struct drm_panel *panel,
drm_display_info_set_bus_formats(&connector->display_info,
&lvds->bus_format, 1);
connector->display_info.bus_flags = lvds->bus_flags;
+
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
drm_connector_set_panel_orientation(connector, lvds->orientation);
return 1;
}
+static enum drm_panel_orientation panel_lvds_get_orientation(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ return lvds->orientation;
+}
+
static const struct drm_panel_funcs panel_lvds_funcs = {
.unprepare = panel_lvds_unprepare,
.prepare = panel_lvds_prepare,
.get_modes = panel_lvds_get_modes,
+ .get_orientation = panel_lvds_get_orientation,
};
static int panel_lvds_parse_dt(struct panel_lvds *lvds)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 873cbd38e6d3..40ea41b0a5dd 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -190,7 +190,7 @@ struct nt35510_config {
* 6 = Hsync x 2
* 7 = Hsync x 4
* bits 4..6 in the upper nibble controls BTP, the boosting
- * amplification for the the step-up circuit:
+ * amplification for the step-up circuit:
* 0 = Disable
* 1 = 1.5 x VDDB
* 2 = 1.66 x VDDB
@@ -211,7 +211,7 @@ struct nt35510_config {
* bits 0..2 in the lower nibble controls NCK, the booster clock
* frequency, the values are the same as for PCK in @bt1ctr.
* bits 4..5 in the upper nibble controls BTN, the boosting
- * amplification for the the step-up circuit.
+ * amplification for the step-up circuit.
* 0 = Disable
* 1 = -1.5 x VDDB
* 2 = -2 x VDDB
@@ -250,7 +250,7 @@ struct nt35510_config {
* bits 0..2 in the lower nibble controls LCK, the booster clock
* frequency, the values are the same as for PCK in @bt1ctr.
* bits 4..5 in the upper nibble controls BTL, the boosting
- * amplification for the the step-up circuit.
+ * amplification for the step-up circuit.
* 0 = AVEE + VCL
* 1 = AVEE - AVDD
* 2 = AVEE + VCL - AVDD
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 231f371901e8..6d6ce42787e2 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -628,6 +628,10 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ ret = drm_panel_of_backlight(&pinfo->base);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
drm_panel_add(&pinfo->base);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 145047e19394..a6dc5ab182fa 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -45,6 +45,7 @@
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 572547d1aa83..4e021a572211 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 3dd10412d147..5a8b978c6415 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -19,6 +19,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
+/* T3 VCC to HPD high is max 200 ms */
+#define HPD_MAX_MS 200
+#define HPD_MAX_US (HPD_MAX_MS * 1000)
+
struct atana33xc20_panel {
struct drm_panel base;
bool prepared;
@@ -30,6 +34,7 @@ struct atana33xc20_panel {
struct regulator *supply;
struct gpio_desc *el_on3_gpio;
+ struct drm_dp_aux *aux;
struct edid *edid;
@@ -79,7 +84,7 @@ static int atana33xc20_suspend(struct device *dev)
static int atana33xc20_resume(struct device *dev)
{
struct atana33xc20_panel *p = dev_get_drvdata(dev);
- bool hpd_asserted = false;
+ int hpd_asserted;
int ret;
/* T12 (Power off time) is min 500 ms */
@@ -90,23 +95,41 @@ static int atana33xc20_resume(struct device *dev)
return ret;
p->powered_on_time = ktime_get();
- /*
- * Handle HPD. Note: if HPD is hooked up to a dedicated pin on the
- * eDP controller then "no_hpd" will be false _and_ "hpd_gpio" will be
- * NULL. It's up to the controller driver to wait for HPD after
- * preparing the panel in that case.
- */
if (p->no_hpd) {
- /* T3 VCC to HPD high is max 200 ms */
- msleep(200);
- } else if (p->hpd_gpio) {
+ msleep(HPD_MAX_MS);
+ return 0;
+ }
+
+ if (p->hpd_gpio) {
ret = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
hpd_asserted, hpd_asserted,
- 1000, 200000);
- if (!hpd_asserted)
- dev_warn(dev, "Timeout waiting for HPD\n");
+ 1000, HPD_MAX_US);
+ if (hpd_asserted < 0)
+ ret = hpd_asserted;
+
+ if (ret)
+ dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
+
+ return ret;
}
+ if (p->aux->wait_hpd_asserted) {
+ ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
+
+ if (ret)
+ dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
+
+ return ret;
+ }
+
+ /*
+ * Note that it's possible that no_hpd is false, hpd_gpio is
+ * NULL, and wait_hpd_asserted is NULL. This is because
+ * wait_hpd_asserted() is optional even if HPD is hooked up to
+ * a dedicated pin on the eDP controller. In this case we just
+ * assume that the controller driver will wait for HPD at the
+ * right times.
+ */
return 0;
}
@@ -263,6 +286,8 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
return -ENOMEM;
dev_set_drvdata(dev, panel);
+ panel->aux = aux_ep->aux;
+
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
return dev_err_probe(dev, PTR_ERR(panel->supply),
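Condensed, the reworked resume path above tries the HPD sources in a fixed order; this sketch only shows that ordering, and the wait_for_hpd() helper name is illustrative rather than part of the driver:

/* Illustrative only: the ordering of HPD sources used in atana33xc20_resume(). */
static int wait_for_hpd(struct atana33xc20_panel *p, struct device *dev)
{
	int hpd_asserted;
	int ret;

	if (p->no_hpd) {
		/* No HPD signal wired up at all: wait out T3 (max 200 ms). */
		msleep(HPD_MAX_MS);
		return 0;
	}

	if (p->hpd_gpio) {
		/* Poll the dedicated HPD GPIO until it reads high or times out. */
		ret = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
					 hpd_asserted, hpd_asserted,
					 1000, HPD_MAX_US);
		if (hpd_asserted < 0)
			ret = hpd_asserted;
		if (ret)
			dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
		return ret;
	}

	if (p->aux->wait_hpd_asserted)
		/* Delegate the wait to the eDP controller's DP AUX implementation. */
		return p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);

	/* Otherwise assume the controller driver waits for HPD at the right times. */
	return 0;
}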
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 3939b25e6666..76160e5d43bd 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 4a2e580a2f7b..ff5e1a44c43a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,6 +23,8 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -35,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -411,7 +414,10 @@ static int panel_simple_get_modes(struct drm_panel *panel,
/* add hard-coded panel modes */
num += panel_simple_get_non_edid_modes(p, connector);
- /* set up connector's "panel orientation" property */
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
drm_connector_set_panel_orientation(connector, p->orientation);
return num;
@@ -434,12 +440,20 @@ static int panel_simple_get_timings(struct drm_panel *panel,
return p->desc->num_timings;
}
+static enum drm_panel_orientation panel_simple_get_orientation(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ return p->orientation;
+}
+
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
.unprepare = panel_simple_unprepare,
.prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
+ .get_orientation = panel_simple_get_orientation,
.get_timings = panel_simple_get_timings,
};
@@ -663,8 +677,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
err = drm_panel_of_backlight(&panel->base);
- if (err)
+ if (err) {
+ dev_err_probe(dev, err, "Could not find backlight\n");
goto disable_pm_runtime;
+ }
drm_panel_add(&panel->base);
@@ -778,6 +794,36 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct display_timing ampire_am800600p5tmqw_tb8h_timing = {
+ .pixelclock = { 34500000, 39600000, 50400000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 12, 112, 312 },
+ .hback_porch = { 87, 87, 48 },
+ .hsync_len = { 1, 1, 40 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 21, 61 },
+ .vback_porch = { 38, 38, 19 },
+ .vsync_len = { 1, 1, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc ampire_am800600p5tmqwtb8h = {
+ .timings = &ampire_am800600p5tmqw_tb8h_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 162,
+ .height = 122,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
.pixelclock = { 26400000, 33300000, 46800000 },
.hactive = { 800, 800, 800 },
@@ -1427,6 +1473,30 @@ static const struct panel_desc dataimage_fg040346dsswbg04 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing dataimage_fg1001l0dsswmg01_timing = {
+ .pixelclock = { 68900000, 71110000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .vactive = { 800, 800, 800 },
+ .hback_porch = { 100, 100, 100 },
+ .hfront_porch = { 100, 100, 100 },
+ .vback_porch = { 5, 5, 5 },
+ .vfront_porch = { 5, 5, 5 },
+ .hsync_len = { 24, 24, 24 },
+ .vsync_len = { 3, 3, 3 },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc dataimage_fg1001l0dsswmg01 = {
+ .timings = &dataimage_fg1001l0dsswmg01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -1685,6 +1755,32 @@ static const struct panel_desc edt_etm0700g0bdh6 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing edt_etml0700y5dha_timing = {
+ .pixelclock = { 40800000, 51200000, 67200000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 30, 106, 125 },
+ .hback_porch = { 30, 106, 125 },
+ .hsync_len = { 30, 108, 126 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 3, 12, 67 },
+ .vback_porch = { 3, 12, 67 },
+ .vsync_len = { 4, 11, 66 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc edt_etml0700y5dha = {
+ .timings = &edt_etml0700y5dha_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 155,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode edt_etmv570g2dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -1953,6 +2049,31 @@ static const struct panel_desc hannstar_hsd100pxn1 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing hannstar_hsd101pww2_timing = {
+ .pixelclock = { 64300000, 71100000, 82000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 1, 1, 10 },
+ .hback_porch = { 1, 1, 10 },
+ .hsync_len = { 58, 158, 661 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 1, 10 },
+ .vback_porch = { 1, 1, 10 },
+ .vsync_len = { 1, 21, 203 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc hannstar_hsd101pww2 = {
+ .timings = &hannstar_hsd101pww2_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
.clock = 33333,
.hdisplay = 800,
@@ -3755,6 +3876,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ampire,am800480r3tmqwa1h",
.data = &ampire_am800480r3tmqwa1h,
}, {
+ .compatible = "ampire,am800600p5tmqw-tb8h",
+ .data = &ampire_am800600p5tmqwtb8h,
+ }, {
.compatible = "arm,rtsm-display",
.data = &arm_rtsm,
}, {
@@ -3833,6 +3957,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "dataimage,fg040346dsswbg04",
.data = &dataimage_fg040346dsswbg04,
}, {
+ .compatible = "dataimage,fg1001l0dsswmg01",
+ .data = &dataimage_fg1001l0dsswmg01,
+ }, {
.compatible = "dataimage,scf0700c48ggu18",
.data = &dataimage_scf0700c48ggu18,
}, {
@@ -3869,6 +3996,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0edh6",
.data = &edt_etm0700g0bdh6,
}, {
+ .compatible = "edt,etml0700y5dha",
+ .data = &edt_etml0700y5dha,
+ }, {
.compatible = "edt,etmv570g2dhu",
.data = &edt_etmv570g2dhu,
}, {
@@ -3899,6 +4029,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "hannstar,hsd100pxn1",
.data = &hannstar_hsd100pxn1,
}, {
+ .compatible = "hannstar,hsd101pww2",
+ .data = &hannstar_hsd101pww2,
+ }, {
.compatible = "hit,tx23d38vm0caa",
.data = &hitachi_tx23d38vm0caa
}, {
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 0d7541a33f87..3d6a286056a0 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -298,13 +298,7 @@ static void acx565akm_set_brightness(struct acx565akm_panel *lcd, int level)
static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
{
struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
+ int level = backlight_get_brightness(dev);
acx565akm_set_brightness(lcd, level);
@@ -330,8 +324,7 @@ static int acx565akm_bl_get_intensity(struct backlight_device *dev)
mutex_lock(&lcd->mutex);
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
+ if (!backlight_is_blank(dev))
intensity = acx565akm_get_actual_brightness(lcd);
else
intensity = 0;
@@ -349,7 +342,6 @@ static const struct backlight_ops acx565akm_bl_ops = {
static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
{
struct backlight_properties props = {
- .fb_blank = FB_BLANK_UNBLANK,
.power = FB_BLANK_UNBLANK,
.type = BACKLIGHT_RAW,
};
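The acx565akm conversion relies on the generic backlight helpers, which already encode the blank/power checks that were open-coded before; a minimal sketch of an update_status callback written against them, where my_hw_set_brightness() is a stand-in for the device-specific register write:

#include <linux/backlight.h>

/* Stand-in for the device-specific brightness write. */
static int my_hw_set_brightness(void *priv, int brightness);

static int my_bl_update_status(struct backlight_device *bd)
{
	/*
	 * backlight_get_brightness() returns props.brightness, or 0 when the
	 * device is blanked or powered down, so no open-coded FB_BLANK_UNBLANK
	 * checks are needed here.
	 */
	int brightness = backlight_get_brightness(bd);

	return my_hw_set_brightness(bl_get_data(bd), brightness);
}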
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 7f51a4682ccb..ee612303f076 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -11,6 +11,7 @@
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_features.h"
+#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
@@ -380,9 +381,13 @@ const char *panfrost_exception_name(u32 exception_code)
bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev,
u32 exception_code)
{
- /* Right now, none of the GPU we support need a reset, but this
- * might change.
+ /* If an occlusion query write causes a bus fault on affected GPUs,
+ * future fragment jobs may hang. Reset to work around this.

*/
+ if (exception_code == DRM_PANFROST_EXCEPTION_JOB_BUS_FAULT)
+ return panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_3076);
+
+ /* No other GPUs we support need a reset */
return false;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 7fcbc2a5b6cd..c58075bc096e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
@@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
- job->file_priv = file->driver_priv;
+ job->mmu = file_priv->mmu;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
- &job->file_priv->sched_entity[slot],
+ &file_priv->sched_entity[slot],
NULL);
if (ret)
goto out_put_job;
@@ -664,6 +665,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-t860", .data = &default_data, },
{ .compatible = "arm,mali-t880", .data = &default_data, },
{ .compatible = "arm,mali-bifrost", .data = &default_data, },
+ { .compatible = "arm,mali-valhall-jm", .data = &default_data, },
{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
{}
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 36fadcf9634e..7ed0cd3ea2d4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -21,6 +21,7 @@ enum panfrost_hw_feature {
HW_FEATURE_TLS_HASHING,
HW_FEATURE_THREAD_GROUP_SPLIT,
HW_FEATURE_IDVS_GROUP_SIZE,
+ HW_FEATURE_CLEAN_ONLY_SAFE,
HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
};
@@ -105,6 +106,18 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_TLS_HASHING) | \
BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+#define hw_features_g57 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
+ BIT_ULL(HW_FEATURE_CLEAN_ONLY_SAFE))
+
static inline bool panfrost_has_hw_feature(struct panfrost_device *pfdev,
enum panfrost_hw_feature feat)
{
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index aa89926742fd..6452e4e900dd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -108,6 +108,9 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
quirks |= SC_LS_ALLOW_ATTR_TYPES;
}
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_2968_TTRX_3162))
+ quirks |= SC_VAR_ALGORITHM;
+
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
quirks |= SC_TLS_HASH_ENABLE;
@@ -124,18 +127,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
gpu_write(pfdev, GPU_TILER_CONFIG, quirks);
- quirks = gpu_read(pfdev, GPU_L2_MMU_CONFIG);
-
- /* Limit read & write ID width for AXI */
- if (panfrost_has_hw_feature(pfdev, HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
- quirks &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS |
- L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
- else
- quirks &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS |
- L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
-
- gpu_write(pfdev, GPU_L2_MMU_CONFIG, quirks);
-
quirks = 0;
if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
pfdev->features.revision >= 0x2000)
@@ -210,6 +201,9 @@ static const struct panfrost_model gpu_models[] = {
GPU_MODEL(g52, 0x7002),
GPU_MODEL(g31, 0x7003,
GPU_REV(g31, 1, 0)),
+
+ GPU_MODEL(g57, 0x9001,
+ GPU_REV(g57, 0, 0)),
};
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
index 501a76c5e95f..eb60cb83667a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_issues.h
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -125,6 +125,16 @@ enum panfrost_hw_issue {
* kernel must fiddle with L2 caches to prevent data leakage */
HW_ISSUE_TGOX_R1_1234,
+ /* Must set SC_VAR_ALGORITHM */
+ HW_ISSUE_TTRX_2968_TTRX_3162,
+
+ /* Bus fault from occlusion query write may cause future fragment jobs
+ * to hang */
+ HW_ISSUE_TTRX_3076,
+
+ /* Must issue a dummy job before starting real work to prevent hangs */
+ HW_ISSUE_TTRX_3485,
+
HW_ISSUE_END
};
@@ -248,7 +258,14 @@ enum panfrost_hw_issue {
#define hw_issues_g76 0
-static inline bool panfrost_has_hw_issue(struct panfrost_device *pfdev,
+#define hw_issues_g57 (\
+ BIT_ULL(HW_ISSUE_TTRX_2968_TTRX_3162) | \
+ BIT_ULL(HW_ISSUE_TTRX_3076))
+
+#define hw_issues_g57_r0p0 (\
+ BIT_ULL(HW_ISSUE_TTRX_3485))
+
+static inline bool panfrost_has_hw_issue(const struct panfrost_device *pfdev,
enum panfrost_hw_issue issue)
{
return test_bit(issue, pfdev->features.hw_issues);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index fda5871aebe3..7c4208476fbd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
return;
}
- cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
+ cfg = panfrost_mmu_as_get(pfdev, job->mmu);
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
@@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
job->jc = 0;
}
- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+ panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
if (signal_fence)
@@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
* happen when we receive the DONE interrupt while doing a GPU reset).
*/
job->jc = 0;
- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+ panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 77e6d0e6f612..8becc1ba0eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -17,7 +17,7 @@ struct panfrost_job {
struct kref refcount;
struct panfrost_device *pfdev;
- struct panfrost_file_priv *file_priv;
+ struct panfrost_mmu *mmu;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 0b6cd8fdcb47..accb4fa3adb8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -195,6 +195,7 @@
#define SC_TLS_HASH_ENABLE BIT(17)
#define SC_LS_ATTR_CHECK_DISABLE BIT(18)
#define SC_ENABLE_TEXGRD_FLAGS BIT(25)
+#define SC_VAR_ALGORITHM BIT(29)
/* End SHADER_CONFIG register */
/* TILER_CONFIG register */
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 443e3b932322..6263346f24c6 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -12,10 +12,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
+#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vblank.h>
@@ -248,7 +250,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
cntl |= CNTL_ST_CDWID_24;
/*
- * Note that the the ARM hardware's format reader takes 'r' from
+ * Note that the ARM hardware's format reader takes 'r' from
* the low bit, while DRM formats list channels from high bit
* to low bit as you read left to right. The ST Micro version of
* the PL110 (LCDC) however uses the standard DRM format.
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 520301b405f1..19a4324bd356 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -50,6 +50,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index bdd883f4f0da..efb01a554574 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -18,6 +18,8 @@
#include <linux/regmap.h>
#include <linux/vexpress.h>
+#include <drm/drm_fourcc.h>
+
#include "pl111_versatile.h"
#include "pl111_drm.h"
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 9a64fa4c7530..2e8949863d6b 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -30,6 +30,8 @@
#include <drm/drm_drv.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index a93de9e1977a..3a3e127ce297 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,6 +23,7 @@
#include <linux/iosys-map.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index a054e4a00fe8..9bf6d4cc98d4 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -33,8 +33,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-int qxl_log_level;
-
static bool qxl_check_device(struct qxl_device *qdev)
{
struct qxl_rom *rom = qdev->rom;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 2e1bc01aa5c9..970e192b0d51 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -300,8 +300,8 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
# define R128_PM4_64PIO_128INDBM (5 << 28)
# define R128_PM4_64BM_128INDBM (6 << 28)
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
-# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
-# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
+# define R128_PM4_64BM_64VCBM_64INDBM (8U << 28)
+# define R128_PM4_64PIO_64VCPIO_64INDPIO (15U << 28)
# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27)
#define R128_PM4_BUFFER_WM_CNTL 0x0708
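The U suffixes in the r128 change above matter because, with a 32-bit int, shifting a set bit into bit 31 of a signed constant is undefined behaviour; a small user-space illustration of the well-defined unsigned form:

#include <stdio.h>

int main(void)
{
	/*
	 * 8 << 28 and 15 << 28 would overflow a signed 32-bit int (the result
	 * needs bit 31), which is undefined behaviour in C. With an unsigned
	 * constant the shift is well defined and yields the intended masks.
	 */
	unsigned int a = 8U << 28;	/* 0x80000000 */
	unsigned int b = 15U << 28;	/* 0xf0000000 */

	printf("%#x %#x\n", a, b);
	return 0;
}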
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c94e429e75f9..69f1bc073902 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -28,6 +28,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 455f8036aa54..4f06356d9ce2 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -29,6 +29,7 @@
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "atom.h"
#include "avivod.h"
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 2dd85ba1faa2..d4f09ecc3d22 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -35,6 +35,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 429644d5ddc6..2b12389f841a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -38,6 +38,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 57ff2b723c87..f12675e3d261 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -36,6 +36,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ca382fbf7a86..6ccea51d4072 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8817fd033cd0..6072ed5f2dd3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -28,6 +28,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b87dd551e939..8cf87a0a2b2a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -42,6 +42,7 @@
#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "atom.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 38796af4fadd..26fa9b095514 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -33,6 +33,7 @@
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "atom.h"
#include "avivod.h"
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3977aaa1ab5a..abf8022eb884 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -8,6 +8,7 @@
*/
#include <linux/export.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 190dbb7f15dd..0f09e1ee0390 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -12,6 +12,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 5c1c7bb04f3f..e98b76db703a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -9,10 +9,12 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 8eb9b2b097ae..e778fd52f890 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -9,9 +9,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 505a905e3ad1..4fd6067f6fb4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -7,7 +7,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8dbfbbd3cad1..830aac0a2cb4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
index 891bb956fd61..31ed285073e0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -679,23 +679,12 @@ static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi)
{
- struct device_node *ep;
- u32 data_lanes[4];
int ret;
- ep = of_graph_get_endpoint_by_regs(dsi->dev->of_node, 1, 0);
- if (!ep) {
- dev_dbg(dsi->dev, "unconnected port@1\n");
- return -ENODEV;
- }
-
- ret = of_property_read_variable_u32_array(ep, "data-lanes", data_lanes,
- 1, 4);
- of_node_put(ep);
-
+ ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0) {
dev_err(dsi->dev, "missing or invalid data-lanes property\n");
- return -ENODEV;
+ return ret;
}
dsi->num_data_lanes = ret;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 5afab49dc4f2..53c2d9980d48 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,7 +2,6 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- select DRM_DISPLAY_HELPER if ROCKCHIP_ANALOGIX_DP
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
@@ -38,6 +37,7 @@ config ROCKCHIP_VOP2
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
depends on ROCKCHIP_VOP
+ select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
help
This selects support for Rockchip SoC specific extensions
@@ -47,6 +47,8 @@ config ROCKCHIP_ANALOGIX_DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable Dp on
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 70be64ca0a00..ad2d3ae7e621 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -408,7 +408,15 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- return component_add(dev, &rockchip_dp_component_ops);
+ ret = component_add(dev, &rockchip_dp_component_ops);
+ if (ret)
+ goto err_dp_remove;
+
+ return 0;
+
+err_dp_remove:
+ analogix_dp_remove(dp->adp);
+ return ret;
}
static int rockchip_dp_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index c8c3612a4fe2..cf2cf51091a3 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -4,6 +4,7 @@
* Zheng Yang <zhengyang@rock-chips.com>
*/
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 0d2cb4f3922b..092bf863110b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -11,6 +11,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 74562d40f639..ad3958b6f8bf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -20,9 +20,11 @@
#include <drm/drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
@@ -1570,6 +1572,9 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *rockchip_state;
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
if (!rockchip_state)
return NULL;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 26ac91db0f35..e4631f515ba4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -22,10 +23,12 @@
#include <drm/drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -1202,7 +1205,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
stride = (fb->pitches[0] << 3) / bpp;
if ((stride & 0x3f) && (xmirror || rotate_90 || rotate_270))
- drm_err(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligened\n",
+ drm_err(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
vp->id, win->data->name, stride);
rb_swap = vop2_afbc_rb_swap(fb->format->format);
@@ -1473,7 +1476,7 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
default:
drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
return;
- };
+ }
dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
@@ -1524,6 +1527,7 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
if (ret < 0) {
drm_err(vop2->drm, "failed to enable dclk for video port%d - %d\n",
vp->id, ret);
+ vop2_unlock(vop2);
return;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 418eb631d7cd..75eb7cca3d82 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -6,6 +6,7 @@
*/
#include <linux/component.h>
+#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 191c56064f19..6b25b2f4f5a3 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
- init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
- irq_work_queue(&job->work);
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
}
static struct dma_fence *
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
index 8d8d8e214c28..816e1464a98f 100644
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) "drm_damage_helper: " fmt
#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
index 967c52150b67..4caa9be900ac 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Test cases for for the DRM DP MST helpers
+ * Test cases for the DRM DP MST helpers
*/
#define PREFIX_STR "[drm_dp_mst_helper]"
diff --git a/drivers/gpu/drm/selftests/test-drm_plane_helper.c b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
index b61273e9c403..64e8938ab194 100644
--- a/drivers/gpu/drm/selftests/test-drm_plane_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) "drm_plane_helper: " fmt
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_modes.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
index f6628a5ee95f..794573badfe8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -18,11 +18,7 @@ static int shmob_drm_backlight_update(struct backlight_device *bdev)
struct shmob_drm_connector *scon = bl_get_data(bdev);
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
- int brightness = bdev->props.brightness;
-
- if (bdev->props.power != FB_BLANK_UNBLANK ||
- bdev->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
+ int brightness = backlight_get_brightness(bdev);
return bdata->set_brightness(brightness);
}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 03556dbfcafb..071a929e9fe3 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 7a866d6ce6bb..68d21be784aa 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index cbc464f006b4..4763ea8e1af0 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -11,6 +11,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include "shmob_drm_drv.h"
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 43722adab1f8..07802907e39a 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -143,6 +143,7 @@ static const struct of_device_id ssd130x_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
+#if IS_MODULE(CONFIG_DRM_SSD130X_SPI)
/*
* The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even
* if the device was registered via OF. This means that the module will not be
@@ -160,6 +161,7 @@ static const struct spi_device_id ssd130x_spi_table[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
+#endif
static struct spi_driver ssd130x_spi_driver = {
.driver = {
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 08394444dd6e..77f80b0d3a5e 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -20,9 +20,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 1637203ea103..3664089b6983 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -17,8 +17,10 @@
#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 9caaf3ccfabe..142a8e1b4436 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -9,6 +9,7 @@
#include <linux/component.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 414c9973aa6d..1e9bd4241f10 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index b58415f2e4d8..af783f599306 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -7,12 +7,14 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/of.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 03f3377f918c..03cc401ed593 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index b3fbee7eac11..61a034a01764 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -8,6 +8,7 @@
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/hdmi.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 2201a50353eb..271982080437 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -10,6 +10,7 @@
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
@@ -17,6 +18,7 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index 173409cdb99e..c74b524663ab 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -8,8 +8,10 @@
#include <linux/types.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 0da7cce2a1a2..c63945dc2260 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -95,6 +95,7 @@ static int drv_load(struct drm_device *ddev)
ddev->mode_config.max_width = STM_MAX_FB_WIDTH;
ddev->mode_config.max_height = STM_MAX_FB_HEIGHT;
ddev->mode_config.funcs = &drv_mode_config_funcs;
+ ddev->mode_config.normalize_zpos = true;
ret = ltdc_load(ddev);
if (ret)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 6bd45df8f5a7..da7a0a183b27 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -12,6 +12,7 @@
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
@@ -23,10 +24,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
@@ -162,16 +166,20 @@
#define BCCR_BCWHITE GENMASK(23, 0) /* Background Color WHITE */
#define IER_LIE BIT(0) /* Line Interrupt Enable */
-#define IER_FUIE BIT(1) /* Fifo Underrun Interrupt Enable */
+#define IER_FUWIE BIT(1) /* Fifo Underrun Warning Interrupt Enable */
#define IER_TERRIE BIT(2) /* Transfer ERRor Interrupt Enable */
-#define IER_RRIE BIT(3) /* Register Reload Interrupt enable */
+#define IER_RRIE BIT(3) /* Register Reload Interrupt Enable */
+#define IER_FUEIE BIT(6) /* Fifo Underrun Error Interrupt Enable */
+#define IER_CRCIE BIT(7) /* CRC Error Interrupt Enable */
#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
#define ISR_LIF BIT(0) /* Line Interrupt Flag */
-#define ISR_FUIF BIT(1) /* Fifo Underrun Interrupt Flag */
+#define ISR_FUWIF BIT(1) /* Fifo Underrun Warning Interrupt Flag */
#define ISR_TERRIF BIT(2) /* Transfer ERRor Interrupt Flag */
#define ISR_RRIF BIT(3) /* Register Reload Interrupt Flag */
+#define ISR_FUEIF BIT(6) /* Fifo Underrun Error Interrupt Flag */
+#define ISR_CRCIF BIT(7) /* CRC Error Interrupt Flag */
#define EDCR_OCYEN BIT(25) /* Output Conversion to YCbCr 422: ENable */
#define EDCR_OCYSEL BIT(26) /* Output Conversion to YCbCr 422: SELection of the CCIR */
@@ -180,6 +188,7 @@
#define LXCR_LEN BIT(0) /* Layer ENable */
#define LXCR_COLKEN BIT(1) /* Color Keying Enable */
#define LXCR_CLUTEN BIT(4) /* Color Look-Up Table ENable */
+#define LXCR_HMEN BIT(8) /* Horizontal Mirroring ENable */
#define LXWHPCR_WHSTPOS GENMASK(11, 0) /* Window Horizontal StarT POSition */
#define LXWHPCR_WHSPPOS GENMASK(27, 16) /* Window Horizontal StoP POSition */
@@ -194,9 +203,10 @@
#define LXBFCR_BF2 GENMASK(2, 0) /* Blending Factor 2 */
#define LXBFCR_BF1 GENMASK(10, 8) /* Blending Factor 1 */
+#define LXBFCR_BOR GENMASK(18, 16) /* Blending ORder */
#define LXCFBLR_CFBLL GENMASK(12, 0) /* Color Frame Buffer Line Length */
-#define LXCFBLR_CFBP GENMASK(28, 16) /* Color Frame Buffer Pitch in bytes */
+#define LXCFBLR_CFBP GENMASK(31, 16) /* Color Frame Buffer Pitch in bytes */
#define LXCFBLNR_CFBLN GENMASK(10, 0) /* Color Frame Buffer Line Number */
@@ -229,6 +239,8 @@
#define NB_PF 8 /* Max nb of HW pixel format */
+#define FUT_DFT 128 /* Default value of fifo underrun threshold */
+
/*
* Skip the first value and the second in case CRC was enabled during
* the thread irq. This is to be sure CRC value is relevant for the
@@ -709,12 +721,13 @@ static irqreturn_t ltdc_irq_thread(int irq, void *arg)
ltdc_irq_crc_handle(ldev, crtc);
}
- /* Save FIFO Underrun & Transfer Error status */
mutex_lock(&ldev->err_lock);
- if (ldev->irq_status & ISR_FUIF)
- ldev->error_status |= ISR_FUIF;
if (ldev->irq_status & ISR_TERRIF)
- ldev->error_status |= ISR_TERRIF;
+ ldev->transfer_err++;
+ if (ldev->irq_status & ISR_FUEIF)
+ ldev->fifo_err++;
+ if (ldev->irq_status & ISR_FUWIF)
+ ldev->fifo_warn++;
mutex_unlock(&ldev->err_lock);
return IRQ_HANDLED;
@@ -773,7 +786,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
regmap_write(ldev->regmap, LTDC_BCCR, BCCR_BCBLACK);
/* Enable IRQ */
- regmap_set_bits(ldev->regmap, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
+ regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
/* Commit shadow registers = update planes at next vblank */
if (!ldev->caps.plane_reg_shadow)
@@ -787,19 +800,32 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
+ int layer_index = 0;
DRM_DEBUG_DRIVER("\n");
drm_crtc_vblank_off(crtc);
+ /* Disable all layers */
+ for (layer_index = 0; layer_index < ldev->caps.nb_layers; layer_index++)
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS,
+ LXCR_CLUTEN | LXCR_LEN, 0);
+
/* disable IRQ */
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
/* immediately commit disable of layers before switching off LTDC */
if (!ldev->caps.plane_reg_shadow)
regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_IMR);
pm_runtime_put_sync(ddev->dev);
+
+ /* clear interrupt error counters */
+ mutex_lock(&ldev->err_lock);
+ ldev->transfer_err = 0;
+ ldev->fifo_err = 0;
+ ldev->fifo_warn = 0;
+ mutex_unlock(&ldev->err_lock);
}
#define CLK_TOLERANCE_HZ 50
@@ -902,9 +928,9 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
drm_connector_list_iter_end(&iter);
}
- if (bridge && bridge->timings)
+ if (bridge && bridge->timings) {
bus_flags = bridge->timings->input_bus_flags;
- else if (connector) {
+ } else if (connector) {
bus_flags = connector->display_info.bus_flags;
if (connector->display_info.num_bus_formats)
bus_formats = connector->display_info.bus_formats[0];
@@ -1160,6 +1186,18 @@ static int ltdc_crtc_verify_crc_source(struct drm_crtc *crtc,
return 0;
}
+static void ltdc_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+
+ drm_printf(p, "\ttransfer_error=%d\n", ldev->transfer_err);
+ drm_printf(p, "\tfifo_underrun_error=%d\n", ldev->fifo_err);
+ drm_printf(p, "\tfifo_underrun_warning=%d\n", ldev->fifo_warn);
+ drm_printf(p, "\tfifo_underrun_threshold=%d\n", ldev->fifo_threshold);
+}
+
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
@@ -1170,6 +1208,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .atomic_print_state = ltdc_crtc_atomic_print_state,
};
static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
@@ -1184,6 +1223,7 @@ static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.set_crc_source = ltdc_crtc_set_crc_source,
.verify_crc_source = ltdc_crtc_verify_crc_source,
+ .atomic_print_state = ltdc_crtc_atomic_print_state,
};
/*
@@ -1209,7 +1249,8 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
/* Reject scaling */
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
- DRM_ERROR("Scaling is not supported");
+ DRM_DEBUG_DRIVER("Scaling is not supported");
+
return -EINVAL;
}
@@ -1229,7 +1270,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
u32 y0 = newstate->crtc_y;
u32 y1 = newstate->crtc_y + newstate->crtc_h - 1;
u32 src_x, src_y, src_w, src_h;
- u32 val, pitch_in_bytes, line_length, line_number, paddr, ahbp, avbp, bpcr;
+ u32 val, pitch_in_bytes, line_length, line_number, ahbp, avbp, bpcr;
+ u32 paddr, paddr1, paddr2;
enum ltdc_pix_fmt pf;
if (!newstate->crtc || !fb) {
@@ -1281,13 +1323,6 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
}
regmap_write_bits(ldev->regmap, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
- /* Configures the color frame buffer pitch in bytes & line length */
- pitch_in_bytes = fb->pitches[0];
- line_length = fb->format->cpp[0] *
- (x1 - x0 + 1) + (ldev->caps.bus_width >> 3) - 1;
- val = ((pitch_in_bytes << 16) | line_length);
- regmap_write_bits(ldev->regmap, LTDC_L1CFBLR + lofs, LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
-
/* Specifies the constant alpha value */
val = newstate->alpha >> 8;
regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
@@ -1302,78 +1337,124 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
plane->type != DRM_PLANE_TYPE_PRIMARY)
val = BF1_PAXCA | BF2_1PAXCA;
- regmap_write_bits(ldev->regmap, LTDC_L1BFCR + lofs, LXBFCR_BF2 | LXBFCR_BF1, val);
-
- /* Configures the frame buffer line number */
- line_number = y1 - y0 + 1;
- regmap_write_bits(ldev->regmap, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, line_number);
+ if (ldev->caps.dynamic_zorder) {
+ val |= (newstate->normalized_zpos << 16);
+ regmap_write_bits(ldev->regmap, LTDC_L1BFCR + lofs,
+ LXBFCR_BF2 | LXBFCR_BF1 | LXBFCR_BOR, val);
+ } else {
+ regmap_write_bits(ldev->regmap, LTDC_L1BFCR + lofs,
+ LXBFCR_BF2 | LXBFCR_BF1, val);
+ }
/* Sets the FB address */
paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 0);
+ if (newstate->rotation & DRM_MODE_REFLECT_X)
+ paddr += (fb->format->cpp[0] * (x1 - x0 + 1)) - 1;
+
+ if (newstate->rotation & DRM_MODE_REFLECT_Y)
+ paddr += (fb->pitches[0] * (y1 - y0));
+
DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
regmap_write(ldev->regmap, LTDC_L1CFBAR + lofs, paddr);
+ /* Configures the color frame buffer pitch in bytes & line length */
+ line_length = fb->format->cpp[0] *
+ (x1 - x0 + 1) + (ldev->caps.bus_width >> 3) - 1;
+
+ if (newstate->rotation & DRM_MODE_REFLECT_Y)
+ /* Compute negative value (signed on 16 bits) for the pitch */
+ pitch_in_bytes = 0x10000 - fb->pitches[0];
+ else
+ pitch_in_bytes = fb->pitches[0];
+
+ val = (pitch_in_bytes << 16) | line_length;
+ regmap_write_bits(ldev->regmap, LTDC_L1CFBLR + lofs, LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
+
+ /* Configures the frame buffer line number */
+ line_number = y1 - y0 + 1;
+ regmap_write_bits(ldev->regmap, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, line_number);
+
if (ldev->caps.ycbcr_input) {
if (fb->format->is_yuv) {
switch (fb->format->format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
- /* Configure the auxiliary frame buffer address 0 & 1 */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr);
- regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr + 1);
+ /* Configure the auxiliary frame buffer address 0 */
+ paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- /* Configure the buffer length */
- val = ((pitch_in_bytes << 16) | line_length);
- regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+ if (newstate->rotation & DRM_MODE_REFLECT_X)
+ paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
- /* Configure the frame buffer line number */
- val = (line_number >> 1);
- regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ if (newstate->rotation & DRM_MODE_REFLECT_Y)
+ paddr1 += (fb->pitches[1] * (y1 - y0 - 1)) >> 1;
+
+ regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr1);
break;
case DRM_FORMAT_YUV420:
- /* Configure the auxiliary frame buffer address 0 */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr);
-
- /* Configure the auxiliary frame buffer address 1 */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
- regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr);
+ /* Configure the auxiliary frame buffer address 0 & 1 */
+ paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ paddr2 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
- line_length = ((fb->format->cpp[0] * (x1 - x0 + 1)) >> 1) +
- (ldev->caps.bus_width >> 3) - 1;
+ if (newstate->rotation & DRM_MODE_REFLECT_X) {
+ paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
+ paddr2 += ((fb->format->cpp[2] * (x1 - x0 + 1)) >> 1) - 1;
+ }
- /* Configure the buffer length */
- val = (((pitch_in_bytes >> 1) << 16) | line_length);
- regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+ if (newstate->rotation & DRM_MODE_REFLECT_Y) {
+ paddr1 += (fb->pitches[1] * (y1 - y0 - 1)) >> 1;
+ paddr2 += (fb->pitches[2] * (y1 - y0 - 1)) >> 1;
+ }
- /* Configure the frame buffer line number */
- val = (line_number >> 1);
- regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr1);
+ regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr2);
break;
case DRM_FORMAT_YVU420:
- /* Configure the auxiliary frame buffer address 0 */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
- regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr);
-
- /* Configure the auxiliary frame buffer address 1 */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr);
+ /* Configure the auxiliary frame buffer address 0 & 1 */
+ paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
+ paddr2 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- line_length = ((fb->format->cpp[0] * (x1 - x0 + 1)) >> 1) +
- (ldev->caps.bus_width >> 3) - 1;
+ if (newstate->rotation & DRM_MODE_REFLECT_X) {
+ paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
+ paddr2 += ((fb->format->cpp[2] * (x1 - x0 + 1)) >> 1) - 1;
+ }
- /* Configure the buffer length */
- val = (((pitch_in_bytes >> 1) << 16) | line_length);
- regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+ if (newstate->rotation & DRM_MODE_REFLECT_Y) {
+ paddr1 += (fb->pitches[1] * (y1 - y0 - 1)) >> 1;
+ paddr2 += (fb->pitches[2] * (y1 - y0 - 1)) >> 1;
+ }
- /* Configure the frame buffer line number */
- val = (line_number >> 1);
- regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr1);
+ regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr2);
break;
}
+ /*
+ * Set the length and the number of lines of the auxiliary
+ * buffers if the framebuffer contains more than one plane.
+ */
+ if (fb->format->num_planes > 1) {
+ if (newstate->rotation & DRM_MODE_REFLECT_Y)
+ /*
+ * Compute negative value (signed 16-bit)
+ * for the pitch
+ */
+ pitch_in_bytes = 0x10000 - fb->pitches[1];
+ else
+ pitch_in_bytes = fb->pitches[1];
+
+ line_length = ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) +
+ (ldev->caps.bus_width >> 3) - 1;
+
+ /* Configure the auxiliary buffer length */
+ val = (pitch_in_bytes << 16) | line_length;
+ regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+
+ /* Configure the auxiliary frame buffer line number */
+ val = line_number >> 1;
+ regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ }
+
/* Configure YCbCr conversion coefficients */
ltdc_set_ycbcr_coeffs(plane);
@@ -1388,7 +1469,12 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
/* Enable layer and CLUT if needed */
val = fb->format->format == DRM_FORMAT_C8 ? LXCR_CLUTEN : 0;
val |= LXCR_LEN;
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN, val);
+
+ /* Enable horizontal mirroring if requested */
+ if (newstate->rotation & DRM_MODE_REFLECT_X)
+ val |= LXCR_HMEN;
+
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, val);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1398,13 +1484,21 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
ldev->plane_fpsi[plane->index].counter++;
mutex_lock(&ldev->err_lock);
- if (ldev->error_status & ISR_FUIF) {
- DRM_WARN("ltdc fifo underrun: please verify display mode\n");
- ldev->error_status &= ~ISR_FUIF;
+ if (ldev->transfer_err) {
+ DRM_WARN("ltdc transfer error: %d\n", ldev->transfer_err);
+ ldev->transfer_err = 0;
}
- if (ldev->error_status & ISR_TERRIF) {
- DRM_WARN("ltdc transfer error\n");
- ldev->error_status &= ~ISR_TERRIF;
+
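+ /*
+ * fifo_err counts underrun errors on hardware with a programmable
+ * threshold; otherwise fifo_warn accumulates underrun warnings and
+ * is only reported once it reaches fifo_threshold.
+ */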
+ if (ldev->caps.fifo_threshold) {
+ if (ldev->fifo_err) {
+ DRM_WARN("ltdc fifo underrun: please verify display mode\n");
+ ldev->fifo_err = 0;
+ }
+ } else {
+ if (ldev->fifo_warn >= ldev->fifo_threshold) {
+ DRM_WARN("ltdc fifo underrun: please verify display mode\n");
+ ldev->fifo_warn = 0;
+ }
}
mutex_unlock(&ldev->err_lock);
}
@@ -1417,8 +1511,8 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
struct ltdc_device *ldev = plane_to_ltdc(plane);
u32 lofs = plane->index * LAY_OFS;
- /* disable layer */
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN, 0);
+ /* Disable layer */
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, 0);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1562,6 +1656,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
{
struct ltdc_device *ldev = ddev->dev_private;
struct drm_plane *primary, *overlay;
+ int supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
unsigned int i;
int ret;
@@ -1571,7 +1666,14 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
return -EINVAL;
}
- drm_plane_create_zpos_immutable_property(primary, 0);
+ if (ldev->caps.dynamic_zorder)
+ drm_plane_create_zpos_property(primary, 0, 0, ldev->caps.nb_layers - 1);
+ else
+ drm_plane_create_zpos_immutable_property(primary, 0);
+
+ if (ldev->caps.plane_rotation)
+ drm_plane_create_rotation_property(primary, DRM_MODE_ROTATE_0,
+ supported_rotations);
/* Init CRTC according to its hardware features */
if (ldev->caps.crc)
@@ -1600,7 +1702,14 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
- drm_plane_create_zpos_immutable_property(overlay, i);
+ if (ldev->caps.dynamic_zorder)
+ drm_plane_create_zpos_property(overlay, i, 0, ldev->caps.nb_layers - 1);
+ else
+ drm_plane_create_zpos_immutable_property(overlay, i);
+
+ if (ldev->caps.plane_rotation)
+ drm_plane_create_rotation_property(overlay, DRM_MODE_ROTATE_0,
+ supported_rotations);
}
return 0;
@@ -1631,6 +1740,10 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("\n");
+ /* Set the fifo underrun threshold register */
+ if (ldev->caps.fifo_threshold)
+ regmap_write(ldev->regmap, LTDC_FUT, ldev->fifo_threshold);
+
/* Enable LTDC */
regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
}
@@ -1730,6 +1843,9 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.ycbcr_output = false;
ldev->caps.plane_reg_shadow = false;
ldev->caps.crc = false;
+ ldev->caps.dynamic_zorder = false;
+ ldev->caps.plane_rotation = false;
+ ldev->caps.fifo_threshold = false;
break;
case HWVER_20101:
ldev->caps.layer_ofs = LAY_OFS_0;
@@ -1745,6 +1861,9 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.ycbcr_output = false;
ldev->caps.plane_reg_shadow = false;
ldev->caps.crc = false;
+ ldev->caps.dynamic_zorder = false;
+ ldev->caps.plane_rotation = false;
+ ldev->caps.fifo_threshold = false;
break;
case HWVER_40100:
ldev->caps.layer_ofs = LAY_OFS_1;
@@ -1760,6 +1879,9 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.ycbcr_output = true;
ldev->caps.plane_reg_shadow = true;
ldev->caps.crc = true;
+ ldev->caps.dynamic_zorder = true;
+ ldev->caps.plane_rotation = true;
+ ldev->caps.fifo_threshold = true;
break;
default:
return -ENODEV;
@@ -1884,9 +2006,6 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
- /* Disable interrupts */
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
-
ret = ltdc_get_caps(ddev);
if (ret) {
DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
@@ -1894,8 +2013,22 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
+ /* Disable interrupts */
+ if (ldev->caps.fifo_threshold)
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
+ IER_TERRIE);
+ else
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
+ IER_TERRIE | IER_FUEIE);
+
DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+ /* Initialize the default fifo underrun threshold and clear the interrupt error counters */
+ ldev->transfer_err = 0;
+ ldev->fifo_err = 0;
+ ldev->fifo_warn = 0;
+ ldev->fifo_threshold = FUT_DFT;
+
for (i = 0; i < ldev->caps.nb_irq; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
@@ -1910,7 +2043,6 @@ int ltdc_load(struct drm_device *ddev)
DRM_ERROR("Failed to register LTDC interrupt\n");
goto err;
}
-
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 59fc5d1bbbab..9d488043ffdb 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -28,6 +28,9 @@ struct ltdc_caps {
bool ycbcr_output; /* ycbcr output converter supported */
bool plane_reg_shadow; /* plane shadow registers ability */
bool crc; /* cyclic redundancy check supported */
+ bool dynamic_zorder; /* dynamic z-order */
+ bool plane_rotation; /* plane rotation */
+ bool fifo_threshold; /* fifo underrun threshold supported */
};
#define LTDC_MAX_LAYER 4
@@ -43,8 +46,11 @@ struct ltdc_device {
struct clk *pixel_clk; /* lcd pixel clock */
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
- u32 error_status;
u32 irq_status;
+ u32 fifo_err; /* fifo underrun error counter */
+ u32 fifo_warn; /* fifo underrun warning counter */
+ u32 fifo_threshold; /* fifo underrun threshold */
+ u32 transfer_err; /* transfer error counter */
struct fps_info plane_fpsi[LTDC_MAX_LAYER];
struct drm_atomic_state *suspend_state;
int crc_skip_count;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index decd95ad519d..287e8c4bbaea 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -17,9 +17,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 275f7e4a03ae..6eb1aabdb161 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -7,6 +7,7 @@
*/
#include <linux/component.h>
+#include <linux/dma-mapping.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/of_graph.h>
@@ -73,7 +74,6 @@ static int sun4i_drv_bind(struct device *dev)
goto free_drm;
}
- dev_set_drvdata(dev, drm);
drm->dev_private = drv;
INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
@@ -114,6 +114,8 @@ static int sun4i_drv_bind(struct device *dev)
drm_fbdev_generic_setup(drm, 32);
+ dev_set_drvdata(dev, drm);
+
return 0;
finish_poll:
@@ -130,6 +132,7 @@ static void sun4i_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
+ dev_set_drvdata(dev, NULL);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
@@ -367,6 +370,13 @@ static int sun4i_drv_probe(struct platform_device *pdev)
INIT_KFIFO(list.fifo);
+ /*
+ * DE2 and DE3 cores actually support 40-bit addresses, but
+ * the driver does not.
+ */
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
for (i = 0;; i++) {
struct device_node *pipeline = of_parse_phandle(np,
"allwinner,pipelines",
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 6825ef46f43f..260136d60ceb 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "sun4i_drv.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index d8b71710e8f6..c0df5e892fa7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 6d43080791a0..648dd0b5b116 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -117,7 +118,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
if (IS_ERR_OR_NULL(layer->backend->frontend))
- sun4i_backend_format_is_supported(format, modifier);
+ return sun4i_backend_format_is_supported(format, modifier);
return sun4i_backend_format_is_supported(format, modifier) ||
sun4i_frontend_format_is_supported(format, modifier);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 2ee158aaeb9e..523a6d787921 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -8,6 +8,7 @@
#include <linux/component.h>
#include <linux/ioport.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index e624f6977eb8..fa23aa23fe4a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/reset.h>
#define SUN4I_TCON_GCTL_REG 0x0
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a8d75fd7e9f4..477cb6985b4d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -93,34 +93,10 @@ crtcs_exit:
return crtcs;
}
-static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
- struct platform_device **pdev_out)
-{
- struct platform_device *pdev;
- struct device_node *remote;
-
- remote = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!remote)
- return -ENODEV;
-
- if (!of_device_is_compatible(remote, "hdmi-connector")) {
- of_node_put(remote);
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(remote);
- of_node_put(remote);
- if (!pdev)
- return -ENODEV;
-
- *pdev_out = pdev;
- return 0;
-}
-
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
+ struct platform_device *pdev = to_platform_device(dev);
struct dw_hdmi_plat_data *plat_data;
struct drm_device *drm = data;
struct device_node *phy_node;
@@ -167,30 +143,16 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
"Couldn't get regulator\n");
- ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
- if (!ret) {
- hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
- "ddc-en", GPIOD_OUT_HIGH);
- platform_device_put(connector_pdev);
-
- if (IS_ERR(hdmi->ddc_en)) {
- dev_err(dev, "Couldn't get ddc-en gpio\n");
- return PTR_ERR(hdmi->ddc_en);
- }
- }
-
ret = regulator_enable(hdmi->regulator);
if (ret) {
dev_err(dev, "Failed to enable regulator\n");
- goto err_unref_ddc_en;
+ return ret;
}
- gpiod_set_value(hdmi->ddc_en, 1);
-
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
- goto err_disable_ddc_en;
+ goto err_disable_regulator;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -245,12 +207,8 @@ err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
-err_disable_ddc_en:
- gpiod_set_value(hdmi->ddc_en, 0);
+err_disable_regulator:
regulator_disable(hdmi->regulator);
-err_unref_ddc_en:
- if (hdmi->ddc_en)
- gpiod_put(hdmi->ddc_en);
return ret;
}
@@ -264,11 +222,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
sun8i_hdmi_phy_deinit(hdmi->phy);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
- gpiod_set_value(hdmi->ddc_en, 0);
regulator_disable(hdmi->regulator);
-
- if (hdmi->ddc_en)
- gpiod_put(hdmi->ddc_en);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index bffe1b9cd3dc..ab80d52a70bb 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -9,7 +9,6 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_encoder.h>
#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -151,16 +150,11 @@ struct sun8i_hdmi_phy;
struct sun8i_hdmi_phy_variant {
bool has_phy_clk;
bool has_second_pll;
- unsigned int is_custom_phy : 1;
const struct dw_hdmi_curr_ctrl *cur_ctr;
const struct dw_hdmi_mpll_config *mpll_cfg;
const struct dw_hdmi_phy_config *phy_cfg;
+ const struct dw_hdmi_phy_ops *phy_ops;
void (*phy_init)(struct sun8i_hdmi_phy *phy);
- void (*phy_disable)(struct dw_hdmi *hdmi,
- struct sun8i_hdmi_phy *phy);
- int (*phy_config)(struct dw_hdmi *hdmi,
- struct sun8i_hdmi_phy *phy,
- unsigned int clk_rate);
};
struct sun8i_hdmi_phy {
@@ -173,7 +167,7 @@ struct sun8i_hdmi_phy {
unsigned int rcal;
struct regmap *regs;
struct reset_control *rst_phy;
- struct sun8i_hdmi_phy_variant *variant;
+ const struct sun8i_hdmi_phy_variant *variant;
};
struct sun8i_dw_hdmi_quirks {
@@ -193,7 +187,6 @@ struct sun8i_dw_hdmi {
struct regulator *regulator;
const struct sun8i_dw_hdmi_quirks *quirks;
struct reset_control *rst_ctrl;
- struct gpio_desc *ddc_en;
};
extern struct platform_driver sun8i_hdmi_phy_driver;
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 2860e6bff8b7..ca53b5e9fffc 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -123,10 +123,30 @@ static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
-static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
- struct sun8i_hdmi_phy *phy,
- unsigned int clk_rate)
+static void sun8i_hdmi_phy_set_polarity(struct sun8i_hdmi_phy *phy,
+ const struct drm_display_mode *mode)
{
+ u32 val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NHSYNC;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NVSYNC;
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
+ SUN8I_HDMI_PHY_DBG_CTRL_POL_MASK, val);
+}
+
+static int sun8i_a83t_hdmi_phy_config(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ unsigned int clk_rate = mode->crtc_clock * 1000;
+ struct sun8i_hdmi_phy *phy = data;
+
+ sun8i_hdmi_phy_set_polarity(phy, mode);
+
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
@@ -185,10 +205,31 @@ static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
return 0;
}
-static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
- struct sun8i_hdmi_phy *phy,
- unsigned int clk_rate)
+static void sun8i_a83t_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
{
+ struct sun8i_hdmi_phy *phy = data;
+
+ dw_hdmi_phy_gen2_txpwron(hdmi, 0);
+ dw_hdmi_phy_gen2_pddq(hdmi, 1);
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN, 0);
+}
+
+static const struct dw_hdmi_phy_ops sun8i_a83t_hdmi_phy_ops = {
+ .init = sun8i_a83t_hdmi_phy_config,
+ .disable = sun8i_a83t_hdmi_phy_disable,
+ .read_hpd = dw_hdmi_phy_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+ .setup_hpd = dw_hdmi_phy_setup_hpd,
+};
+
+static int sun8i_h3_hdmi_phy_config(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ unsigned int clk_rate = mode->crtc_clock * 1000;
+ struct sun8i_hdmi_phy *phy = data;
u32 pll_cfg1_init;
u32 pll_cfg2_init;
u32 ana_cfg1_end;
@@ -197,6 +238,11 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
u32 b_offset = 0;
u32 val;
+ if (phy->variant->has_phy_clk)
+ clk_set_rate(phy->clk_phy, clk_rate);
+
+ sun8i_hdmi_phy_set_polarity(phy, mode);
+
/* bandwidth / frequency independent settings */
pll_cfg1_init = SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN |
@@ -333,41 +379,10 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
return 0;
}
-static int sun8i_hdmi_phy_config(struct dw_hdmi *hdmi, void *data,
- const struct drm_display_info *display,
- const struct drm_display_mode *mode)
-{
- struct sun8i_hdmi_phy *phy = (struct sun8i_hdmi_phy *)data;
- u32 val = 0;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NHSYNC;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NVSYNC;
-
- regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
- SUN8I_HDMI_PHY_DBG_CTRL_POL_MASK, val);
-
- if (phy->variant->has_phy_clk)
- clk_set_rate(phy->clk_phy, mode->crtc_clock * 1000);
-
- return phy->variant->phy_config(hdmi, phy, mode->crtc_clock * 1000);
-};
-
-static void sun8i_hdmi_phy_disable_a83t(struct dw_hdmi *hdmi,
- struct sun8i_hdmi_phy *phy)
+static void sun8i_h3_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
{
- dw_hdmi_phy_gen2_txpwron(hdmi, 0);
- dw_hdmi_phy_gen2_pddq(hdmi, 1);
-
- regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
- SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN, 0);
-}
+ struct sun8i_hdmi_phy *phy = data;
-static void sun8i_hdmi_phy_disable_h3(struct dw_hdmi *hdmi,
- struct sun8i_hdmi_phy *phy)
-{
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_LDOEN |
SUN8I_HDMI_PHY_ANA_CFG1_ENVBS |
@@ -375,19 +390,12 @@ static void sun8i_hdmi_phy_disable_h3(struct dw_hdmi *hdmi,
regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, 0);
}
-static void sun8i_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
-{
- struct sun8i_hdmi_phy *phy = (struct sun8i_hdmi_phy *)data;
-
- phy->variant->phy_disable(hdmi, phy);
-}
-
-static const struct dw_hdmi_phy_ops sun8i_hdmi_phy_ops = {
- .init = &sun8i_hdmi_phy_config,
- .disable = &sun8i_hdmi_phy_disable,
- .read_hpd = &dw_hdmi_phy_read_hpd,
- .update_hpd = &dw_hdmi_phy_update_hpd,
- .setup_hpd = &dw_hdmi_phy_setup_hpd,
+static const struct dw_hdmi_phy_ops sun8i_h3_hdmi_phy_ops = {
+ .init = sun8i_h3_hdmi_phy_config,
+ .disable = sun8i_h3_hdmi_phy_disable,
+ .read_hpd = dw_hdmi_phy_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+ .setup_hpd = dw_hdmi_phy_setup_hpd,
};
static void sun8i_hdmi_phy_unlock(struct sun8i_hdmi_phy *phy)
@@ -565,10 +573,10 @@ void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
struct dw_hdmi_plat_data *plat_data)
{
- struct sun8i_hdmi_phy_variant *variant = phy->variant;
+ const struct sun8i_hdmi_phy_variant *variant = phy->variant;
- if (variant->is_custom_phy) {
- plat_data->phy_ops = &sun8i_hdmi_phy_ops;
+ if (variant->phy_ops) {
+ plat_data->phy_ops = variant->phy_ops;
plat_data->phy_name = "sun8i_dw_hdmi_phy";
plat_data->phy_data = phy;
} else {
@@ -587,35 +595,27 @@ static const struct regmap_config sun8i_hdmi_phy_regmap_config = {
};
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
- .is_custom_phy = true,
+ .phy_ops = &sun8i_a83t_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_a83t,
- .phy_disable = &sun8i_hdmi_phy_disable_a83t,
- .phy_config = &sun8i_hdmi_phy_config_a83t,
};
static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
.has_phy_clk = true,
- .is_custom_phy = true,
+ .phy_ops = &sun8i_h3_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_h3,
- .phy_disable = &sun8i_hdmi_phy_disable_h3,
- .phy_config = &sun8i_hdmi_phy_config_h3,
};
static const struct sun8i_hdmi_phy_variant sun8i_r40_hdmi_phy = {
.has_phy_clk = true,
.has_second_pll = true,
- .is_custom_phy = true,
+ .phy_ops = &sun8i_h3_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_h3,
- .phy_disable = &sun8i_hdmi_phy_disable_h3,
- .phy_config = &sun8i_hdmi_phy_config_h3,
};
static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
.has_phy_clk = true,
- .is_custom_phy = true,
+ .phy_ops = &sun8i_h3_hdmi_phy_ops,
.phy_init = &sun8i_hdmi_phy_init_h3,
- .phy_disable = &sun8i_hdmi_phy_disable_h3,
- .phy_config = &sun8i_hdmi_phy_config_h3,
};
static const struct sun8i_hdmi_phy_variant sun50i_h6_hdmi_phy = {
@@ -672,116 +672,64 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct sun8i_hdmi_phy *phy;
- struct resource res;
void __iomem *regs;
- int ret;
-
- match = of_match_node(sun8i_hdmi_phy_of_table, node);
- if (!match) {
- dev_err(dev, "Incompatible HDMI PHY\n");
- return -EINVAL;
- }
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+ phy->variant = of_device_get_match_data(dev);
phy->dev = dev;
- ret = of_address_to_resource(node, 0, &res);
- if (ret) {
- dev_err(dev, "phy: Couldn't get our resources\n");
- return ret;
- }
-
- regs = devm_ioremap_resource(dev, &res);
- if (IS_ERR(regs)) {
- dev_err(dev, "Couldn't map the HDMI PHY registers\n");
- return PTR_ERR(regs);
- }
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Couldn't map the HDMI PHY registers\n");
phy->regs = devm_regmap_init_mmio(dev, regs,
&sun8i_hdmi_phy_regmap_config);
- if (IS_ERR(phy->regs)) {
- dev_err(dev, "Couldn't create the HDMI PHY regmap\n");
- return PTR_ERR(phy->regs);
- }
+ if (IS_ERR(phy->regs))
+ return dev_err_probe(dev, PTR_ERR(phy->regs),
+ "Couldn't create the HDMI PHY regmap\n");
- phy->clk_bus = of_clk_get_by_name(node, "bus");
- if (IS_ERR(phy->clk_bus)) {
- dev_err(dev, "Could not get bus clock\n");
- return PTR_ERR(phy->clk_bus);
- }
+ phy->clk_bus = devm_clk_get(dev, "bus");
+ if (IS_ERR(phy->clk_bus))
+ return dev_err_probe(dev, PTR_ERR(phy->clk_bus),
+ "Could not get bus clock\n");
- phy->clk_mod = of_clk_get_by_name(node, "mod");
- if (IS_ERR(phy->clk_mod)) {
- dev_err(dev, "Could not get mod clock\n");
- ret = PTR_ERR(phy->clk_mod);
- goto err_put_clk_bus;
- }
+ phy->clk_mod = devm_clk_get(dev, "mod");
+ if (IS_ERR(phy->clk_mod))
+ return dev_err_probe(dev, PTR_ERR(phy->clk_mod),
+ "Could not get mod clock\n");
if (phy->variant->has_phy_clk) {
- phy->clk_pll0 = of_clk_get_by_name(node, "pll-0");
- if (IS_ERR(phy->clk_pll0)) {
- dev_err(dev, "Could not get pll-0 clock\n");
- ret = PTR_ERR(phy->clk_pll0);
- goto err_put_clk_mod;
- }
+ phy->clk_pll0 = devm_clk_get(dev, "pll-0");
+ if (IS_ERR(phy->clk_pll0))
+ return dev_err_probe(dev, PTR_ERR(phy->clk_pll0),
+ "Could not get pll-0 clock\n");
if (phy->variant->has_second_pll) {
- phy->clk_pll1 = of_clk_get_by_name(node, "pll-1");
- if (IS_ERR(phy->clk_pll1)) {
- dev_err(dev, "Could not get pll-1 clock\n");
- ret = PTR_ERR(phy->clk_pll1);
- goto err_put_clk_pll0;
- }
+ phy->clk_pll1 = devm_clk_get(dev, "pll-1");
+ if (IS_ERR(phy->clk_pll1))
+ return dev_err_probe(dev, PTR_ERR(phy->clk_pll1),
+ "Could not get pll-1 clock\n");
}
}
- phy->rst_phy = of_reset_control_get_shared(node, "phy");
- if (IS_ERR(phy->rst_phy)) {
- dev_err(dev, "Could not get phy reset control\n");
- ret = PTR_ERR(phy->rst_phy);
- goto err_put_clk_pll1;
- }
+ phy->rst_phy = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(phy->rst_phy))
+ return dev_err_probe(dev, PTR_ERR(phy->rst_phy),
+ "Could not get phy reset control\n");
platform_set_drvdata(pdev, phy);
return 0;
-
-err_put_clk_pll1:
- clk_put(phy->clk_pll1);
-err_put_clk_pll0:
- clk_put(phy->clk_pll0);
-err_put_clk_mod:
- clk_put(phy->clk_mod);
-err_put_clk_bus:
- clk_put(phy->clk_bus);
-
- return ret;
-}
-
-static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
-{
- struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
-
- reset_control_put(phy->rst_phy);
-
- clk_put(phy->clk_pll0);
- clk_put(phy->clk_pll1);
- clk_put(phy->clk_mod);
- clk_put(phy->clk_bus);
- return 0;
}
struct platform_driver sun8i_hdmi_phy_driver = {
.probe = sun8i_hdmi_phy_probe,
- .remove = sun8i_hdmi_phy_remove,
.driver = {
.name = "sun8i-hdmi-phy",
.of_match_table = sun8i_hdmi_phy_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 875a1156c04e..648b38a73066 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 4632dea2dc1e..36da962de394 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -11,9 +11,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index f7d0b082d634..1fee6499bdd3 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -5,8 +5,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c6951cf5d2ca..747abafb6a5c 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/interconnect.h>
#include <linux/module.h>
@@ -21,8 +22,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9464f522e257..6748ec1e0005 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -18,6 +18,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
@@ -1380,6 +1381,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra194-sor", },
{ .compatible = "nvidia,tegra194-vic", },
{ .compatible = "nvidia,tegra194-nvdec", },
+ { .compatible = "nvidia,tegra234-vic", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fc0a19554eac..845e60f144c7 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -80,6 +80,7 @@ struct tegra_drm_context {
/* Only used by new UAPI. */
struct xarray mappings;
+ struct host1x_memory_context *memory_context;
};
struct tegra_drm_client_ops {
@@ -91,12 +92,22 @@ struct tegra_drm_client_ops {
int (*submit)(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file);
+ int (*get_streamid_offset)(struct tegra_drm_client *client, u32 *offset);
+ int (*can_use_memory_ctx)(struct tegra_drm_client *client, bool *supported);
};
int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file);
+static inline int
+tegra_drm_get_streamid_offset_thi(struct tegra_drm_client *client, u32 *offset)
+{
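+ /* 0x30 is the THI_STREAMID0 register offset in the Falcon-based engines' host interface */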
+ *offset = 0x30;
+
+ return 0;
+}
+
struct tegra_drm_client {
struct host1x_client base;
struct list_head list;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 3762d87759d9..c0d85463eb1a 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -48,6 +48,14 @@ static int falcon_copy_chunk(struct falcon *falcon,
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
+ /*
+ * Use second DMA context (i.e. the one for firmware). Strictly
+ * speaking, at this point both DMA contexts point to the firmware
+ * stream ID, but this register's value will be reused by the firmware
+ * for later DMA transactions, so we need to use the correct value.
+ */
+ cmd |= FALCON_DMATRFCMD_DMACTX(1);
+
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index c56ee32d92ee..1955cf11a8a6 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -50,6 +50,7 @@
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
+#define FALCON_DMATRFCMD_DMACTX(v) (((v) & 0x7) << 12)
#define FALCON_DMATRFFBOFFS 0x0000111c
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index c04dda8353fd..ed828de5ac01 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,6 +10,7 @@
#include <linux/console.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7c7dd84e6db8..81991090adcc 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -704,14 +704,23 @@ static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
+ void *vaddr;
- iosys_map_set_vaddr(map, bo->vaddr);
+ vaddr = tegra_bo_mmap(&bo->base);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ iosys_map_set_vaddr(map, vaddr);
return 0;
}
static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ tegra_bo_munmap(&bo->base, map->vaddr);
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index b8d3174c04c9..b872527a123c 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -16,7 +17,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
index 79e1e88203cf..276fe0472730 100644
--- a/drivers/gpu/drm/tegra/nvdec.c
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -21,6 +22,8 @@
#include "falcon.h"
#include "vic.h"
+#define NVDEC_TFBIF_TRANSCFG 0x2c44
+
struct nvdec_config {
const char *firmware;
unsigned int version;
@@ -63,7 +66,7 @@ static int nvdec_boot(struct nvdec *nvdec)
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
- nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG);
+ nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);
if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
@@ -304,10 +307,19 @@ static void nvdec_close_channel(struct tegra_drm_context *context)
host1x_channel_put(context->channel);
}
+static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
+{
+ *supported = true;
+
+ return 0;
+}
+
static const struct tegra_drm_client_ops nvdec_ops = {
.open_channel = nvdec_open_channel,
.close_channel = nvdec_close_channel,
.submit = tegra_drm_submit,
+ .get_streamid_offset = tegra_drm_get_streamid_offset_thi,
+ .can_use_memory_ctx = nvdec_can_use_memory_ctx,
};
#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index e0e6938c6200..ca9f03e3675b 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -3,12 +3,14 @@
* Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/interconnect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
index 6d6dd8c35475..b24738bdf3df 100644
--- a/drivers/gpu/drm/tegra/submit.c
+++ b/drivers/gpu/drm/tegra/submit.c
@@ -498,6 +498,9 @@ static void release_job(struct host1x_job *job)
struct tegra_drm_submit_data *job_data = job->user_data;
u32 i;
+ if (job->memory_context)
+ host1x_memory_context_put(job->memory_context);
+
for (i = 0; i < job_data->num_used_mappings; i++)
tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
@@ -588,11 +591,51 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
goto put_job;
}
+ if (context->client->ops->get_streamid_offset) {
+ err = context->client->ops->get_streamid_offset(
+ context->client, &job->engine_streamid_offset);
+ if (err) {
+ SUBMIT_ERR(context, "failed to get streamid offset: %d", err);
+ goto unpin_job;
+ }
+ }
+
+ if (context->memory_context && context->client->ops->can_use_memory_ctx) {
+ bool supported;
+
+ err = context->client->ops->can_use_memory_ctx(context->client, &supported);
+ if (err) {
+ SUBMIT_ERR(context, "failed to detect if engine can use memory context: %d", err);
+ goto unpin_job;
+ }
+
+ if (supported) {
+ job->memory_context = context->memory_context;
+ host1x_memory_context_get(job->memory_context);
+ }
+ } else if (context->client->ops->get_streamid_offset) {
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec;
+
+ /*
+ * Job submission will need to temporarily change stream ID,
+ * so we need to tell it what to change it back to.
+ */
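+ /* The 0x7f fallback below is the Tegra SMMU passthrough stream ID */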
+ spec = dev_iommu_fwspec_get(context->client->base.dev);
+ if (spec && spec->num_ids > 0)
+ job->engine_fallback_streamid = spec->ids[0] & 0xffff;
+ else
+ job->engine_fallback_streamid = 0x7f;
+#else
+ job->engine_fallback_streamid = 0x7f;
+#endif
+ }
+
/* Boot engine. */
err = pm_runtime_resume_and_get(context->client->base.dev);
if (err < 0) {
SUBMIT_ERR(context, "could not power up engine: %d", err);
- goto unpin_job;
+ goto put_memory_context;
}
job->user_data = job_data;
@@ -627,6 +670,9 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
goto put_job;
+put_memory_context:
+ if (job->memory_context)
+ host1x_memory_context_put(job->memory_context);
unpin_job:
host1x_job_unpin(job);
put_job:
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index 9ab9179d2026..a98239cb0e29 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -33,6 +33,9 @@ static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
struct tegra_drm_mapping *mapping;
unsigned long id;
+ if (context->memory_context)
+ host1x_memory_context_put(context->memory_context);
+
xa_for_each(&context->mappings, id, mapping)
tegra_drm_mapping_put(mapping);
@@ -72,6 +75,7 @@ static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
+ struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
struct tegra_drm_file *fpriv = file->driver_priv;
struct tegra_drm *tegra = drm->dev_private;
struct drm_tegra_channel_open *args = data;
@@ -102,10 +106,36 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
}
}
+ /* Only allocate context if the engine supports context isolation. */
+ if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
+ bool supported;
+
+ err = client->ops->can_use_memory_ctx(client, &supported);
+ if (err)
+ goto put_channel;
+
+ if (supported)
+ context->memory_context = host1x_memory_context_alloc(
+ host, get_task_pid(current, PIDTYPE_TGID));
+
+ if (IS_ERR(context->memory_context)) {
+ if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
+ err = PTR_ERR(context->memory_context);
+ goto put_channel;
+ } else {
+ /*
+ * OK, HW does not support contexts or contexts
+ * are disabled.
+ */
+ context->memory_context = NULL;
+ }
+ }
+ }
+
err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
if (err < 0)
- goto put_channel;
+ goto put_memctx;
context->client = client;
xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);
@@ -118,6 +148,9 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
return 0;
+put_memctx:
+ if (context->memory_context)
+ host1x_memory_context_put(context->memory_context);
put_channel:
host1x_channel_put(context->channel);
free:
@@ -156,6 +189,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
struct tegra_drm_mapping *mapping;
struct tegra_drm_context *context;
enum dma_data_direction direction;
+ struct device *mapping_dev;
int err = 0;
if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
@@ -177,6 +211,11 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
kref_init(&mapping->ref);
+ if (context->memory_context)
+ mapping_dev = &context->memory_context->dev;
+ else
+ mapping_dev = context->client->base.dev;
+
mapping->bo = tegra_gem_lookup(file, args->handle);
if (!mapping->bo) {
err = -EINVAL;
@@ -201,7 +240,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
goto put_gem;
}
- mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL);
+ mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
if (IS_ERR(mapping->map)) {
err = PTR_ERR(mapping->map);
goto put_gem;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index f56f5921a8c2..7382ee132eb7 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -38,6 +38,8 @@ struct vic {
struct clk *clk;
struct reset_control *rst;
+ bool can_use_context;
+
/* Platform configuration */
const struct vic_config *config;
};
@@ -229,28 +231,38 @@ static int vic_load_firmware(struct vic *vic)
{
struct host1x_client *client = &vic->client.base;
struct tegra_drm *tegra = vic->client.drm;
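+ /* Serialize firmware loading: this can now also be reached via vic_can_use_memory_ctx() */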
+ static DEFINE_MUTEX(lock);
+ u32 fce_bin_data_offset;
dma_addr_t iova;
size_t size;
void *virt;
int err;
- if (vic->falcon.firmware.virt)
- return 0;
+ mutex_lock(&lock);
+
+ if (vic->falcon.firmware.virt) {
+ err = 0;
+ goto unlock;
+ }
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
- return err;
+ goto unlock;
size = vic->falcon.firmware.size;
if (!client->group) {
virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
- if (!virt)
- return -ENOMEM;
+ if (!virt) {
+ err = -ENOMEM;
+ goto unlock;
+ }
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
- if (IS_ERR(virt))
- return PTR_ERR(virt);
+ if (IS_ERR(virt)) {
+ err = PTR_ERR(virt);
+ goto unlock;
+ }
}
vic->falcon.firmware.virt = virt;
@@ -277,7 +289,28 @@ static int vic_load_firmware(struct vic *vic)
vic->falcon.firmware.phys = phys;
}
- return 0;
+ /*
+ * Check if firmware is new enough to not require mapping firmware
+ * to data buffer domains.
+ */
+ fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);
+
+ if (!vic->config->supports_sid) {
+ vic->can_use_context = false;
+ } else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+ /*
+ * Firmware will access FCE through STREAMID0, so context
+ * isolation cannot be used.
+ */
+ vic->can_use_context = false;
+ dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
+ } else {
+ vic->can_use_context = true;
+ }
+
+unlock:
+ mutex_unlock(&lock);
+ return err;
cleanup:
if (!client->group)
@@ -285,11 +318,12 @@ cleanup:
else
tegra_drm_free(tegra, size, virt, iova);
+ mutex_unlock(&lock);
return err;
}
-static int vic_runtime_resume(struct device *dev)
+static int __maybe_unused vic_runtime_resume(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
int err;
@@ -323,7 +357,7 @@ disable:
return err;
}
-static int vic_runtime_suspend(struct device *dev)
+static int __maybe_unused vic_runtime_suspend(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
int err;
@@ -358,10 +392,27 @@ static void vic_close_channel(struct tegra_drm_context *context)
host1x_channel_put(context->channel);
}
+static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
+{
+ struct vic *vic = to_vic(client);
+ int err;
+
+ /* This doesn't access HW so it's safe to call without powering up. */
+ err = vic_load_firmware(vic);
+ if (err < 0)
+ return err;
+
+ *supported = vic->can_use_context;
+
+ return 0;
+}
+
static const struct tegra_drm_client_ops vic_ops = {
.open_channel = vic_open_channel,
.close_channel = vic_close_channel,
.submit = tegra_drm_submit,
+ .get_streamid_offset = tegra_drm_get_streamid_offset_thi,
+ .can_use_memory_ctx = vic_can_use_memory_ctx,
};
#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"
@@ -396,11 +447,20 @@ static const struct vic_config vic_t194_config = {
.supports_sid = true,
};
+#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"
+
+static const struct vic_config vic_t234_config = {
+ .firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
+ .version = 0x23,
+ .supports_sid = true,
+};
+
static const struct of_device_id tegra_vic_of_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
+ { .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);
@@ -409,7 +469,6 @@ static int vic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
- struct resource *regs;
struct vic *vic;
int err;
@@ -430,13 +489,7 @@ static int vic_probe(struct platform_device *pdev)
if (!syncpts)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(&pdev->dev, "failed to get registers\n");
- return -ENXIO;
- }
-
- vic->regs = devm_ioremap_resource(dev, regs);
+ vic->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vic->regs))
return PTR_ERR(vic->regs);
@@ -539,3 +592,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig
new file mode 100644
index 000000000000..6ec04b4c979d
--- /dev/null
+++ b/drivers/gpu/drm/tests/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
new file mode 100644
index 000000000000..2c8273796d9d
--- /dev/null
+++ b/drivers/gpu/drm/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
new file mode 100644
index 000000000000..98583bf56044
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drm_rect.h>
+
+#include "../drm_crtc_internal.h"
+
+#define TEST_BUF_SIZE 50
+
+struct xrgb8888_to_rgb332_case {
+ const char *name;
+ unsigned int pitch;
+ unsigned int dst_pitch;
+ struct drm_rect clip;
+ const u32 xrgb8888[TEST_BUF_SIZE];
+ const u8 expected[4 * TEST_BUF_SIZE];
+};
+
+static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
+ {
+ .name = "single_pixel_source_buffer",
+ .pitch = 1 * 4,
+ .dst_pitch = 0,
+ .clip = DRM_RECT_INIT(0, 0, 1, 1),
+ .xrgb8888 = { 0x01FF0000 },
+ .expected = { 0xE0 },
+ },
+ {
+ .name = "single_pixel_clip_rectangle",
+ .pitch = 2 * 4,
+ .dst_pitch = 0,
+ .clip = DRM_RECT_INIT(1, 1, 1, 1),
+ .xrgb8888 = {
+ 0x00000000, 0x00000000,
+ 0x00000000, 0x10FF0000,
+ },
+ .expected = { 0xE0 },
+ },
+ {
+ /* Well known colors: White, black, red, green, blue, magenta,
+ * yellow and cyan. Different values for the X in XRGB8888 to
+ * make sure it is ignored. Partial clip area.
+ */
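+ /* RGB332 packs the top 3 bits of red into bits 7-5, the top 3 bits
+  * of green into bits 4-2 and the top 2 bits of blue into bits 1-0:
+  * e.g. red -> 0xE0, green -> 0x1C, blue -> 0x03, cyan -> 0x1F.
+  */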
+ .name = "well_known_colors",
+ .pitch = 4 * 4,
+ .dst_pitch = 0,
+ .clip = DRM_RECT_INIT(1, 1, 2, 4),
+ .xrgb8888 = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x11FFFFFF, 0x22000000, 0x00000000,
+ 0x00000000, 0x33FF0000, 0x4400FF00, 0x00000000,
+ 0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000,
+ 0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000,
+ },
+ .expected = {
+ 0xFF, 0x00,
+ 0xE0, 0x1C,
+ 0x03, 0xE3,
+ 0xFC, 0x1F,
+ },
+ },
+ {
+ /* Randomly picked colors. Full buffer within the clip area. */
+ .name = "destination_pitch",
+ .pitch = 3 * 4,
+ .dst_pitch = 5,
+ .clip = DRM_RECT_INIT(0, 0, 3, 3),
+ .xrgb8888 = {
+ 0xA10E449C, 0xB1114D05, 0xC1A80303,
+ 0xD16C7073, 0xA20E449C, 0xB2114D05,
+ 0xC2A80303, 0xD26C7073, 0xA30E449C,
+ },
+ .expected = {
+ 0x0A, 0x08, 0xA0, 0x00, 0x00,
+ 0x6D, 0x0A, 0x08, 0x00, 0x00,
+ 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ },
+ },
+};
+
+/*
+ * conversion_buf_size - Return the destination buffer size required to convert
+ * between formats.
+ * @dst_format: destination buffer pixel format (DRM_FORMAT_*)
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @clip: Clip rectangle area to convert
+ *
+ * Returns:
+ * The size of the destination buffer or negative value on error.
+ */
+static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
+ const struct drm_rect *clip)
+{
+ const struct drm_format_info *dst_fi = drm_format_info(dst_format);
+
+ if (!dst_fi)
+ return -EINVAL;
+
+ if (!dst_pitch)
+ dst_pitch = drm_rect_width(clip) * dst_fi->cpp[0];
+
+ return dst_pitch * drm_rect_height(clip);
+}
+
+static void xrgb8888_to_rgb332_case_desc(struct xrgb8888_to_rgb332_case *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(xrgb8888_to_rgb332, xrgb8888_to_rgb332_cases,
+ xrgb8888_to_rgb332_case_desc);
+
+static void xrgb8888_to_rgb332_test(struct kunit *test)
+{
+ const struct xrgb8888_to_rgb332_case *params = test->param_value;
+ size_t dst_size;
+ __u8 *dst = NULL;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB332, params->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ dst = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dst);
+
+ drm_fb_xrgb8888_to_rgb332(dst, params->dst_pitch, params->xrgb8888,
+ &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(dst, params->expected, dst_size), 0);
+}
+
+static struct kunit_case drm_format_helper_test_cases[] = {
+ KUNIT_CASE_PARAM(xrgb8888_to_rgb332_test,
+ xrgb8888_to_rgb332_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_format_helper_test_suite = {
+ .name = "drm_format_helper_test",
+ .test_cases = drm_format_helper_test_cases,
+};
+
+kunit_test_suite(drm_format_helper_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the drm_format_helper APIs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index dae47853b728..dd3c6a606ae2 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
@@ -21,8 +22,10 @@
#include <linux/regmap.h>
#include <linux/sys_soc.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 217415ec8eea..68a85a94ffcb 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -6,9 +6,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_atomic_helper.h>
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 0dae7d5806bb..509fbae8c9a6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 42357808eaf2..2729e16bc053 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 74a5c8832229..960136518814 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include "tilcdc_drv.h"
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 627d637a1e7e..027cd87c3d0d 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -69,6 +69,7 @@ config DRM_PANEL_MIPI_DBI
config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
depends on DRM && MMU
+ select APERTURE_HELPERS
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index f0fa3b15c341..7461cb401407 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -10,9 +10,11 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index ed971c8bb446..82364a0a7b18 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index c8e791840862..c4f5beea1f90 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -29,10 +29,12 @@
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 648e585d40a8..7441d992a5d7 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -11,10 +11,12 @@
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index cc92eb9f2a07..8d686eecd5f4 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index a096fb8b83e9..013790c45d0a 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -28,6 +28,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 3f38faa1cd8c..8eddb020c43e 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 29d618093e94..e0f02d367d88 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -174,6 +174,7 @@ MODULE_DEVICE_TABLE(of, st7735r_of_match);
static const struct spi_device_id st7735r_id[] = {
{ "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
+ { "rh128128t", (uintptr_t)&rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 75d308ec173d..0e210df65c30 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -44,12 +44,6 @@
#include "ttm_module.h"
-/* default destructor */
-static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
-{
- kfree(bo);
-}
-
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
@@ -109,11 +103,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
return;
spin_lock(&bo->bdev->lru_lock);
- if (bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
bo->bulk_move = bulk;
- if (bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+ if (bo->resource)
+ ttm_resource_add_bulk_move(bo->resource, bo);
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
@@ -689,8 +683,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo)
{
dma_resv_assert_held(bo->base.resv);
WARN_ON_ONCE(!kref_read(&bo->kref));
- if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+ spin_lock(&bo->bdev->lru_lock);
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ ++bo->pin_count;
+ spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
@@ -707,8 +704,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
if (WARN_ON_ONCE(!bo->pin_count))
return;
- if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+ spin_lock(&bo->bdev->lru_lock);
+ --bo->pin_count;
+ if (bo->resource)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
@@ -936,8 +936,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bool locked;
int ret;
- bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
-
+ bo->destroy = destroy;
kref_init(&bo->kref);
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
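With the default kfree() destructor removed above, a driver that embeds the TTM BO in a larger object typically passes its own destroy callback into ttm_bo_init_reserved(); a minimal sketch (my_bo and my_bo_destroy are made-up names, not part of this patch) could look like:

	struct my_bo {
		struct ttm_buffer_object tbo;
		/* driver-private state */
	};

	static void my_bo_destroy(struct ttm_buffer_object *tbo)
	{
		struct my_bo *bo = container_of(tbo, struct my_bo, tbo);

		/* tear down driver-private state, then free the embedding object */
		kfree(bo);
	}
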
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 5b324f245265..38119311284d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,7 +102,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
* @bo: The buffer object
* @vmf: The fault structure handed to the callback
*
- * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
* during long waits, and after the wait the callback will be restarted. This
* is to allow other threads using the same virtual memory space concurrent
* access to map(), unmap() completely unrelated buffer objects. TTM buffer
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index a0562ab386f5..e7147e304637 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
ttm_resource_manager_for_each_res(man, &cursor, res) {
struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages = PFN_UP(bo->base.size);
+ uint32_t num_pages;
+ if (!bo)
+ continue;
+
+ num_pages = PFN_UP(bo->base.size);
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index a3ad7c9736ec..b3fffe7b5062 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -74,7 +74,7 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#endif /* CONFIG_UML */
#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__)
+ defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 65889b3caf50..20f9adcc3235 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
}
/* Add the resource to a bulk_move cursor */
-void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
- struct ttm_resource *res)
+static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
@@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
}
/* Remove the resource from a bulk_move range */
-void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
- struct ttm_resource *res)
+static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
@@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
}
}
+/* Add the resource to a bulk move if the BO is configured for it */
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ if (bo->bulk_move && !bo->pin_count)
+ ttm_lru_bulk_move_add(bo->bulk_move, res);
+}
+
+/* Remove the resource from a bulk move if the BO is configured for it */
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ if (bo->bulk_move && !bo->pin_count)
+ ttm_lru_bulk_move_del(bo->bulk_move, res);
+}
+
/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
@@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
res->bo = bo;
- INIT_LIST_HEAD(&res->lru);
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- man->usage += res->num_pages << PAGE_SHIFT;
- if (bo->bulk_move)
- ttm_lru_bulk_move_add(bo->bulk_move, res);
+ if (bo->pin_count)
+ list_add_tail(&res->lru, &bo->bdev->pinned);
else
- ttm_resource_move_to_lru_tail(res);
+ list_add_tail(&res->lru, &man->lru[bo->priority]);
+ man->usage += res->num_pages << PAGE_SHIFT;
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
@@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ int ret;
+
+ ret = man->func->alloc(man, bo, place, res_ptr);
+ if (ret)
+ return ret;
- return man->func->alloc(man, bo, place, res_ptr);
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(*res_ptr, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ return 0;
}
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
if (!*res)
return;
- if (bo->bulk_move) {
- spin_lock(&bo->bdev->lru_lock);
- ttm_lru_bulk_move_del(bo->bulk_move, *res);
- spin_unlock(&bo->bdev->lru_lock);
- }
-
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(*res, bo);
+ spin_unlock(&bo->bdev->lru_lock);
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
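Taken together with the ttm_bo.c hunks above, the idea is that a resource only sits in its BO's bulk move while the BO is unpinned, and that the add/del now happens inside ttm_resource_alloc()/ttm_resource_free(). A rough driver-side sketch of the intended flow, assuming the ttm_lru_bulk_move_init()/ttm_lru_bulk_move_tail() helpers from the same API family:

	struct ttm_lru_bulk_move bulk;

	ttm_lru_bulk_move_init(&bulk);

	/* Attach the BO; its resources join/leave the bulk move internally. */
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_set_bulk_move(bo, &bulk);
	dma_resv_unlock(bo->base.resv);

	/* Later: move everything tracked by the bulk move to the LRU tail. */
	spin_lock(&bo->bdev->lru_lock);
	ttm_lru_bulk_move_tail(&bulk);
	spin_unlock(&bo->bdev->lru_lock);
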
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 17b8c8dd169d..771bad881714 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 930574ad2bca..fade4c7adbf7 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -8,6 +8,7 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
@@ -128,7 +129,7 @@ struct drm_connector *udl_connector_init(struct drm_device *dev)
connector = &udl_connector->connector;
drm_connector_init(dev, connector, &udl_connector_funcs,
- DRM_MODE_CONNECTOR_DVII);
+ DRM_MODE_CONNECTOR_VGA);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD |
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index e973ec487484..ce62c5908e1d 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_V3D
tristate "Broadcom V3D 3.x and newer"
- depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCM || ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
depends on DRM
depends on COMMON_CLK
depends on MMU
@@ -9,4 +9,5 @@ config DRM_V3D
select DRM_GEM_SHMEM_HELPER
help
Choose this option if you have a system that has a Broadcom
- V3D 3.x or newer GPU, such as BCM7268.
+ V3D 3.x or newer GPU. SoCs supported include the BCM2711,
+ BCM7268 and BCM7278.
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 29fd13109e43..efbde124c296 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -4,7 +4,6 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
@@ -131,11 +130,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
u32 ident0, ident1, ident2, ident3, cores;
- int ret, core;
-
- ret = pm_runtime_get_sync(v3d->drm.dev);
- if (ret < 0)
- return ret;
+ int core;
ident0 = V3D_READ(V3D_HUB_IDENT0);
ident1 = V3D_READ(V3D_HUB_IDENT1);
@@ -188,9 +183,6 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
- pm_runtime_mark_last_busy(v3d->drm.dev);
- pm_runtime_put_autosuspend(v3d->drm.dev);
-
return 0;
}
@@ -218,11 +210,6 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
uint32_t cycles;
int core = 0;
int measure_ms = 1000;
- int ret;
-
- ret = pm_runtime_get_sync(v3d->drm.dev);
- if (ret < 0)
- return ret;
if (v3d->ver >= 40) {
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
@@ -246,9 +233,6 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
- pm_runtime_mark_last_busy(v3d->drm.dev);
- pm_runtime_put_autosuspend(v3d->drm.dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1afcd54fbbd5..8c7f910daa28 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <drm/drm_drv.h>
@@ -43,7 +42,6 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_get_param *args = data;
- int ret;
static const u32 reg_map[] = {
[DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG,
[DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1,
@@ -69,17 +67,12 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
if (args->value != 0)
return -EINVAL;
- ret = pm_runtime_get_sync(v3d->drm.dev);
- if (ret < 0)
- return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
args->value = V3D_CORE_READ(0, offset);
} else {
args->value = V3D_READ(offset);
}
- pm_runtime_mark_last_busy(v3d->drm.dev);
- pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -198,6 +191,7 @@ static const struct drm_driver v3d_drm_driver = {
};
static const struct of_device_id v3d_of_match[] = {
+ { .compatible = "brcm,2711-v3d" },
{ .compatible = "brcm,7268-v3d" },
{ .compatible = "brcm,7278-v3d" },
{},
@@ -280,10 +274,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return -ENOMEM;
}
- pm_runtime_use_autosuspend(dev);
- pm_runtime_set_autosuspend_delay(dev, 50);
- pm_runtime_enable(dev);
-
ret = v3d_gem_init(drm);
if (ret)
goto dma_free;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 2352e9640922..725a252e837b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -6,7 +6,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
@@ -372,9 +371,6 @@ v3d_job_free(struct kref *ref)
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
- pm_runtime_mark_last_busy(job->v3d->drm.dev);
- pm_runtime_put_autosuspend(job->v3d->drm.dev);
-
if (job->perfmon)
v3d_perfmon_put(job->perfmon);
@@ -476,14 +472,10 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- ret = pm_runtime_get_sync(v3d->drm.dev);
- if (ret < 0)
- goto fail;
-
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
- goto fail_job;
+ goto fail;
if (has_multisync) {
if (se->in_sync_count && se->wait_stage == queue) {
@@ -514,8 +506,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
fail_deps:
drm_sched_job_cleanup(&job->base);
-fail_job:
- pm_runtime_put_autosuspend(v3d->drm.dev);
fail:
kfree(*container);
*container = NULL;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 4017b0a621fc..fa0d73ce07bc 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -15,8 +15,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 49c0f2ac868b..0846d56f74f2 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -18,6 +18,8 @@
#include <linux/dma-buf.h>
+#include <drm/drm_fourcc.h>
+
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
@@ -248,6 +250,9 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->purgeable.lock);
list_add_tail(&bo->size_head, &vc4->purgeable.list);
vc4->purgeable.num++;
@@ -259,6 +264,9 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* list_del_init() is used here because the caller might release
* the purgeable lock in order to acquire the madv one and update the
* madv status.
@@ -387,6 +395,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
@@ -413,6 +424,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
struct drm_gem_cma_object *cma_obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
if (size == 0)
return ERR_PTR(-EINVAL);
@@ -471,19 +485,20 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return bo;
}
-int vc4_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
+int vc4_bo_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+ ret = vc4_dumb_fixup_args(args);
+ if (ret)
+ return ret;
bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
if (IS_ERR(bo))
@@ -601,8 +616,12 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* Fast path: if the BO is already retained by someone, no need to
* check the madv status.
*/
@@ -637,6 +656,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* Fast path: if the BO is still retained by someone, no need to test
* the madv value.
*/
@@ -756,6 +780,9 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo = NULL;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
ret = vc4_grab_bin_bo(vc4, vc4file);
if (ret)
return ret;
@@ -779,9 +806,13 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
@@ -805,6 +836,9 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo = NULL;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->size == 0)
return -EINVAL;
@@ -875,11 +909,15 @@ fail:
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_set_tiling *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
bool t_format;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->flags != 0)
return -EINVAL;
@@ -918,10 +956,14 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_get_tiling *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->flags != 0 || args->modifier != 0)
return -EINVAL;
@@ -948,6 +990,9 @@ int vc4_bo_cache_init(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* Create the initial set of BO labels that the kernel will
* use. This lets us avoid a bunch of string reallocation in
* the kernel's draw and BO allocation paths.
@@ -1007,6 +1052,9 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gem_obj;
int ret = 0, label;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!args->len)
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 59b20c8f132b..029be98660b3 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -38,6 +38,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -256,7 +257,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
* Removing 1 from the FIFO full level however
* seems to completely remove that issue.
*/
- if (!vc4->hvs->hvs5)
+ if (!vc4->is_vc5)
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
@@ -316,10 +317,13 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
struct drm_crtc_state *crtc_state = crtc->state;
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
- u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
+ bool is_hdmi = vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0 ||
+ vc4_encoder->type == VC4_ENCODER_TYPE_HDMI1;
+ u32 pixel_rep = ((mode->flags & DRM_MODE_FLAG_DBLCLK) && !is_hdmi) ? 2 : 1;
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
- u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
+ bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1;
+ u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
bool debug_dump_regs = false;
@@ -345,7 +349,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
PV_HORZB_HACTIVE));
CRTC_WRITE(PV_VERTA,
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
+ interlace,
PV_VERTA_VBP) |
VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
PV_VERTA_VSYNC));
@@ -357,7 +362,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
if (interlace) {
CRTC_WRITE(PV_VERTA_EVEN,
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end - 1,
+ mode->crtc_vsync_end,
PV_VERTA_VBP) |
VC4_SET_FIELD(mode->crtc_vsync_end -
mode->crtc_vsync_start,
@@ -377,7 +382,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
- VC4_SET_FIELD(mode->htotal * pixel_rep / 2,
+ VC4_SET_FIELD(mode->htotal * pixel_rep / (2 * ppc),
PV_VCONTROL_ODD_DELAY));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
} else {
@@ -389,7 +394,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- if (vc4->hvs->hvs5)
+ if (vc4->is_vc5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
@@ -775,17 +780,18 @@ struct vc4_async_flip_state {
struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
- struct vc4_seqno_cb cb;
+ union {
+ struct dma_fence_cb fence;
+ struct vc4_seqno_cb seqno;
+ } cb;
};
/* Called when the V3D execution for the BO being flipped to is done, so that
* we can actually update the plane's address to point to it.
*/
static void
-vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
{
- struct vc4_async_flip_state *flip_state =
- container_of(cb, struct vc4_async_flip_state, cb);
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_plane *plane = crtc->primary;
@@ -802,59 +808,96 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
drm_crtc_vblank_put(crtc);
drm_framebuffer_put(flip_state->fb);
- /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
- * when the planes are updated through the async update path.
- * FIXME: we should move to generic async-page-flip when it's
- * available, so that we can get rid of this hand-made cleanup_fb()
- * logic.
- */
- if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo;
- struct vc4_bo *bo;
+ if (flip_state->old_fb)
+ drm_framebuffer_put(flip_state->old_fb);
+
+ kfree(flip_state);
+}
+
+static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+{
+ struct vc4_async_flip_state *flip_state =
+ container_of(cb, struct vc4_async_flip_state, cb.seqno);
+ struct vc4_bo *bo = NULL;
- cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+ if (flip_state->old_fb) {
+ struct drm_gem_cma_object *cma_bo =
+ drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
bo = to_vc4_bo(&cma_bo->base);
- vc4_bo_dec_usecnt(bo);
- drm_framebuffer_put(flip_state->old_fb);
}
- kfree(flip_state);
+ vc4_async_page_flip_complete(flip_state);
+
+ /*
+ * Decrement the BO usecnt in order to keep the inc/dec
+ * calls balanced when the planes are updated through
+ * the async update path.
+ *
+ * FIXME: we should move to generic async-page-flip when
+ * it's available, so that we can get rid of this
+ * hand-made cleanup_fb() logic.
+ */
+ if (bo)
+ vc4_bo_dec_usecnt(bo);
}
-/* Implements async (non-vblank-synced) page flips.
- *
- * The page flip ioctl needs to return immediately, so we grab the
- * modeset semaphore on the pipe, and queue the address update for
- * when V3D is done with the BO being flipped to.
- */
-static int vc4_async_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags)
+static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
{
- struct drm_device *dev = crtc->dev;
- struct drm_plane *plane = crtc->primary;
- int ret = 0;
- struct vc4_async_flip_state *flip_state;
+ struct vc4_async_flip_state *flip_state =
+ container_of(cb, struct vc4_async_flip_state, cb.fence);
+
+ vc4_async_page_flip_complete(flip_state);
+ dma_fence_put(fence);
+}
+
+static int vc4_async_set_fence_cb(struct drm_device *dev,
+ struct vc4_async_flip_state *flip_state)
+{
+ struct drm_framebuffer *fb = flip_state->fb;
struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct dma_fence *fence;
+ int ret;
- /* Increment the BO usecnt here, so that we never end up with an
- * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
- * plane is later updated through the non-async path.
- * FIXME: we should move to generic async-page-flip when it's
- * available, so that we can get rid of this hand-made prepare_fb()
- * logic.
- */
- ret = vc4_bo_inc_usecnt(bo);
+ if (!vc4->is_vc5) {
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+ return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+ vc4_async_page_flip_seqno_complete);
+ }
+
+ ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
+ /* If there's no fence, complete the page flip immediately */
+ if (!fence) {
+ vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+ return 0;
+ }
+
+ /* If the fence has already been completed, complete the page flip */
+ if (dma_fence_add_callback(fence, &flip_state->cb.fence,
+ vc4_async_page_flip_fence_complete))
+ vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+
+ return 0;
+}
+
+static int
+vc4_async_page_flip_common(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_plane *plane = crtc->primary;
+ struct vc4_async_flip_state *flip_state;
+
flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
- if (!flip_state) {
- vc4_bo_dec_usecnt(bo);
+ if (!flip_state)
return -ENOMEM;
- }
drm_framebuffer_get(fb);
flip_state->fb = fb;
@@ -881,23 +924,79 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
- vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
- vc4_async_page_flip_complete);
+ vc4_async_set_fence_cb(dev, flip_state);
/* Driver takes ownership of state on successful async commit. */
return 0;
}
+/* Implements async (non-vblank-synced) page flips.
+ *
+ * The page flip ioctl needs to return immediately, so we grab the
+ * modeset semaphore on the pipe, and queue the address update for
+ * when V3D is done with the BO being flipped to.
+ */
+static int vc4_async_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ int ret;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
+ /*
+ * Increment the BO usecnt here, so that we never end up with an
+ * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+ * plane is later updated through the non-async path.
+ *
+ * FIXME: we should move to generic async-page-flip when
+ * it's available, so that we can get rid of this
+ * hand-made prepare_fb() logic.
+ */
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret)
+ return ret;
+
+ ret = vc4_async_page_flip_common(crtc, fb, event, flags);
+ if (ret) {
+ vc4_bo_dec_usecnt(bo);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vc5_async_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ return vc4_async_page_flip_common(crtc, fb, event, flags);
+}
+
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
struct drm_modeset_acquire_ctx *ctx)
{
- if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
- return vc4_async_page_flip(crtc, fb, event, flags);
- else
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (vc4->is_vc5)
+ return vc5_async_page_flip(crtc, fb, event, flags);
+ else
+ return vc4_async_page_flip(crtc, fb, event, flags);
+ } else {
return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx);
+ }
}
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
@@ -1149,7 +1248,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
crtc_funcs, NULL);
drm_crtc_helper_add(crtc, crtc_helper_funcs);
- if (!vc4->hvs->hvs5) {
+ if (!vc4->is_vc5) {
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
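Stripped of the flip-state bookkeeping, the new vc5 branch follows the usual dma_fence completion-callback idiom; a condensed sketch of the pattern used by vc4_async_set_fence_cb() above (gem_obj, state and flip_done_cb are placeholder names):

	struct dma_fence *fence;
	int ret;

	/* Collapse all readers on the BO's reservation object into one fence. */
	ret = dma_resv_get_singleton(gem_obj->resv, DMA_RESV_USAGE_READ, &fence);
	if (ret)
		return ret;

	/* No fence at all, or it signaled before the callback was armed:
	 * run the completion handler directly (it also drops the fence ref).
	 */
	if (!fence || dma_fence_add_callback(fence, &state->cb, flip_done_cb))
		flip_done_cb(fence, &state->cb);
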
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c180eb60bee8..ef5e3921062c 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -20,6 +20,7 @@
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include "vc4_drv.h"
@@ -131,7 +132,7 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
struct vc4_dpi *dpi = vc4_encoder->dpi;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector = NULL, *connector_scan;
- u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
+ u32 dpi_c = DPI_ENABLE;
int ret;
/* Look up the connector attached to DPI so we can get the
@@ -148,49 +149,68 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
}
drm_connector_list_iter_end(&conn_iter);
- if (connector && connector->display_info.num_bus_formats) {
- u32 bus_format = connector->display_info.bus_formats[0];
-
- switch (bus_format) {
- case MEDIA_BUS_FMT_RGB888_1X24:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
- DPI_FORMAT);
- break;
- case MEDIA_BUS_FMT_BGR888_1X24:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
- DPI_FORMAT);
- dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
- break;
- case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
- DPI_FORMAT);
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
- DPI_FORMAT);
- break;
- case MEDIA_BUS_FMT_RGB565_1X16:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
- DPI_FORMAT);
- break;
- default:
- DRM_ERROR("Unknown media bus format %d\n", bus_format);
- break;
+ /* Default to 24bit if no connector or format found. */
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
+
+ if (connector) {
+ if (connector->display_info.num_bus_formats) {
+ u32 bus_format = connector->display_info.bus_formats[0];
+
+ dpi_c &= ~DPI_FORMAT_MASK;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+ DPI_FORMAT);
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR,
+ DPI_ORDER);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
+ DPI_FORMAT);
+ break;
+ default:
+ DRM_ERROR("Unknown media bus format %d\n",
+ bus_format);
+ break;
+ }
}
- } else {
- /* Default to 24bit if no connector found. */
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ dpi_c |= DPI_PIXEL_CLK_INVERT;
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ dpi_c |= DPI_OUTPUT_ENABLE_INVERT;
}
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- dpi_c |= DPI_HSYNC_INVERT;
- else if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
- dpi_c |= DPI_HSYNC_DISABLE;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC) {
+ if (mode->flags & DRM_MODE_FLAG_NCSYNC)
+ dpi_c |= DPI_OUTPUT_ENABLE_INVERT;
+ } else {
+ dpi_c |= DPI_OUTPUT_ENABLE_MODE;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dpi_c |= DPI_HSYNC_INVERT;
+ else if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+ dpi_c |= DPI_HSYNC_DISABLE;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- dpi_c |= DPI_VSYNC_INVERT;
- else if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
- dpi_c |= DPI_VSYNC_DISABLE;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dpi_c |= DPI_VSYNC_INVERT;
+ else if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+ dpi_c |= DPI_VSYNC_DISABLE;
+ }
DPI_WRITE(DPI_C, dpi_c);
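Since the DPI format and polarities are now taken from the attached connector's display_info, the panel or bridge driving this output is expected to advertise them; a hedged sketch of how a hypothetical panel driver might do so with the generic drm_display_info_set_bus_formats() helper:

	static const u32 my_panel_bus_format = MEDIA_BUS_FMT_RGB666_1X18;

	static int my_panel_get_modes(struct drm_panel *panel,
				      struct drm_connector *connector)
	{
		int num_modes = 0;

		/* ... add the panel's modes to the connector ... */

		drm_display_info_set_bus_formats(&connector->display_info,
						 &my_panel_bus_format, 1);
		connector->display_info.bus_flags =
			DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;

		return num_modes;
	}
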
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 162bc18e7497..292d1b6a01b6 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -63,6 +63,32 @@ void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
return map;
}
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args)
+{
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ return 0;
+}
+
+static int vc5_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+
+ ret = vc4_dumb_fixup_args(args);
+ if (ret)
+ return ret;
+
+ return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+}
+
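A quick worked example of the shared fixup: for a hypothetical 800x480, 32 bpp dumb-buffer request with pitch and size left at zero, vc4_dumb_fixup_args() fills in:

	struct drm_mode_create_dumb args = {
		.width = 800, .height = 480, .bpp = 32,
	};

	vc4_dumb_fixup_args(&args);
	/* args.pitch == DIV_ROUND_UP(800 * 32, 8) == 3200 bytes
	 * args.size  == 3200 * 480 == 1536000 bytes
	 */
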
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -73,6 +99,9 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d)
return -ENODEV;
@@ -116,11 +145,16 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
static int vc4_open(struct drm_device *dev, struct drm_file *file)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
if (!vc4file)
return -ENOMEM;
+ vc4file->dev = vc4;
vc4_perfmon_open_file(vc4file);
file->driver_priv = vc4file;
@@ -132,6 +166,9 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (vc4file->bin_bo_used)
vc4_v3d_bin_bo_put(vc4);
@@ -160,7 +197,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};
-static struct drm_driver vc4_drm_driver = {
+static const struct drm_driver vc4_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM |
@@ -175,7 +212,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -189,6 +226,27 @@ static struct drm_driver vc4_drm_driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static const struct drm_driver vc5_drm_driver = {
+ .driver_features = (DRIVER_MODESET |
+ DRIVER_ATOMIC |
+ DRIVER_GEM),
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = vc4_debugfs_init,
+#endif
+
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+
+ .fops = &vc4_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
static void vc4_match_add_drivers(struct device *dev,
struct component_match **match,
struct platform_driver *const *drivers,
@@ -209,45 +267,71 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
+static const struct of_device_id vc4_dma_range_matches[] = {
+ { .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2835-hvs" },
+ { .compatible = "brcm,bcm2835-v3d" },
+ { .compatible = "brcm,cygnus-v3d" },
+ { .compatible = "brcm,vc4-v3d" },
+ {}
+};
+
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct drm_driver *driver;
struct rpi_firmware *firmware = NULL;
struct drm_device *drm;
struct vc4_dev *vc4;
struct device_node *node;
struct drm_crtc *crtc;
+ bool is_vc5;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- /* If VC4 V3D is missing, don't advertise render nodes. */
- node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
- if (!node || !of_device_is_available(node))
- vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
- of_node_put(node);
+ is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
+ if (is_vc5)
+ driver = &vc5_drm_driver;
+ else
+ driver = &vc4_drm_driver;
+
+ node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
+ NULL);
+ if (node) {
+ ret = of_dma_configure(dev, node, true);
+ of_node_put(node);
- vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+ if (ret)
+ return ret;
+ }
+
+ vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
if (IS_ERR(vc4))
return PTR_ERR(vc4);
+ vc4->is_vc5 = is_vc5;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
INIT_LIST_HEAD(&vc4->debugfs_list);
- mutex_init(&vc4->bin_bo_lock);
+ if (!is_vc5) {
+ mutex_init(&vc4->bin_bo_lock);
- ret = vc4_bo_cache_init(drm);
- if (ret)
- return ret;
+ ret = vc4_bo_cache_init(drm);
+ if (ret)
+ return ret;
+ }
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
- ret = vc4_gem_init(drm);
- if (ret)
- return ret;
+ if (!is_vc5) {
+ ret = vc4_gem_init(drm);
+ if (ret)
+ return ret;
+ }
node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
if (node) {
@@ -258,7 +342,7 @@ static int vc4_drm_bind(struct device *dev)
return -EPROBE_DEFER;
}
- ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver);
+ ret = drm_aperture_remove_framebuffers(false, driver);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 15e0c2ac3940..1beb96b77b8c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -6,6 +6,7 @@
#define _VC4_DRV_H_
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>
@@ -48,6 +49,8 @@ enum vc4_kernel_bo_type {
* done. This way, only events related to a specific job will be counted.
*/
struct vc4_perfmon {
+ struct vc4_dev *dev;
+
/* Tracks the number of users of the perfmon, when this counter reaches
* zero the perfmon is destroyed.
*/
@@ -74,6 +77,8 @@ struct vc4_perfmon {
struct vc4_dev {
struct drm_device base;
+ bool is_vc5;
+
unsigned int irq;
struct vc4_hvs *hvs;
@@ -316,6 +321,7 @@ struct vc4_v3d {
};
struct vc4_hvs {
+ struct vc4_dev *vc4;
struct platform_device *pdev;
void __iomem *regs;
u32 __iomem *dlist;
@@ -333,9 +339,6 @@ struct vc4_hvs {
struct drm_mm_node mitchell_netravali_filter;
struct debugfs_regset32 regset;
-
- /* HVS version 5 flag, therefore requires updated dlist structures */
- bool hvs5;
};
struct vc4_plane {
@@ -580,6 +583,8 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
struct vc4_exec_info {
+ struct vc4_dev *dev;
+
/* Sequence number for this bin/render job. */
uint64_t seqno;
@@ -701,6 +706,8 @@ struct vc4_exec_info {
* released when the DRM file is closed should be placed here.
*/
struct vc4_file {
+ struct vc4_dev *dev;
+
struct {
struct idr idr;
struct mutex lock;
@@ -814,9 +821,9 @@ struct vc4_validated_shader_info {
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
bool from_cache, enum vc4_kernel_bo_type type);
-int vc4_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+int vc4_bo_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -885,6 +892,7 @@ static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);
/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 98308a17e4ed..b7b2c76770dc 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -181,8 +181,50 @@
#define DSI0_TXPKT_PIX_FIFO 0x20 /* AKA PIX_FIFO */
-#define DSI0_INT_STAT 0x24
-#define DSI0_INT_EN 0x28
+#define DSI0_INT_STAT 0x24
+#define DSI0_INT_EN 0x28
+# define DSI0_INT_FIFO_ERR BIT(25)
+# define DSI0_INT_CMDC_DONE_MASK VC4_MASK(24, 23)
+# define DSI0_INT_CMDC_DONE_SHIFT 23
+# define DSI0_INT_CMDC_DONE_NO_REPEAT 1
+# define DSI0_INT_CMDC_DONE_REPEAT 3
+# define DSI0_INT_PHY_DIR_RTF BIT(22)
+# define DSI0_INT_PHY_D1_ULPS BIT(21)
+# define DSI0_INT_PHY_D1_STOP BIT(20)
+# define DSI0_INT_PHY_RXLPDT BIT(19)
+# define DSI0_INT_PHY_RXTRIG BIT(18)
+# define DSI0_INT_PHY_D0_ULPS BIT(17)
+# define DSI0_INT_PHY_D0_LPDT BIT(16)
+# define DSI0_INT_PHY_D0_FTR BIT(15)
+# define DSI0_INT_PHY_D0_STOP BIT(14)
+/* Signaled when the clock lane enters the given state. */
+# define DSI0_INT_PHY_CLK_ULPS BIT(13)
+# define DSI0_INT_PHY_CLK_HS BIT(12)
+# define DSI0_INT_PHY_CLK_FTR BIT(11)
+/* Signaled on timeouts */
+# define DSI0_INT_PR_TO BIT(10)
+# define DSI0_INT_TA_TO BIT(9)
+# define DSI0_INT_LPRX_TO BIT(8)
+# define DSI0_INT_HSTX_TO BIT(7)
+/* Contention on a line when trying to drive the line low */
+# define DSI0_INT_ERR_CONT_LP1 BIT(6)
+# define DSI0_INT_ERR_CONT_LP0 BIT(5)
+/* Control error: incorrect line state sequence on data lane 0. */
+# define DSI0_INT_ERR_CONTROL BIT(4)
+# define DSI0_INT_ERR_SYNC_ESC BIT(3)
+# define DSI0_INT_RX2_PKT BIT(2)
+# define DSI0_INT_RX1_PKT BIT(1)
+# define DSI0_INT_CMD_PKT BIT(0)
+
+#define DSI0_INTERRUPTS_ALWAYS_ENABLED (DSI0_INT_ERR_SYNC_ESC | \
+ DSI0_INT_ERR_CONTROL | \
+ DSI0_INT_ERR_CONT_LP0 | \
+ DSI0_INT_ERR_CONT_LP1 | \
+ DSI0_INT_HSTX_TO | \
+ DSI0_INT_LPRX_TO | \
+ DSI0_INT_TA_TO | \
+ DSI0_INT_PR_TO)
+
# define DSI1_INT_PHY_D3_ULPS BIT(30)
# define DSI1_INT_PHY_D3_STOP BIT(29)
# define DSI1_INT_PHY_D2_ULPS BIT(28)
@@ -761,6 +803,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->disable)
iter->funcs->disable(iter);
+
+ if (iter == dsi->bridge)
+ break;
}
vc4_dsi_ulps(dsi, true);
@@ -805,11 +850,9 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
/* Find what divider gets us a faster clock than the requested
* pixel clock.
*/
- for (divider = 1; divider < 8; divider++) {
- if (parent_rate / divider < pll_clock) {
- divider--;
+ for (divider = 1; divider < 255; divider++) {
+ if (parent_rate / (divider + 1) < pll_clock)
break;
- }
}
/* Now that we've picked a PLL divider, calculate back to its
@@ -894,6 +937,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_WRITE(PHY_AFEC0, afec0);
+ /* AFEC reset hold time */
+ mdelay(1);
+
DSI_PORT_WRITE(PHY_AFEC1,
VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE1) |
VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE0) |
@@ -1060,12 +1106,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
/* Bring AFE out of reset. */
- if (dsi->variant->port == 0) {
- } else {
- DSI_PORT_WRITE(PHY_AFEC0,
- DSI_PORT_READ(PHY_AFEC0) &
- ~DSI1_PHY_AFEC0_RESET);
- }
+ DSI_PORT_WRITE(PHY_AFEC0,
+ DSI_PORT_READ(PHY_AFEC0) &
+ ~DSI_PORT_BIT(PHY_AFEC0_RESET));
vc4_dsi_ulps(dsi, false);
@@ -1184,13 +1227,28 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
/* Enable the appropriate interrupt for the transfer completion. */
dsi->xfer_result = 0;
reinit_completion(&dsi->xfer_completion);
- DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
- if (msg->rx_len) {
- DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
- DSI1_INT_PHY_DIR_RTF));
+ if (dsi->variant->port == 0) {
+ DSI_PORT_WRITE(INT_STAT,
+ DSI0_INT_CMDC_DONE_MASK | DSI1_INT_PHY_DIR_RTF);
+ if (msg->rx_len) {
+ DSI_PORT_WRITE(INT_EN, (DSI0_INTERRUPTS_ALWAYS_ENABLED |
+ DSI0_INT_PHY_DIR_RTF));
+ } else {
+ DSI_PORT_WRITE(INT_EN,
+ (DSI0_INTERRUPTS_ALWAYS_ENABLED |
+ VC4_SET_FIELD(DSI0_INT_CMDC_DONE_NO_REPEAT,
+ DSI0_INT_CMDC_DONE)));
+ }
} else {
- DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
- DSI1_INT_TXPKT1_DONE));
+ DSI_PORT_WRITE(INT_STAT,
+ DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
+ if (msg->rx_len) {
+ DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+ DSI1_INT_PHY_DIR_RTF));
+ } else {
+ DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+ DSI1_INT_TXPKT1_DONE));
+ }
}
/* Send the packet. */
@@ -1207,7 +1265,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
ret = dsi->xfer_result;
}
- DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+ DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED));
if (ret)
goto reset_fifo_and_return;
@@ -1253,7 +1311,7 @@ reset_fifo_and_return:
DSI_PORT_BIT(CTRL_RESET_FIFOS));
DSI_PORT_WRITE(TXPKT1C, 0);
- DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+ DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED));
return ret;
}
@@ -1390,26 +1448,28 @@ static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
DSI_PORT_WRITE(INT_STAT, stat);
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_SYNC_ESC, "LPDT sync");
+ DSI_PORT_BIT(INT_ERR_SYNC_ESC), "LPDT sync");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_CONTROL, "data lane 0 sequence");
+ DSI_PORT_BIT(INT_ERR_CONTROL), "data lane 0 sequence");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_CONT_LP0, "LP0 contention");
+ DSI_PORT_BIT(INT_ERR_CONT_LP0), "LP0 contention");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_CONT_LP1, "LP1 contention");
+ DSI_PORT_BIT(INT_ERR_CONT_LP1), "LP1 contention");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_HSTX_TO, "HSTX timeout");
+ DSI_PORT_BIT(INT_HSTX_TO), "HSTX timeout");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_LPRX_TO, "LPRX timeout");
+ DSI_PORT_BIT(INT_LPRX_TO), "LPRX timeout");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_TA_TO, "turnaround timeout");
+ DSI_PORT_BIT(INT_TA_TO), "turnaround timeout");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_PR_TO, "peripheral reset timeout");
+ DSI_PORT_BIT(INT_PR_TO), "peripheral reset timeout");
- if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) {
+ if (stat & ((dsi->variant->port ? DSI1_INT_TXPKT1_DONE :
+ DSI0_INT_CMDC_DONE_MASK) |
+ DSI_PORT_BIT(INT_PHY_DIR_RTF))) {
complete(&dsi->xfer_completion);
ret = IRQ_HANDLED;
- } else if (stat & DSI1_INT_HSTX_TO) {
+ } else if (stat & DSI_PORT_BIT(INT_HSTX_TO)) {
complete(&dsi->xfer_completion);
dsi->xfer_result = -ETIMEDOUT;
ret = IRQ_HANDLED;
@@ -1487,13 +1547,29 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
dsi->clk_onecell);
}
+static void vc4_dsi_dma_mem_release(void *ptr)
+{
+ struct vc4_dsi *dsi = ptr;
+ struct device *dev = &dsi->pdev->dev;
+
+ dma_free_coherent(dev, 4, dsi->reg_dma_mem, dsi->reg_dma_paddr);
+ dsi->reg_dma_mem = NULL;
+}
+
+static void vc4_dsi_dma_chan_release(void *ptr)
+{
+ struct vc4_dsi *dsi = ptr;
+
+ dma_release_channel(dsi->reg_dma_chan);
+ dsi->reg_dma_chan = NULL;
+}
+
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
- dma_cap_mask_t dma_mask;
int ret;
dsi->variant = of_device_get_match_data(dev);
@@ -1504,7 +1580,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
INIT_LIST_HEAD(&dsi->bridge_chain);
- vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
+ vc4_dsi_encoder->base.type = dsi->variant->port ?
+ VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
vc4_dsi_encoder->dsi = dsi;
dsi->encoder = &vc4_dsi_encoder->base.base;
@@ -1527,6 +1604,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
* so set up a channel for talking to it.
*/
if (dsi->variant->broken_axi_workaround) {
+ dma_cap_mask_t dma_mask;
+
dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
&dsi->reg_dma_paddr,
GFP_KERNEL);
@@ -1535,8 +1614,13 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
}
+ ret = devm_add_action_or_reset(dev, vc4_dsi_dma_mem_release, dsi);
+ if (ret)
+ return ret;
+
dma_cap_zero(dma_mask);
dma_cap_set(DMA_MEMCPY, dma_mask);
+
dsi->reg_dma_chan = dma_request_chan_by_mask(&dma_mask);
if (IS_ERR(dsi->reg_dma_chan)) {
ret = PTR_ERR(dsi->reg_dma_chan);
@@ -1546,6 +1630,10 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ ret = devm_add_action_or_reset(dev, vc4_dsi_dma_chan_release, dsi);
+ if (ret)
+ return ret;
+
/* Get the physical address of the device's registers. The
* struct resource for the regs gives us the bus address
* instead.
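Most of the register handling above leans on the driver's existing DSI_PORT_*() helpers to pick between the DSI0_ and DSI1_ register names; the macro itself is not shown in these hunks, but it presumably token-pastes the port-specific prefix, along the lines of:

	/* Sketch of the kind of selector the driver uses; the actual
	 * definition lives elsewhere in vc4_dsi.c and may differ in detail.
	 */
	#define DSI_PORT_BIT(bit)	(dsi->variant->port ?	\
					 DSI1_##bit :		\
					 DSI0_##bit)

	/* e.g. DSI_PORT_BIT(INT_HSTX_TO) resolves to DSI0_INT_HSTX_TO on
	 * port 0 and DSI1_INT_HSTX_TO on port 1.
	 */
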
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 9eaf304fc20d..fe10d9c3fff8 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
u32 i;
int ret = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
return -ENODEV;
@@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
unsigned long timeout_expire;
DEFINE_WAIT(wait);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (vc4->finished_seqno >= seqno)
return 0;
@@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
again:
exec = vc4_first_bin_job(vc4);
if (!exec)
@@ -513,6 +522,9 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* A previous RCL may have written to one of our textures, and
* our full cache flush at bin time may have occurred before
* that RCL completed. Flush the texture cache now, but not
@@ -531,6 +543,9 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool was_empty = list_empty(&vc4->render_job_list);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
list_move_tail(&exec->head, &vc4->render_job_list);
if (was_empty)
vc4_submit_next_render_job(dev);
@@ -997,6 +1012,9 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
unsigned long irqflags;
struct vc4_seqno_cb *cb, *cb_temp;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
while (!list_empty(&vc4->job_done_list)) {
struct vc4_exec_info *exec =
@@ -1033,6 +1051,9 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
cb->func = func;
INIT_WORK(&cb->work, vc4_seqno_cb_work);
@@ -1083,8 +1104,12 @@ int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_wait_seqno *args = data;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
&args->timeout_ns);
}
@@ -1093,11 +1118,15 @@ int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
struct drm_vc4_wait_bo *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->pad != 0)
return -EINVAL;
@@ -1144,6 +1173,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
args->shader_rec_size,
args->bo_handle_count);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
return -ENODEV;
@@ -1167,6 +1199,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("malloc failure on exec struct\n");
return -ENOMEM;
}
+ exec->dev = vc4;
ret = vc4_v3d_pm_get(vc4);
if (ret) {
@@ -1276,6 +1309,9 @@ int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
vc4->dma_fence_context = dma_fence_context_alloc(1);
INIT_LIST_HEAD(&vc4->bin_job_list);
@@ -1321,11 +1357,15 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused)
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_gem_madvise *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
switch (args->madv) {
case VC4_MADV_DONTNEED:
case VC4_MADV_WILLNEED:
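
All of the legacy GEM entry points above gain the same guard: on the newer generation the 3D core is handled elsewhere, so these paths must never run and should fail loudly once rather than misprogram hardware. A minimal sketch of the guard pattern at the top of a hypothetical ioctl handler (names other than the ones in the diff are illustrative):

/* Hypothetical ioctl handler gated on the generation flag. */
static int foo_legacy_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* One-time backtrace, then refuse the uAPI on the new core. */
	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	/* ... legacy path ... */
	return 0;
}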
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6aadb65eb640..6ab83296b0e4 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -38,6 +38,7 @@
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
@@ -78,6 +79,11 @@
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+#define VC4_HDMI_MISC_CONTROL_PIXEL_REP_SHIFT 0
+#define VC4_HDMI_MISC_CONTROL_PIXEL_REP_MASK VC4_MASK(3, 0)
+#define VC5_HDMI_MISC_CONTROL_PIXEL_REP_SHIFT 0
+#define VC5_HDMI_MISC_CONTROL_PIXEL_REP_MASK VC4_MASK(3, 0)
+
#define VC5_HDMI_SCRAMBLER_CTL_ENABLE BIT(0)
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
@@ -144,6 +150,12 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
drm_print_regset32(&p, &vc4_hdmi->hd_regset);
+ drm_print_regset32(&p, &vc4_hdmi->cec_regset);
+ drm_print_regset32(&p, &vc4_hdmi->csc_regset);
+ drm_print_regset32(&p, &vc4_hdmi->dvp_regset);
+ drm_print_regset32(&p, &vc4_hdmi->phy_regset);
+ drm_print_regset32(&p, &vc4_hdmi->ram_regset);
+ drm_print_regset32(&p, &vc4_hdmi->rm_regset);
return 0;
}
@@ -417,6 +429,7 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
+ connector->stereo_allowed = 1;
if (vc4_hdmi->variant->supports_hdr)
drm_connector_attach_hdr_output_metadata_property(connector);
@@ -454,9 +467,11 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
const struct vc4_hdmi_register *ram_packet_start =
&vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START];
u32 packet_reg = ram_packet_start->offset + VC4_HDMI_PACKET_STRIDE * packet_id;
+ u32 packet_reg_next = ram_packet_start->offset +
+ VC4_HDMI_PACKET_STRIDE * (packet_id + 1);
void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi,
ram_packet_start->reg);
- uint8_t buffer[VC4_HDMI_PACKET_STRIDE];
+ uint8_t buffer[VC4_HDMI_PACKET_STRIDE] = {};
unsigned long flags;
ssize_t len, i;
int ret;
@@ -492,6 +507,13 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
packet_reg += 4;
}
+ /*
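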
+	 * Clear the remainder of the packet RAM as it's included in the
+	 * infoframe and triggers a checksum error on an HDMI analyser.
+ */
+ for (; packet_reg < packet_reg_next; packet_reg += 4)
+ writel(0, base + packet_reg);
+
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
@@ -583,7 +605,9 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
union hdmi_infoframe frame;
memcpy(&frame.audio, audio, sizeof(*audio));
- vc4_hdmi_write_infoframe(encoder, &frame);
+
+ if (vc4_hdmi->packet_ram_enabled)
+ vc4_hdmi_write_infoframe(encoder, &frame);
}
static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder)
@@ -723,6 +747,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
mutex_lock(&vc4_hdmi->mutex);
+ vc4_hdmi->packet_ram_enabled = false;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
@@ -770,15 +796,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
mutex_unlock(&vc4_hdmi->mutex);
}
-static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-
- mutex_lock(&vc4_hdmi->mutex);
- vc4_hdmi->output_enabled = false;
- mutex_unlock(&vc4_hdmi->mutex);
-}
-
static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
@@ -969,14 +986,15 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTA_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, VC4_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
+ interlaced,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end -
- interlaced,
+ mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
+ u32 reg;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1003,6 +1021,11 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
+ reg = HDMI_READ(HDMI_MISC_CONTROL);
+ reg &= ~VC4_HDMI_MISC_CONTROL_PIXEL_REP_MASK;
+ reg |= VC4_SET_FIELD(pixel_rep - 1, VC4_HDMI_MISC_CONTROL_PIXEL_REP);
+ HDMI_WRITE(HDMI_MISC_CONTROL, reg);
+
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
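
The new MISC_CONTROL writes above use the usual read-modify-write idiom for a multi-bit register field: clear the field with its mask, then OR in the shifted value. A small self-contained sketch of that idiom with hypothetical register and field names:

#include <stdint.h>
#include <stdio.h>

#define PIXEL_REP_SHIFT	0
#define PIXEL_REP_MASK	(0xf << PIXEL_REP_SHIFT)	/* bits 3:0 */

/* Insert 'val' into the PIXEL_REP field of 'reg' without touching other bits. */
static uint32_t set_pixel_rep(uint32_t reg, uint32_t val)
{
	reg &= ~PIXEL_REP_MASK;
	reg |= (val << PIXEL_REP_SHIFT) & PIXEL_REP_MASK;
	return reg;
}

int main(void)
{
	uint32_t misc = 0xabcdef30;

	/* A pixel repetition of 2 is programmed as "2 - 1" in the field. */
	printf("0x%08x\n", set_pixel_rep(misc, 2 - 1));
	return 0;
}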
@@ -1021,13 +1044,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
VC5_HDMI_VERTA_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
- u32 vertb = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
+ u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
+ VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end -
- interlaced,
+ mode->crtc_vsync_end - interlaced,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
unsigned char gcp;
@@ -1101,6 +1124,11 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
HDMI_WRITE(HDMI_GCP_CONFIG, reg);
+ reg = HDMI_READ(HDMI_MISC_CONTROL);
+ reg &= ~VC5_HDMI_MISC_CONTROL_PIXEL_REP_MASK;
+ reg |= VC4_SET_FIELD(pixel_rep - 1, VC5_HDMI_MISC_CONTROL_PIXEL_REP);
+ HDMI_WRITE(HDMI_MISC_CONTROL, reg);
+
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
@@ -1329,14 +1357,12 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE));
- HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
- HDMI_READ(HDMI_SCHEDULER_CONTROL) |
- VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
VC4_HDMI_RAM_PACKET_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ vc4_hdmi->packet_ram_enabled = true;
vc4_hdmi_set_infoframes(encoder);
}
@@ -1347,15 +1373,6 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
mutex_unlock(&vc4_hdmi->mutex);
}
-static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-
- mutex_lock(&vc4_hdmi->mutex);
- vc4_hdmi->output_enabled = true;
- mutex_unlock(&vc4_hdmi->mutex);
-}
-
static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -1480,7 +1497,7 @@ vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
{
- unsigned long long clock = mode->clock * 1000;
+ unsigned long long clock = mode->clock * 1000ULL;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock = clock * 2;
@@ -1596,18 +1613,37 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(conn_state->state, connector);
+ struct vc4_hdmi_connector_state *old_vc4_state =
+ conn_state_to_vc4_hdmi_conn_state(old_conn_state);
struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long long tmds_char_rate = mode->clock * 1000;
unsigned long long tmds_bit_rate;
int ret;
- if (vc4_hdmi->variant->unsupported_odd_h_timings &&
- !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
- ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
- (mode->hsync_end % 2) || (mode->htotal % 2)))
- return -EINVAL;
+ if (vc4_hdmi->variant->unsupported_odd_h_timings) {
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+			/* Only try to fix up DBLCLK modes to get 480i and 576i
+ * working.
+ * A generic solution for all modes with odd horizontal
+ * timing values seems impossible based on trying to
+ * solve it for 1366x768 monitors.
+ */
+ if ((mode->hsync_start - mode->hdisplay) & 1)
+ mode->hsync_start--;
+ if ((mode->hsync_end - mode->hsync_start) & 1)
+ mode->hsync_end--;
+ }
+
+ /* Now check whether we still have odd values remaining */
+ if ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2))
+ return -EINVAL;
+ }
/*
 * The 1440p@60 pixel rate is in the same range as the first
@@ -1627,6 +1663,11 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
if (ret)
return ret;
+ /* vc4_hdmi_encoder_compute_config may have changed output_bpc and/or output_format */
+ if (vc4_state->output_bpc != old_vc4_state->output_bpc ||
+ vc4_state->output_format != old_vc4_state->output_format)
+ crtc_state->mode_changed = true;
+
return 0;
}
@@ -1649,8 +1690,6 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.atomic_check = vc4_hdmi_encoder_atomic_check,
.atomic_mode_set = vc4_hdmi_encoder_atomic_mode_set,
.mode_valid = vc4_hdmi_encoder_mode_valid,
- .disable = vc4_hdmi_encoder_disable,
- .enable = vc4_hdmi_encoder_enable,
};
static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
@@ -1747,19 +1786,15 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
{
- lockdep_assert_held(&vc4_hdmi->mutex);
+ struct drm_display_info *display = &vc4_hdmi->connector.display_info;
- /*
- * If the controller is disabled, prevent any ALSA output.
- */
- if (!vc4_hdmi->output_enabled)
- return false;
+ lockdep_assert_held(&vc4_hdmi->mutex);
/*
* If the encoder is currently in DVI mode, treat the codec DAI
* as missing.
*/
- if (!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE))
+ if (!display->is_hdmi)
return false;
return true;
@@ -1940,10 +1975,10 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
/* Set the MAI threshold */
HDMI_WRITE(HDMI_MAI_THR,
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW));
+ VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW));
HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
@@ -2034,12 +2069,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
struct device *dev = &vc4_hdmi->pdev->dev;
struct platform_device *codec_pdev;
const __be32 *addr;
- int index;
+ int index, len;
int ret;
- if (!of_find_property(dev->of_node, "dmas", NULL)) {
+ if (!of_find_property(dev->of_node, "dmas", &len) || !len) {
dev_warn(dev,
- "'dmas' DT property is missing, no HDMI audio\n");
+ "'dmas' DT property is missing or empty, no HDMI audio\n");
return 0;
}
@@ -2520,8 +2555,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
struct cec_connector_info conn_info;
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
- unsigned long flags;
- u32 value;
int ret;
if (!of_find_property(dev->of_node, "interrupts", NULL)) {
@@ -2540,15 +2573,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- value = HDMI_READ(HDMI_CEC_CNTRL_1);
- /* Set the logical address to Unregistered */
- value |= VC4_HDMI_CEC_ADDR_MASK;
- HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
- vc4_hdmi_cec_update_clk_div(vc4_hdmi);
-
if (vc4_hdmi->variant->external_irq_controller) {
ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
@@ -2564,10 +2588,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
if (ret)
goto err_remove_cec_rx_handler;
} else {
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
ret = request_threaded_irq(platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
@@ -2618,7 +2638,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
}
static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {};
-
#endif
static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
@@ -2703,6 +2722,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
struct resource *res;
+ int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi");
if (!res)
@@ -2799,6 +2819,38 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->reset);
}
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -2814,12 +2866,34 @@ static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
static int vc4_hdmi_runtime_resume(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ unsigned long __maybe_unused flags;
+ u32 __maybe_unused value;
int ret;
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
+ if (vc4_hdmi->variant->reset)
+ vc4_hdmi->variant->reset(vc4_hdmi);
+
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ value = HDMI_READ(HDMI_CEC_CNTRL_1);
+ /* Set the logical address to Unregistered */
+ value |= VC4_HDMI_CEC_ADDR_MASK;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
+ if (!vc4_hdmi->variant->external_irq_controller) {
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ }
+#endif
+
return 0;
}
@@ -2909,9 +2983,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- if (vc4_hdmi->variant->reset)
- vc4_hdmi->variant->reset(vc4_hdmi);
-
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 51b27dcdcd9b..c3ed2b07df23 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -179,6 +179,14 @@ struct vc4_hdmi {
struct debugfs_regset32 hdmi_regset;
struct debugfs_regset32 hd_regset;
+ /* VC5 only */
+ struct debugfs_regset32 cec_regset;
+ struct debugfs_regset32 csc_regset;
+ struct debugfs_regset32 dvp_regset;
+ struct debugfs_regset32 phy_regset;
+ struct debugfs_regset32 ram_regset;
+ struct debugfs_regset32 rm_regset;
+
/**
* @hw_lock: Spinlock protecting device register access.
*/
@@ -205,10 +213,10 @@ struct vc4_hdmi {
struct drm_display_mode saved_adjusted_mode;
/**
- * @output_enabled: Is the HDMI controller currently active?
- * Protected by @mutex.
+ * @packet_ram_enabled: Is the HDMI controller packet RAM currently
+ * on? Protected by @mutex.
*/
- bool output_enabled;
+ bool packet_ram_enabled;
/**
* @scdc_enabled: Is the HDMI controller currently running with
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index a040356b6bdc..48db438550b1 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -127,6 +127,17 @@ enum vc4_hdmi_field {
HDMI_VERTB0,
HDMI_VERTB1,
HDMI_VID_CTL,
+ HDMI_MISC_CONTROL,
+ HDMI_FORMAT_DET_1,
+ HDMI_FORMAT_DET_2,
+ HDMI_FORMAT_DET_3,
+ HDMI_FORMAT_DET_4,
+ HDMI_FORMAT_DET_5,
+ HDMI_FORMAT_DET_6,
+ HDMI_FORMAT_DET_7,
+ HDMI_FORMAT_DET_8,
+ HDMI_FORMAT_DET_9,
+ HDMI_FORMAT_DET_10,
};
struct vc4_hdmi_register {
@@ -189,6 +200,7 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
VC4_HDMI_REG(HDMI_VERTB0, 0x00d0),
VC4_HDMI_REG(HDMI_VERTA1, 0x00d4),
VC4_HDMI_REG(HDMI_VERTB1, 0x00d8),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x00e4),
VC4_HDMI_REG(HDMI_CEC_CNTRL_1, 0x00e8),
VC4_HDMI_REG(HDMI_CEC_CNTRL_2, 0x00ec),
VC4_HDMI_REG(HDMI_CEC_CNTRL_3, 0x00f0),
@@ -237,8 +249,19 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
VC4_HDMI_REG(HDMI_VERTB0, 0x0f0),
VC4_HDMI_REG(HDMI_VERTA1, 0x0f4),
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x100),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x134),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x138),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x13c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x140),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x144),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x154),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x158),
VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
@@ -319,8 +342,19 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC4_HDMI_REG(HDMI_VERTB0, 0x0f0),
VC4_HDMI_REG(HDMI_VERTA1, 0x0f4),
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x100),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x134),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x138),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x13c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x140),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x144),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x154),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x158),
VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
@@ -420,7 +454,7 @@ static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi,
const struct vc4_hdmi_variant *variant = hdmi->variant;
void __iomem *base;
- WARN_ON(!pm_runtime_active(&hdmi->pdev->dev));
+ WARN_ON(pm_runtime_status_suspended(&hdmi->pdev->dev));
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
@@ -450,7 +484,7 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
lockdep_assert_held(&hdmi->hw_lock);
- WARN_ON(!pm_runtime_active(&hdmi->pdev->dev));
+ WARN_ON(pm_runtime_status_suspended(&hdmi->pdev->dev));
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2a58fc421cf6..fbaa741dda5f 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -94,6 +94,46 @@ static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
return 0;
}
+static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ unsigned int next_entry_start = 0;
+ unsigned int i, j;
+ u32 dlist_word, dispstat;
+
+ for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
+ dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(i)),
+ SCALER_DISPSTATX_MODE);
+ if (dispstat == SCALER_DISPSTATX_MODE_DISABLED ||
+ dispstat == SCALER_DISPSTATX_MODE_EOF) {
+ drm_printf(&p, "HVS chan %u disabled\n", i);
+ continue;
+ }
+
+ drm_printf(&p, "HVS chan %u:\n", i);
+
+ for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) {
+ dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
+ drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
+ dlist_word);
+ if (!next_entry_start ||
+ next_entry_start == j) {
+ if (dlist_word & SCALER_CTL0_END)
+ break;
+ next_entry_start = j +
+ VC4_GET_FIELD(dlist_word,
+ SCALER_CTL0_SIZE);
+ }
+ }
+ }
+
+ return 0;
+}
+
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
*/
@@ -220,10 +260,11 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
+ struct vc4_dev *vc4 = hvs->vc4;
u32 reg;
int ret;
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
return output;
switch (output) {
@@ -273,6 +314,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
struct drm_display_mode *mode, bool oneshot)
{
+ struct vc4_dev *vc4 = hvs->vc4;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
unsigned int chan = vc4_crtc_state->assigned_channel;
@@ -291,7 +333,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
*/
dispctrl = SCALER_DISPCTRLX_ENABLE;
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
@@ -312,7 +354,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
SCALER_DISPBKGND_AUTOHS |
- ((!hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) |
+ ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
(interlace ? SCALER_DISPBKGND_INTERLACE : 0));
/* Reload the LUT, since the SRAMs would have been disabled if
@@ -617,11 +659,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (!hvs)
return -ENOMEM;
+ hvs->vc4 = vc4;
hvs->pdev = pdev;
- if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs"))
- hvs->hvs5 = true;
-
hvs->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(hvs->regs))
return PTR_ERR(hvs->regs);
@@ -630,7 +670,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
hvs->regset.regs = hvs_regs;
hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
- if (hvs->hvs5) {
+ if (vc4->is_vc5) {
hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hvs->core_clk)) {
dev_err(&pdev->dev, "Couldn't get core clock\n");
@@ -644,7 +684,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
}
}
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
hvs->dlist = hvs->regs + SCALER_DLIST_START;
else
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
@@ -665,7 +705,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
/* 48k words of 2x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
@@ -734,6 +774,8 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
NULL);
+ vc4_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist,
+ NULL);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 4342fb43e8c1..2eacfb6773d2 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -265,6 +265,9 @@ vc4_irq_enable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (!vc4->v3d)
return;
@@ -279,6 +282,9 @@ vc4_irq_disable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (!vc4->v3d)
return;
@@ -296,8 +302,12 @@ vc4_irq_disable(struct drm_device *dev)
int vc4_irq_install(struct drm_device *dev, int irq)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
@@ -316,6 +326,9 @@ void vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
vc4_irq_disable(dev);
free_irq(vc4->irq, dev);
}
@@ -326,6 +339,9 @@ void vc4_irq_reset(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* Acknowledge any stale IRQs. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c169bd72e53b..b45dcdfd7306 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -393,7 +394,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
- if (vc4->hvs->hvs5) {
+ if (vc4->is_vc5) {
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
new_hvs_state->core_clock_rate);
unsigned long core_rate = max_t(unsigned long,
@@ -405,14 +406,14 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* Do a temporary request on the core clock during the
* modeset.
*/
- clk_set_min_rate(hvs->core_clk, core_rate);
+ WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
}
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
- if (vc4->hvs->hvs5)
+ if (vc4->is_vc5)
vc5_hvs_pv_muxing_commit(vc4, state);
else
vc4_hvs_pv_muxing_commit(vc4, state);
@@ -430,7 +431,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- if (vc4->hvs->hvs5) {
+ if (vc4->is_vc5) {
drm_dbg(dev, "Running the core clock at %lu Hz\n",
new_hvs_state->core_clock_rate);
@@ -438,7 +439,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* Request a clock rate based on the current HVS
* requirements.
*/
- clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
+ WARN_ON(clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate));
drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
clk_get_rate(hvs->core_clk));
@@ -479,8 +480,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_mode_fb_cmd2 mode_cmd_local;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
/* If the user didn't specify a modifier, use the
* vc4_set_tiling_ioctl() state for the BO.
*/
@@ -946,7 +951,9 @@ vc4_core_clock_atomic_check(struct drm_atomic_state *state)
continue;
num_outputs++;
- cob_rate += hvs_new_state->fifo_state[i].fifo_load;
+ cob_rate = max_t(unsigned long,
+ hvs_new_state->fifo_state[i].fifo_load,
+ cob_rate);
}
pixel_rate = load_state->hvs_load;
@@ -997,11 +1004,15 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
.fb_create = vc4_fb_create,
};
+static const struct drm_mode_config_funcs vc5_mode_funcs = {
+ .atomic_check = vc4_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
- "brcm,bcm2711-vc5");
int ret;
/*
@@ -1009,7 +1020,7 @@ int vc4_kms_load(struct drm_device *dev)
* the BCM2711, but the load tracker computations are used for
* the core clock rate calculation.
*/
- if (!is_vc5) {
+ if (!vc4->is_vc5) {
/* Start with the load tracker enabled. Can be
* disabled through the debugfs load_tracker file.
*/
@@ -1025,7 +1036,7 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- if (is_vc5) {
+ if (vc4->is_vc5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
@@ -1033,7 +1044,7 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.max_height = 2048;
}
- dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 18abc06335c1..79a74184d732 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -17,13 +17,30 @@
void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{
- if (perfmon)
- refcount_inc(&perfmon->refcnt);
+ struct vc4_dev *vc4;
+
+ if (!perfmon)
+ return;
+
+ vc4 = perfmon->dev;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
+ refcount_inc(&perfmon->refcnt);
}
void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{
- if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
+ struct vc4_dev *vc4;
+
+ if (!perfmon)
+ return;
+
+ vc4 = perfmon->dev;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
+ if (refcount_dec_and_test(&perfmon->refcnt))
kfree(perfmon);
}
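
Underneath the new generation check, vc4_perfmon_get()/vc4_perfmon_put() keep the usual refcount lifetime rules: the object is created with one reference and freed only when the last reference drops. A minimal sketch of that pattern, using a hypothetical object (not part of this patch):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object following the same lifetime rules. */
struct foo_obj {
	refcount_t refcnt;
	/* ... payload ... */
};

static struct foo_obj *foo_obj_create(void)
{
	struct foo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	refcount_set(&obj->refcnt, 1);
	return obj;
}

static void foo_obj_get(struct foo_obj *obj)
{
	refcount_inc(&obj->refcnt);
}

static void foo_obj_put(struct foo_obj *obj)
{
	/* Free only when the last user drops its reference. */
	if (refcount_dec_and_test(&obj->refcnt))
		kfree(obj);
}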
@@ -32,6 +49,9 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
unsigned int i;
u32 mask;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
return;
@@ -49,6 +69,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
{
unsigned int i;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (WARN_ON_ONCE(!vc4->active_perfmon ||
perfmon != vc4->active_perfmon))
return;
@@ -64,8 +87,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
+ struct vc4_dev *vc4 = vc4file->dev;
struct vc4_perfmon *perfmon;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, id);
vc4_perfmon_get(perfmon);
@@ -76,8 +103,14 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
+ struct vc4_dev *vc4 = vc4file->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_init(&vc4file->perfmon.lock);
idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
+ vc4file->dev = vc4;
}
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
@@ -91,6 +124,11 @@ static int vc4_perfmon_idr_del(int id, void *elem, void *data)
void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
+ struct vc4_dev *vc4 = vc4file->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4file->perfmon.lock);
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
@@ -107,6 +145,9 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
unsigned int i;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
return -ENODEV;
@@ -127,6 +168,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
GFP_KERNEL);
if (!perfmon)
return -ENOMEM;
+ perfmon->dev = vc4;
for (i = 0; i < req->ncounters; i++)
perfmon->events[i] = req->events[i];
@@ -157,6 +199,9 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
return -ENODEV;
@@ -182,6 +227,9 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct vc4_perfmon *perfmon;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index b3438f4a81ce..f27e87a23df7 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -18,8 +18,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -310,16 +312,16 @@ static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
adjhdisplay,
crtc_state->mode.hdisplay);
vc4_pstate->crtc_x += left;
- if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left)
- vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left;
+ if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
+ vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;
adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
adjvdisplay,
crtc_state->mode.vdisplay);
vc4_pstate->crtc_y += top;
- if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top)
- vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top;
+ if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
+ vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;
vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
adjhdisplay,
@@ -339,7 +341,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
- u32 subpixel_src_mask = (1 << 16) - 1;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -361,18 +362,15 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
for (i = 0; i < num_planes; i++)
vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
- /* We don't support subpixel source positioning for scaling. */
- if ((state->src.x1 & subpixel_src_mask) ||
- (state->src.x2 & subpixel_src_mask) ||
- (state->src.y1 & subpixel_src_mask) ||
- (state->src.y2 & subpixel_src_mask)) {
- return -EINVAL;
- }
-
- vc4_state->src_x = state->src.x1 >> 16;
- vc4_state->src_y = state->src.y1 >> 16;
- vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
- vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;
+ /*
+ * We don't support subpixel source positioning for scaling,
+	 * but fractional coordinates can be generated by clipping,
+	 * so just round for now.
+ */
+ vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
+ vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
+ vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
+ vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;
vc4_state->crtc_x = state->dst.x1;
vc4_state->crtc_y = state->dst.y1;
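
The replacement above stops rejecting fractional source coordinates and instead rounds the 16.16 fixed-point values to the nearest integer, so that clip-generated fractions no longer fail the commit. A small self-contained sketch of that rounding with made-up coordinates (for positive values this matches DIV_ROUND_CLOSEST(x, 1 << 16)):

#include <stdint.h>
#include <stdio.h>

/* Round a positive 16.16 fixed-point value to the nearest integer. */
static int32_t fp16_round(int32_t x)
{
	return (x + (1 << 15)) >> 16;
}

int main(void)
{
	int32_t src_x1 = (100 << 16) + 0x7000;	/* 100.4375 -> 100 */
	int32_t src_x2 = (740 << 16) + 0x9000;	/* 740.5625 -> 741 */

	int32_t src_x = fp16_round(src_x1);
	int32_t src_w = fp16_round(src_x2) - src_x;

	printf("src_x=%d src_w=%d\n", src_x, src_w);	/* src_x=100 src_w=641 */
	return 0;
}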
@@ -489,10 +487,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
}
/* Align it to 64 or 128 (hvs5) bytes */
- lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+ lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
- lbm /= vc4->hvs->hvs5 ? 4 : 2;
+ lbm /= vc4->is_vc5 ? 4 : 2;
return lbm;
}
@@ -608,7 +606,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
&vc4_state->lbm,
lbm_size,
- vc4->hvs->hvs5 ? 64 : 32,
+ vc4->is_vc5 ? 64 : 32,
0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
@@ -668,6 +666,48 @@ static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
}
};
+static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
+{
+ if (!state->fb->format->has_alpha)
+ return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
+ SCALER_POS2_ALPHA_MODE);
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
+ SCALER_POS2_ALPHA_MODE);
+ default:
+ case DRM_MODE_BLEND_PREMULTI:
+ return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
+ SCALER_POS2_ALPHA_MODE) |
+ SCALER_POS2_ALPHA_PREMULT;
+ case DRM_MODE_BLEND_COVERAGE:
+ return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
+ SCALER_POS2_ALPHA_MODE);
+ }
+}
+
+static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
+{
+ if (!state->fb->format->has_alpha)
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+ default:
+ case DRM_MODE_BLEND_PREMULTI:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE) |
+ SCALER5_CTL2_ALPHA_PREMULT;
+ case DRM_MODE_BLEND_COVERAGE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE);
+ }
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -917,7 +957,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
fb->format->has_alpha;
- if (!vc4->hvs->hvs5) {
+ if (!vc4->is_vc5) {
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
@@ -950,13 +990,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Position Word 2: Source Image Size, Alpha */
vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->format->has_alpha ?
- SCALER_POS2_ALPHA_MODE_PIPELINE :
- SCALER_POS2_ALPHA_MODE_FIXED,
- SCALER_POS2_ALPHA_MODE) |
(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
- (fb->format->has_alpha ?
- SCALER_POS2_ALPHA_PREMULT : 0) |
+ vc4_hvs4_get_alpha_blend_mode(state) |
VC4_SET_FIELD(vc4_state->src_w[0],
SCALER_POS2_WIDTH) |
VC4_SET_FIELD(vc4_state->src_h[0],
@@ -1001,14 +1036,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(state->alpha >> 4,
SCALER5_CTL2_ALPHA) |
- (fb->format->has_alpha ?
- SCALER5_CTL2_ALPHA_PREMULT : 0) |
+ vc4_hvs5_get_alpha_blend_mode(state) |
(mix_plane_alpha ?
- SCALER5_CTL2_ALPHA_MIX : 0) |
- VC4_SET_FIELD(fb->format->has_alpha ?
- SCALER5_CTL2_ALPHA_MODE_PIPELINE :
- SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE)
+ SCALER5_CTL2_ALPHA_MIX : 0)
);
/* Position Word 1: Scaled Image Dimensions. */
@@ -1321,6 +1351,10 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane,
old_vc4_state = to_vc4_plane_state(plane->state);
new_vc4_state = to_vc4_plane_state(new_plane_state);
+
+ if (!new_vc4_state->hw_dlist)
+ return -EINVAL;
+
if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
@@ -1385,6 +1419,13 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_async_update = vc4_plane_atomic_async_update,
};
+static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
+ .atomic_check = vc4_plane_atomic_check,
+ .atomic_update = vc4_plane_atomic_update,
+ .atomic_async_check = vc4_plane_atomic_async_check,
+ .atomic_async_update = vc4_plane_atomic_async_update,
+};
+
static bool vc4_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -1453,14 +1494,13 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
int num_formats = 0;
int ret = 0;
unsigned i;
- bool hvs5 = of_device_is_compatible(dev->dev->of_node,
- "brcm,bcm2711-vc5");
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
DRM_FORMAT_MOD_BROADCOM_SAND128,
@@ -1476,7 +1516,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- if (!hvs_formats[i].hvs5_only || hvs5) {
+ if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
@@ -1490,9 +1530,16 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
- drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
+ if (vc4->is_vc5)
+ drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
+ else
+ drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180 |
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index a2b5cbbbc1b0..f0290fad991d 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -264,7 +264,7 @@
* output line.
*/
# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8))
-/* Set when the the downstream tries to read from the display FIFO
+/* Set when the downstream tries to read from the display FIFO
* while it's empty.
*/
# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8))
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 3c918eeaf56e..f6b7dc3df08c 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_rcl_setup setup = {0};
struct drm_vc4_submit_cl *args = exec->args;
bool has_bin = args->bin_cl_size != 0;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->min_x_tile > args->max_x_tile ||
args->min_y_tile > args->max_y_tile) {
DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 3579d487402e..d20b0bc51a18 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -18,6 +18,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 7bb3067f8425..cc714dcfe1f2 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
mutex_lock(&vc4->power_lock);
if (vc4->power_refcount++ == 0) {
int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
@@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0) {
pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
@@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
uint64_t seqno = 0;
struct vc4_exec_info *exec;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
try_again:
spin_lock_irqsave(&vc4->job_lock, irqflags);
slot = ffs(~vc4->bin_alloc_used);
@@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
{
int ret = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
mutex_lock(&vc4->bin_bo_lock);
if (used && *used)
@@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *ref)
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->bin_bo_lock);
kref_put(&vc4->bin_bo_kref, bin_bo_release);
mutex_unlock(&vc4->bin_bo_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index eec76af49f04..2feba55bcef7 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
struct drm_gem_cma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
+ struct vc4_dev *vc4 = exec->dev;
struct drm_gem_cma_object *obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
if (hindex >= exec->bo_count) {
DRM_DEBUG("BO index %d greater than BO count %d\n",
hindex, exec->bo_count);
@@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
+ struct vc4_dev *vc4 = exec->dev;
uint32_t aligned_width, aligned_height, stride, size;
uint32_t utile_w = utile_width(cpp);
uint32_t utile_h = utile_height(cpp);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return false;
+
/* The shaded vertex format stores signed 12.4 fixed point
* (-2048,2047) offsets from the viewport center, so we should
* never have a render target larger than 4096. The texture
@@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *dev,
void *unvalidated,
struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t len = exec->args->bin_cl_size;
uint32_t dst_offset = 0;
uint32_t src_offset = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
while (src_offset < len) {
void *dst_pkt = validated + dst_offset;
void *src_pkt = unvalidated + src_offset;
@@ -926,9 +938,13 @@ int
vc4_validate_shader_recs(struct drm_device *dev,
struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t i;
int ret = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
for (i = 0; i < exec->shader_state_count; i++) {
ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 7cf82b071de2..e315aeb5fef5 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
+ struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
int shader_end_ip = 0;
uint32_t last_thread_switch_ip = -3;
@@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
memset(&validation_state, 0, sizeof(validation_state));
validation_state.shader = shader_obj->vaddr;
validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f73352e7b832..5c7f198c0712 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -27,6 +27,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a194aaad419..f80664cf98d0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -37,6 +37,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index f8d83358d2a0..9b2702116f93 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -580,8 +580,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
spin_unlock(&vgdev->display_info_lock);
/* not in cache - need to talk to hw */
- virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
- &cache_ent);
+ ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
+ &cache_ent);
+ if (ret)
+ return ret;
virtio_gpu_notify(vgdev);
copy_exit:
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f293e6ad52da..1cc8f3fc8e4b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -168,9 +168,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
* since virtio_gpu doesn't support dma-buf import from other devices.
*/
shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
- if (!shmem->pages) {
+ if (IS_ERR(shmem->pages)) {
drm_gem_shmem_unpin(&bo->base);
- return -EINVAL;
+ return PTR_ERR(shmem->pages);
}
if (use_dma_api) {
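
The virtio-gpu fix above matters because drm_gem_shmem_get_sg_table() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so the old NULL check could never fire and the real errno was discarded. A minimal sketch of consuming such an API, assuming the shmem-helper signature used above and a hypothetical wrapper name:

#include <linux/err.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical wrapper: propagate the encoded errno instead of guessing one. */
static int foo_get_sg_table(struct drm_gem_shmem_object *shmem_obj,
			    struct sg_table **sgt_out)
{
	struct sg_table *sgt = drm_gem_shmem_get_sg_table(shmem_obj);

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);	/* e.g. -ENOMEM */

	*sgt_out = sgt;
	return 0;
}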
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 55d80b77d9b0..44425f20d91a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -90,7 +90,6 @@ static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo)
{
- int ret;
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -98,11 +97,8 @@ int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
return -ENOMEM;
virtio_gpu_array_add_obj(objs, &bo->base.base);
- ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
- if (ret)
- return ret;
- return 0;
+ return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7c052efe8836..b7529b2b9883 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -31,6 +31,8 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+#include <drm/drm_edid.h>
+
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index c6a1036bf2ea..775b97766e08 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -157,7 +157,7 @@ static void compose_plane(struct vkms_composer *primary_composer,
void *vaddr;
void (*pixel_blend)(const u8 *p_src, u8 *p_dst);
- if (WARN_ON(iosys_map_is_null(&primary_composer->map[0])))
+ if (WARN_ON(iosys_map_is_null(&plane_composer->map[0])))
return;
vaddr = plane_composer->map[0].vaddr;
@@ -180,7 +180,7 @@ static int compose_active_planes(void **vaddr_out,
int i;
if (!*vaddr_out) {
- *vaddr_out = kzalloc(gem_obj->size, GFP_KERNEL);
+ *vaddr_out = kvzalloc(gem_obj->size, GFP_KERNEL);
if (!*vaddr_out) {
DRM_ERROR("Cannot allocate memory for output frame.");
return -ENOMEM;
@@ -213,7 +213,7 @@ static int compose_active_planes(void **vaddr_out,
*
* Work handler for composing and computing CRCs. work_struct scheduled in
* an ordered workqueue that's periodically scheduled to run by
- * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
+ * vkms_vblank_simulate() and flushed at vkms_atomic_commit_tail().
*/
void vkms_composer_worker(struct work_struct *work)
{
@@ -263,7 +263,7 @@ void vkms_composer_worker(struct work_struct *work)
crtc_state);
if (ret) {
if (ret == -EINVAL && !wb_pending)
- kfree(vaddr_out);
+ kvfree(vaddr_out);
return;
}
@@ -275,7 +275,7 @@ void vkms_composer_worker(struct work_struct *work)
crtc_state->wb_pending = false;
spin_unlock_irq(&out->composer_lock);
} else {
- kfree(vaddr_out);
+ kvfree(vaddr_out);
}
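
The vkms change swaps kzalloc()/kfree() for kvzalloc()/kvfree() because the composition buffer scales with the framebuffer and can exceed what the page allocator will hand out contiguously; kvmalloc falls back to vmalloc for large requests. A minimal sketch of the pairing, with a hypothetical buffer helper:

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical frame-sized scratch buffer: may be several MiB for 4k output. */
static void *foo_alloc_frame(size_t size)
{
	/* Zeroed; physically contiguous if cheap, vmalloc-backed otherwise. */
	return kvzalloc(size, GFP_KERNEL);
}

static void foo_free_frame(void *frame)
{
	/* kvfree() handles both kmalloc- and vmalloc-backed allocations. */
	kvfree(frame);
}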
/*
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 91e63b12f60f..1d60654b553b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -6,6 +6,7 @@
#include <linux/hrtimer.h>
#include <drm/drm.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index ba0e82ae549a..991857125bb4 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -2,6 +2,7 @@
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 0a315221d1f5..3b3c1e757ab4 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -3,6 +3,7 @@
#include <linux/iosys-map.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_writeback.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index eb94433067ba..85a66014c2b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -393,6 +393,12 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
kfree(vmw_bo);
}
+/* default destructor */
+static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
+{
+ kfree(bo);
+}
+
/**
* vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
*
@@ -425,7 +431,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
ttm_bo_type_kernel, placement, 0,
- &ctx, NULL, NULL, NULL);
+ &ctx, NULL, NULL, vmw_bo_default_destroy);
if (unlikely(ret))
goto error_free;
@@ -448,6 +454,8 @@ int vmw_bo_create(struct vmw_private *vmw,
{
int ret;
+ BUG_ON(!bo_free);
+
*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
if (unlikely(!*p_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0ba9739f406d..5b85b477e4c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -26,6 +26,7 @@
*
**************************************************************************/
+#include <linux/fb.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 693028c31b6b..ff2f735bbe7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -252,7 +252,7 @@ static void vmw_cursor_update_position(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, TRUE);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 1d1c8b82c898..7046dfd0d1c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,6 +29,7 @@
#define VMWGFX_KMS_H_
#include <drm/drm_encoder.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index cefafe859aba..a987c78abe41 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -80,15 +80,6 @@ struct drm_pending_vblank_event;
/* timeout in ms to wait for backend to respond */
#define XEN_DRM_FRONT_WAIT_BACK_MS 3000
-#ifndef GRANT_INVALID_REF
-/*
- * Note on usage of grant reference 0 as invalid grant reference:
- * grant reference 0 is valid, but never exposed to a PV driver,
- * because of the fact it is already in use/reserved by the PV console.
- */
-#define GRANT_INVALID_REF 0
-#endif
-
struct xen_drm_front_info {
struct xenbus_device *xb_dev;
struct xen_drm_front_drm_info *drm_info;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
index 44f1f70c0aed..a1ba6d3d0568 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <video/videomode.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
index 08b526eeec16..e52afd792346 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -123,12 +123,12 @@ out:
static void evtchnl_free(struct xen_drm_front_info *front_info,
struct xen_drm_front_evtchnl *evtchnl)
{
- unsigned long page = 0;
+ void *page = NULL;
if (evtchnl->type == EVTCHNL_TYPE_REQ)
- page = (unsigned long)evtchnl->u.req.ring.sring;
+ page = evtchnl->u.req.ring.sring;
else if (evtchnl->type == EVTCHNL_TYPE_EVT)
- page = (unsigned long)evtchnl->u.evt.page;
+ page = evtchnl->u.evt.page;
if (!page)
return;
@@ -147,8 +147,7 @@ static void evtchnl_free(struct xen_drm_front_info *front_info,
xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);
/* end access and free the page */
- if (evtchnl->gref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(evtchnl->gref, page);
+ xenbus_teardown_ring(&page, 1, &evtchnl->gref);
memset(evtchnl, 0, sizeof(*evtchnl));
}
@@ -158,8 +157,7 @@ static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
enum xen_drm_front_evtchnl_type type)
{
struct xenbus_device *xb_dev = front_info->xb_dev;
- unsigned long page;
- grant_ref_t gref;
+ void *page;
irq_handler_t handler;
int ret;
@@ -168,44 +166,25 @@ static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
evtchnl->index = index;
evtchnl->front_info = front_info;
evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
- evtchnl->gref = GRANT_INVALID_REF;
- page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!page) {
- ret = -ENOMEM;
+ ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
+ 1, &evtchnl->gref);
+ if (ret)
goto fail;
- }
if (type == EVTCHNL_TYPE_REQ) {
struct xen_displif_sring *sring;
init_completion(&evtchnl->u.req.completion);
mutex_init(&evtchnl->u.req.req_io_lock);
- sring = (struct xen_displif_sring *)page;
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
-
- ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
- if (ret < 0) {
- evtchnl->u.req.ring.sring = NULL;
- free_page(page);
- goto fail;
- }
+ sring = page;
+ XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
handler = evtchnl_interrupt_ctrl;
} else {
- ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
- virt_to_gfn((void *)page), 0);
- if (ret < 0) {
- free_page(page);
- goto fail;
- }
-
- evtchnl->u.evt.page = (struct xendispl_event_page *)page;
- gref = ret;
+ evtchnl->u.evt.page = page;
handler = evtchnl_interrupt_evt;
}
- evtchnl->gref = gref;
ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
if (ret < 0)
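
[Editor's note on the xen_drm_front_evtchnl.c hunks above] The conversion drops the open-coded get_zeroed_page() + SHARED_RING_INIT()/FRONT_RING_INIT() + xenbus_grant_ring()/gnttab_end_foreign_access() sequence in favour of xenbus_setup_ring()/xenbus_teardown_ring(), which allocate or free the shared pages and manage the grant references in one call. A minimal sketch of that lifecycle for a single-page display ring; ring_connect()/ring_disconnect() are hypothetical helpers with error paths trimmed:

    #include <xen/xen.h>                      /* XEN_PAGE_SIZE, XEN_FRONT_RING_INIT */
    #include <xen/xenbus.h>                   /* xenbus_setup_ring(), xenbus_teardown_ring() */
    #include <xen/interface/io/displif.h>     /* struct xen_displif_sring / _front_ring */

    static int ring_connect(struct xenbus_device *xb_dev,
                            struct xen_displif_front_ring *ring,
                            grant_ref_t *gref)
    {
            void *page;
            int ret;

            /* Allocates one shared page and grants it to the backend. */
            ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page, 1, gref);
            if (ret)
                    return ret;

            /* Initializes the shared ring and the front ring in one go. */
            XEN_FRONT_RING_INIT(ring, (struct xen_displif_sring *)page, XEN_PAGE_SIZE);
            return 0;
    }

    static void ring_disconnect(struct xen_displif_front_ring *ring, grant_ref_t *gref)
    {
            void *page = ring->sring;

            if (!page)
                    return;

            /* Ends foreign access and frees the shared page(s). */
            xenbus_teardown_ring(&page, 1, gref);
            ring->sring = NULL;
    }
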
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 5a5bf4e5b717..e31554d7139f 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -71,7 +71,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_pgoff = 0;
/*
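
[Editor's note on the xen_drm_front_gem.c hunk above] Adding VM_DONTEXPAND keeps userspace from growing the mapping with mremap() beyond the pages the driver actually backs, alongside the existing VM_MIXEDMAP handling. A sketch of the flag handling in a GEM mmap hook; my_gem_mmap() is hypothetical, and the open-coded vm_flags writes match the kernel generation this diff targets:

    #include <linux/mm.h>
    #include <drm/drm_gem.h>

    /* Hypothetical mmap hook: struct-page-backed mapping, fixed in size. */
    static int my_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
    {
            vma->vm_flags &= ~VM_PFNMAP;                   /* pages are inserted, not a raw PFN range */
            vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;  /* struct-page backed; no mremap() growth */
            vma->vm_pgoff = 0;

            /* ... map the object's pages, e.g. with vm_insert_page() ... */
            return 0;
    }
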
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index cfda74490765..dfa78a49a6d9 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 11c409cbc88e..cc32aa89cf8f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 155971c319b2..d14612b34796 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>