-rw-r--r-- Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml | 11
-rw-r--r-- Documentation/gpu/drm-internals.rst | 6
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 26
-rw-r--r-- Documentation/gpu/drm-kms.rst | 6
-rw-r--r-- Documentation/gpu/drm-mm.rst | 2
-rw-r--r-- Documentation/gpu/i915.rst | 1
-rw-r--r-- Documentation/gpu/todo.rst | 15
-rw-r--r-- Documentation/gpu/vkms.rst | 2
-rw-r--r-- drivers/char/agp/ati-agp.c | 8
-rw-r--r-- drivers/char/agp/backend.c | 2
-rw-r--r-- drivers/char/agp/frontend.c | 4
-rw-r--r-- drivers/char/agp/nvidia-agp.c | 3
-rw-r--r-- drivers/char/agp/sworks-agp.c | 5
-rw-r--r-- drivers/char/agp/via-agp.c | 3
-rw-r--r-- drivers/dma-buf/dma-resv.c | 46
-rw-r--r-- drivers/dma-buf/st-dma-resv.c | 26
-rw-r--r-- drivers/dma-buf/udmabuf.c | 4
-rw-r--r-- drivers/firmware/sysfb_simplefb.c | 2
-rw-r--r-- drivers/gpu/drm/Kconfig | 15
-rw-r--r-- drivers/gpu/drm/Makefile | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/os_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/include/dpcd_defs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/d71/d71_component.c | 12
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 3
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.c | 3
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 18
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 5
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511.h | 1
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 31
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7533.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 440
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.h | 80
-rw-r--r-- drivers/gpu/drm/bridge/cadence/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 20
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h | 2
-rw-r--r-- drivers/gpu/drm/bridge/chipone-icn6211.c | 39
-rw-r--r-- drivers/gpu/drm/bridge/ite-it66121.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/nwl-dsi.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8640.c | 33
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 129
-rw-r--r-- drivers/gpu/drm/bridge/sil-sii8620.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 16
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 25
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/tc358775.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 53
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 4
-rw-r--r-- drivers/gpu/drm/dp/Makefile | 9
-rw-r--r-- drivers/gpu/drm/dp/drm_dp.c (renamed from drivers/gpu/drm/drm_dp_helper.c) | 6
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_aux_bus.c (renamed from drivers/gpu/drm/drm_dp_aux_bus.c) | 4
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_aux_dev.c (renamed from drivers/gpu/drm/drm_dp_aux_dev.c) | 6
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_cec.c (renamed from drivers/gpu/drm/drm_dp_cec.c) | 2
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_dual_mode_helper.c (renamed from drivers/gpu/drm/drm_dp_dual_mode_helper.c) | 2
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_helper_internal.h | 33
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_helper_mod.c | 22
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_mst_topology.c (renamed from drivers/gpu/drm/drm_dp_mst_topology.c) | 8
-rw-r--r-- drivers/gpu/drm/dp/drm_dp_mst_topology_internal.h (renamed from drivers/gpu/drm/drm_dp_mst_topology_internal.h) | 2
-rw-r--r-- drivers/gpu/drm/drm_buddy.c | 535
-rw-r--r-- drivers/gpu/drm/drm_color_mgmt.c | 4
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper_internal.h | 27
-rw-r--r-- drivers/gpu/drm/drm_dsc.c | 2
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 104
-rw-r--r-- drivers/gpu/drm/drm_kms_helper_common.c | 14
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 9
-rw-r--r-- drivers/gpu/drm/drm_privacy_screen.c | 5
-rw-r--r-- drivers/gpu/drm/drm_privacy_screen_x86.c | 17
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 13
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_dp.c | 14
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 6
-rw-r--r-- drivers/gpu/drm/gma500/intel_bios.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/mmu.c | 8
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 3
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.c | 33
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 248
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 51
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 27
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 39
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 192
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 25
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 133
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_regs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 19
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 32
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_region_lmem.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 39
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_reset.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 80
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 23
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 82
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 126
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 25
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 204
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 143
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 31
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 262
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 31
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 43
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_buddy.c | 466
-rw-r--r-- drivers/gpu/drm/i915/i915_buddy.h | 143
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 37
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 101
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 89
-rw-r--r-- drivers/gpu/drm/i915/i915_module.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 37
-rw-r--r-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 569
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 34
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_resource.c | 418
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_resource.h | 234
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_snapshot.c | 134
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_snapshot.h | 112
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_types.h | 19
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_step.c | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 42
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_buddy.c | 787
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 8
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 28
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 209
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 119
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c | 30
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_flush_test.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 13
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 11
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.c | 21
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.h | 3
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-drv.c | 3
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 62
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm.h | 38
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 25
-rw-r--r-- drivers/gpu/drm/meson/meson_osd_afbcd.c | 41
-rw-r--r-- drivers/gpu/drm/meson/meson_osd_afbcd.h | 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 5
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.h | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 2
-rw-r--r-- drivers/gpu/drm/msm/edp/edp.h | 77
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_ctrl.c | 1373
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dp.c | 17
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 317
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h | 35
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | 18
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c | 9
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 21
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-atna33xc20.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 33
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_features.h | 212
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gpu.c | 32
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_dp_mst.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r-- drivers/gpu/drm/rockchip/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 4
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.h | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_rgb.c | 2
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c | 8
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_plane_helper.c | 4
-rw-r--r-- drivers/gpu/drm/stm/drv.c | 5
-rw-r--r-- drivers/gpu/drm/stm/dw_mipi_dsi-stm.c | 114
-rw-r--r-- drivers/gpu/drm/stm/ltdc.c | 810
-rw-r--r-- drivers/gpu/drm/stm/ltdc.h | 12
-rw-r--r-- drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/tegra/dp.c | 2
-rw-r--r-- drivers/gpu/drm/tegra/dpaux.c | 2
-rw-r--r-- drivers/gpu/drm/tegra/sor.c | 2
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/bochs.c | 20
-rw-r--r-- drivers/gpu/drm/tiny/cirrus.c | 17
-rw-r--r-- drivers/gpu/drm/tiny/simpledrm.c | 22
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r-- drivers/gpu/drm/ttm/ttm_range_manager.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c | 35
-rw-r--r-- drivers/gpu/drm/ttm/ttm_sys_manager.c | 3
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 33
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 132
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h | 5
-rw-r--r-- drivers/gpu/drm/vc4/vc4_regs.h | 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_gem.c | 3
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vkms/vkms_output.c | 29
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c | 3
-rw-r--r-- drivers/gpu/drm/xlnx/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/xlnx/zynqmp_dp.c | 2
-rw-r--r-- drivers/platform/chrome/Kconfig | 11
-rw-r--r-- drivers/platform/chrome/Makefile | 1
-rw-r--r-- drivers/platform/chrome/chromeos_privacy_screen.c | 153
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 2
-rw-r--r-- drivers/video/fbdev/asiliantfb.c | 2
-rw-r--r-- drivers/video/fbdev/core/fbmem.c | 29
-rw-r--r-- drivers/video/fbdev/s3c-fb.c | 2
-rw-r--r-- drivers/video/fbdev/simplefb.c | 65
-rw-r--r-- drivers/video/fbdev/vga16fb.c | 5
-rw-r--r-- include/drm/bridge/dw_mipi_dsi.h | 4
-rw-r--r-- include/drm/dp/drm_dp_aux_bus.h (renamed from include/drm/drm_dp_aux_bus.h) | 0
-rw-r--r-- include/drm/dp/drm_dp_dual_mode_helper.h (renamed from include/drm/drm_dp_dual_mode_helper.h) | 0
-rw-r--r-- include/drm/dp/drm_dp_helper.h (renamed from include/drm/drm_dp_helper.h) | 7
-rw-r--r-- include/drm/dp/drm_dp_mst_helper.h (renamed from include/drm/drm_dp_mst_helper.h) | 2
-rw-r--r-- include/drm/drm_buddy.h | 150
-rw-r--r-- include/drm/drm_connector.h | 18
-rw-r--r-- include/drm/drm_crtc.h | 10
-rw-r--r-- include/drm/drm_dsc.h | 2
-rw-r--r-- include/drm/drm_edid.h | 4
-rw-r--r-- include/drm/drm_mipi_dbi.h | 2
-rw-r--r-- include/drm/drm_modeset_lock.h | 1
-rw-r--r-- include/drm/drm_module.h | 125
-rw-r--r-- include/drm/drm_plane.h | 2
-rw-r--r-- include/drm/drm_privacy_screen_driver.h | 13
-rw-r--r-- include/drm/ttm/ttm_resource.h | 23
-rw-r--r-- include/linux/dma-buf-map.h | 266
-rw-r--r-- include/linux/dma-resv.h | 4
-rw-r--r-- include/linux/fb.h | 1
-rw-r--r-- include/linux/rwsem.h | 2
-rw-r--r-- include/soc/bcm2835/raspberrypi-firmware.h | 1
-rw-r--r-- include/uapi/drm/panfrost_drm.h | 4
334 files changed, 8914 insertions(+), 4038 deletions(-)
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 5079c1cc337b..56b4c55f6f1b 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -39,6 +39,7 @@ properties:
- const: lvds-encoder # Generic LVDS encoder compatible fallback
- items:
- enum:
+ - ti,ds90cf364a # For the DS90CF364A FPD-Link LVDS Receiver
- ti,ds90cf384a # For the DS90CF384A FPD-Link LVDS Receiver
- const: lvds-decoder # Generic LVDS decoders compatible fallback
- enum:
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
index b446d0f0f1b4..48a97bb3e2e0 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
@@ -32,6 +32,9 @@ properties:
maxItems: 1
description: GPIO specifier for bridge_en pin (active high).
+ vcc-supply:
+ description: A 1.8V power supply (see regulator/regulator.yaml).
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -91,7 +94,6 @@ properties:
required:
- compatible
- reg
- - enable-gpios
- ports
allOf:
@@ -133,6 +135,7 @@ examples:
reg = <0x2d>;
enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&reg_sn65dsi83_1v8>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 62f5f050c1bc..9cf5588a09d8 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -222,6 +222,8 @@ properties:
- logictechno,lttd800480070-l6wh-rt
# Mitsubishi "AA070MC01 7.0" WVGA TFT LCD panel
- mitsubishi,aa070mc01-ca1
+ # Multi-Inno Technology Co.,Ltd MI0700S4T-6 7" 800x480 TFT Resistive Touch Module
+ - multi-inno,mi0700s4t-6
# Multi-Inno Technology Co.,Ltd MI1010AIT-1CP 10.1" 1280x800 LVDS IPS Cap Touch Mod.
- multi-inno,mi1010ait-1cp
# NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
index 78d060097052..059cc6dbcfca 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
@@ -4,7 +4,12 @@
$id: http://devicetree.org/schemas/display/panel/sony,acx424akp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Sony ACX424AKP 4" 480x864 AMOLED panel
+title: Sony ACX424AKP/ACX424AKM 4" 480x864/480x854 AMOLED panel
+
+description: The Sony ACX424AKP and ACX424AKM are panels built around
+ the Novatek NT35560 display controller. The only difference is that
+ the AKM is configured to use 10 pixels less in the Y axis than the
+ AKP.
maintainers:
- Linus Walleij <linus.walleij@linaro.org>
@@ -14,7 +19,9 @@ allOf:
properties:
compatible:
- const: sony,acx424akp
+ enum:
+ - sony,acx424akp
+ - sony,acx424akm
reg: true
reset-gpios: true
vddi-supply:
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 607f78f0f189..38afed24a75c 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -75,6 +75,12 @@ update it, its value is mostly useless. The DRM core prints it to the
kernel log at initialization time and passes it to userspace through the
DRM_IOCTL_VERSION ioctl.
+Module Initialization
+---------------------
+
+.. kernel-doc:: include/drm/drm_module.h
+ :doc: overview
+
Managing Ownership of the Framebuffer Aperture
----------------------------------------------
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 5bb55ec1b9b5..c3ce91eecbc1 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -232,34 +232,34 @@ HDCP Helper Functions Reference
Display Port Helper Functions Reference
=======================================
-.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp.c
:doc: dp helpers
-.. kernel-doc:: include/drm/drm_dp_helper.h
+.. kernel-doc:: include/drm/dp/drm_dp_helper.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp.c
:export:
Display Port CEC Helper Functions Reference
===========================================
-.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_cec.c
:doc: dp cec helpers
-.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_cec.c
:export:
Display Port Dual Mode Adaptor Helper Functions Reference
=========================================================
-.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_dual_mode_helper.c
:doc: dp dual mode helpers
-.. kernel-doc:: include/drm/drm_dp_dual_mode_helper.h
+.. kernel-doc:: include/drm/dp/drm_dp_dual_mode_helper.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_dual_mode_helper.c
:export:
Display Port MST Helpers
@@ -268,19 +268,19 @@ Display Port MST Helpers
Overview
--------
-.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_mst_topology.c
:doc: dp mst helper
-.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_mst_topology.c
:doc: Branch device and port refcounting
Functions Reference
-------------------
-.. kernel-doc:: include/drm/drm_dp_mst_helper.h
+.. kernel-doc:: include/drm/dp/drm_dp_mst_helper.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_mst_topology.c
:export:
Topology Lifetime Internals
@@ -289,7 +289,7 @@ Topology Lifetime Internals
These functions aren't exported to drivers, but are documented here to help make
the MST topology helpers easier to understand
-.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+.. kernel-doc:: drivers/gpu/drm/dp/drm_dp_mst_topology.c
:functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb
drm_dp_mst_topology_put_mstb
drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index d14bf1c35d7e..6f9c064fd323 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -423,12 +423,12 @@ Connector Functions Reference
Writeback Connectors
--------------------
-.. kernel-doc:: include/drm/drm_writeback.h
- :internal:
-
.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
:doc: overview
+.. kernel-doc:: include/drm/drm_writeback.h
+ :internal:
+
.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
:export:
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index e0538083a2c0..198bcc1affa1 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -8,7 +8,7 @@ the very dynamic nature of many of that data, managing graphics memory
efficiently is thus crucial for the graphics stack and plays a central
role in the DRM infrastructure.
-The DRM core includes two memory managers, namely Translation Table Maps
+The DRM core includes two memory managers, namely Translation Table Manager
(TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
manager to be developed and tried to be a one-size-fits-them all
solution. It provides a single userspace API to accommodate the need of
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index b7d801993bfa..bcaefc952764 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -539,6 +539,7 @@ GuC ABI
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
HuC
---
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index c8f39c1ef1ee..ee842606e883 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -467,6 +467,21 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
Level: Intermediate
+Request memory regions in all drivers
+-------------------------------------
+
+Go through all drivers and add code to request the memory regions that the
+driver uses. This requires adding calls to request_mem_region(),
+pci_request_region() or similar functions. Use helpers for managed cleanup
+where possible.
+
+Drivers are pretty bad at doing this and there used to be conflicts among
+DRM and fbdev drivers. Still, it's the correct thing to do.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Starter
+
Core refactorings
=================
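
An illustrative sketch for the todo item added above, not code from this series: the pattern it requests, shown for a PCI-backed DRM driver with a managed (devres) probe path. The driver name and BAR number are hypothetical.

#include <linux/pci.h>

static int my_drv_claim_aperture(struct pci_dev *pdev)
{
	int ret;

	/* pcim_enable_device() puts the device into managed mode, so
	 * the region requested below is released automatically when
	 * the driver unbinds. */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Reserve BAR 0 so a conflicting fbdev/DRM claim fails loudly. */
	return pci_request_region(pdev, 0, "my_drv aperture");
}
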
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 941f0e7e5eef..9c873c3912cc 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -124,8 +124,6 @@ Add Plane Features
There's lots of plane features we could add support for:
-- Multiple overlay planes. [Good to get started]
-
- Clearing primary plane: clear primary plane before plane composition (at the
start) for correctness of pixel blend ops. It also guarantees alpha channel
is cleared in the target buffer for stable crc. [Good to get started]
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 20bf5f78a362..6f5530482d83 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -55,7 +55,7 @@ static struct _ati_generic_private {
static int ati_create_page_map(struct ati_page_map *page_map)
{
- int i, err = 0;
+ int i, err;
page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
if (page_map->real == NULL)
@@ -63,6 +63,10 @@ static int ati_create_page_map(struct ati_page_map *page_map)
set_memory_uc((unsigned long)page_map->real, 1);
err = map_page_into_agp(virt_to_page(page_map->real));
+ if (err) {
+ free_page((unsigned long)page_map->real);
+ return err;
+ }
page_map->remapped = page_map->real;
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
@@ -303,7 +307,7 @@ static int ati_insert_memory(struct agp_memory * mem,
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge,
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
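
The hunks above do two things: check the previously ignored return value of map_page_into_agp() and fix trailing whitespace. A sketch of the resulting allocation path in ati_create_page_map(), reconstructed from the diff with the unchanged GATT loop elided:

static int ati_create_page_map(struct ati_page_map *page_map)
{
	int i, err;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	set_memory_uc((unsigned long)page_map->real, 1);
	err = map_page_into_agp(virt_to_page(page_map->real));
	if (err) {
		/* new: unwind instead of continuing with a bad mapping */
		free_page((unsigned long)page_map->real);
		return err;
	}
	/* ... GATT entry initialization continues unchanged ... */
}
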
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 004a3ce8ba72..0e19c600db53 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -62,6 +62,7 @@ EXPORT_SYMBOL(agp_find_bridge);
/**
* agp_backend_acquire - attempt to acquire an agp backend.
+ * @pdev: the PCI device
*
*/
struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
@@ -83,6 +84,7 @@ EXPORT_SYMBOL(agp_backend_acquire);
/**
* agp_backend_release - release the lock on the agp backend.
+ * @bridge: the AGP backend to release
*
* The caller must insure that the graphics aperture translation table
* is read for use by another entity.
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 00ff5fcb808a..321118a9cfa5 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -39,7 +39,9 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+
#include "agp.h"
+#include "compat_ioctl.h"
struct agp_front_data agp_fe;
@@ -1017,7 +1019,7 @@ static long agp_ioctl(struct file *file,
case AGPIOC_UNBIND:
ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
-
+
case AGPIOC_CHIPSET_FLUSH:
break;
}
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index f78e756157db..826dbd06f6bb 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -261,7 +261,8 @@ static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type
static void nvidia_tlbflush(struct agp_memory *mem)
{
unsigned long end;
- u32 wbc_reg, temp;
+ u32 wbc_reg;
+ u32 __maybe_unused temp;
int i;
/* flush chipset */
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index f875970bda65..b91da5998dd7 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -262,13 +262,10 @@ static void serverworks_tlbflush(struct agp_memory *temp)
static int serverworks_configure(void)
{
- struct aper_size_info_lvl2 *current_size;
u32 temp;
u8 enable_reg;
u16 cap_reg;
- current_size = A_SIZE_LVL2(agp_bridge->current_size);
-
/* Get the memory mapped registers */
pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
@@ -350,7 +347,7 @@ static int serverworks_insert_memory(struct agp_memory *mem,
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge,
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]), mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index a460ae352772..b2f484f527fb 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -128,9 +128,6 @@ static int via_fetch_size_agp3(void)
static int via_configure_agp3(void)
{
u32 temp;
- struct aper_size_info_16 *current_size;
-
- current_size = A_SIZE_16(agp_bridge->current_size);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 4deea75c0b9c..6dd9a40b55d4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -542,57 +542,45 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* dma_resv_get_fences - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
- * @fence_excl: the returned exclusive fence (or NULL)
- * @shared_count: the number of shared fences returned
- * @shared: the array of shared fence ptrs returned (array is krealloc'd to
- * the required size, and must be freed by caller)
+ * @write: true if we should return all fences
+ * @num_fences: the number of fences returned
+ * @fences: the array of fence ptrs returned (array is krealloc'd to the
+ * required size, and must be freed by caller)
*
- * Retrieve all fences from the reservation object. If the pointer for the
- * exclusive fence is not specified the fence is put into the array of the
- * shared fences as well. Returns either zero or -ENOMEM.
+ * Retrieve all fences from the reservation object.
+ * Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
- unsigned int *shared_count, struct dma_fence ***shared)
+int dma_resv_get_fences(struct dma_resv *obj, bool write,
+ unsigned int *num_fences, struct dma_fence ***fences)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
- *shared_count = 0;
- *shared = NULL;
+ *num_fences = 0;
+ *fences = NULL;
- if (fence_excl)
- *fence_excl = NULL;
-
- dma_resv_iter_begin(&cursor, obj, true);
+ dma_resv_iter_begin(&cursor, obj, write);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor)) {
unsigned int count;
- while (*shared_count)
- dma_fence_put((*shared)[--(*shared_count)]);
-
- if (fence_excl)
- dma_fence_put(*fence_excl);
+ while (*num_fences)
+ dma_fence_put((*fences)[--(*num_fences)]);
- count = cursor.shared_count;
- count += fence_excl ? 0 : 1;
+ count = cursor.shared_count + 1;
/* Eventually re-allocate the array */
- *shared = krealloc_array(*shared, count,
+ *fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
- if (count && !*shared) {
+ if (count && !*fences) {
dma_resv_iter_end(&cursor);
return -ENOMEM;
}
}
- dma_fence_get(fence);
- if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
- *fence_excl = fence;
- else
- (*shared)[(*shared_count)++] = fence;
+ (*fences)[(*num_fences)++] = dma_fence_get(fence);
}
dma_resv_iter_end(&cursor);
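
A caller-side sketch of the reworked interface described by the kernel-doc above: the exclusive-fence out-parameter is gone, the new write flag selects whether all fences are returned, and the caller still owns both the fence references and the krealloc'd array. Error handling trimmed; matches the updated callers later in this diff.

	struct dma_fence **fences = NULL;
	unsigned int count, i;
	int r;

	r = dma_resv_get_fences(resv, true, &count, &fences);
	if (r)
		return r;

	for (i = 0; i < count; i++)
		dma_fence_put(fences[i]);	/* drop returned references */
	kfree(fences);				/* free the array itself */
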
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index bc32b3eedcb6..cbe999c6e7a6 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -275,7 +275,7 @@ static int test_shared_for_each_unlocked(void *arg)
static int test_get_fences(void *arg, bool shared)
{
- struct dma_fence *f, *excl = NULL, **fences = NULL;
+ struct dma_fence *f, **fences = NULL;
struct dma_resv resv;
int r, i;
@@ -304,35 +304,19 @@ static int test_get_fences(void *arg, bool shared)
}
dma_resv_unlock(&resv);
- r = dma_resv_get_fences(&resv, &excl, &i, &fences);
+ r = dma_resv_get_fences(&resv, shared, &i, &fences);
if (r) {
pr_err("get_fences failed\n");
goto err_free;
}
- if (shared) {
- if (excl != NULL) {
- pr_err("get_fences returned unexpected excl fence\n");
- goto err_free;
- }
- if (i != 1 || fences[0] != f) {
- pr_err("get_fences returned unexpected shared fence\n");
- goto err_free;
- }
- } else {
- if (excl != f) {
- pr_err("get_fences returned unexpected excl fence\n");
- goto err_free;
- }
- if (i != 0) {
- pr_err("get_fences returned unexpected shared fence\n");
- goto err_free;
- }
+ if (i != 1 || fences[0] != f) {
+ pr_err("get_fences returned unexpected fence\n");
+ goto err_free;
}
dma_fence_signal(f);
err_free:
- dma_fence_put(excl);
while (i--)
dma_fence_put(fences[i]);
kfree(fences);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c57a609db75b..e7330684d3b8 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -190,6 +190,10 @@ static long udmabuf_create(struct miscdevice *device,
if (ubuf->pagecount > pglimit)
goto err;
}
+
+ if (!ubuf->pagecount)
+ goto err;
+
ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
GFP_KERNEL);
if (!ubuf->pages) {
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 303a491e520d..76c4abc42a30 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -99,7 +99,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
/* setup IORESOURCE_MEM as framebuffer memory */
memset(&res, 0, sizeof(res));
- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res.flags = IORESOURCE_MEM;
res.name = simplefb_resname;
res.start = base;
res.end = res.start + length - 1;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b1f22e457fd0..dfdd3ec5f793 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -68,6 +68,7 @@ config DRM_DEBUG_SELFTEST
depends on DRM
depends on DEBUG_KERNEL
select PRIME_NUMBERS
+ select DRM_DP_HELPER
select DRM_LIB_RANDOM
select DRM_KMS_HELPER
select DRM_EXPORT_FOR_TESTS if m
@@ -80,6 +81,12 @@ config DRM_DEBUG_SELFTEST
If in doubt, say "N".
+config DRM_DP_HELPER
+ tristate
+ depends on DRM
+ help
+ DRM helpers for DisplayPort.
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -198,6 +205,12 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_BUDDY
+ tristate
+ depends on DRM
+ help
+ A page based buddy allocator
+
config DRM_VRAM_HELPER
tristate
depends on DRM
@@ -236,6 +249,7 @@ config DRM_RADEON
depends on DRM && PCI && MMU
depends on AGP || !AGP
select FW_LOADER
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
@@ -256,6 +270,7 @@ config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 301a44dc18e3..8675c2af7ae1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -31,8 +31,6 @@ drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm-$(CONFIG_DRM_PRIVACY_SCREEN) += drm_privacy_screen.o drm_privacy_screen_x86.o
-obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
-
obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o
drm_cma_helper-y := drm_gem_cma_helper.o
@@ -42,27 +40,26 @@ obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
+obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
+
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
-drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
+drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o \
drm_dsc.o drm_encoder_slave.o drm_flip_work.o drm_hdcp.o \
drm_probe_helper.o \
- drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
- drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
+ drm_plane_helper.o drm_atomic_helper.o \
+ drm_kms_helper_common.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_atomic_helper.o \
drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o \
drm_format_helper.o drm_self_refresh_helper.o drm_rect.o
-
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
-drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
-drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
@@ -72,6 +69,7 @@ obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-y += arm/
+obj-y += dp/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index c16a2704ced6..fa20261aa928 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -26,7 +26,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -175,7 +175,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+ if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) &&
(mode_clock * 5/4 <= max_tmds_clock))
bpc = 10;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 06d07502a1f6..e8440d306496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1274,14 +1274,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/*
* Work around dma_resv shortcommings by wrapping up the
* submission in a dma_fence_chain and add it as exclusive
- * fence, but first add the submission as shared fence to make
- * sure that shared fences never signal before the exclusive
- * one.
+ * fence.
*/
dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
dma_fence_get(p->fence), 1);
- dma_resv_add_shared_fence(resv, p->fence);
rcu_assign_pointer(resv->fence_excl, &chain->base);
e->chain = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82011e75ed85..3f21a13882a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -200,8 +200,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
- &work->shared_count, &work->shared);
+ /* TODO: Unify this with other drivers */
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+ &work->shared_count,
+ &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index c0d8f40a5b45..9b12cab5e606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,12 +226,6 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- fence = dma_resv_excl_fence(bo->tbo.base.resv);
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r || !fence)
goto out_unlock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 72022df264f6..e0c7fbe01d93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -167,6 +167,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
return 0;
err_free:
+ ttm_resource_fini(man, &node->base.base);
kfree(node);
err_out:
@@ -198,6 +199,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
atomic64_sub(res->num_pages, &mgr->used);
+ ttm_resource_fini(man, res);
kfree(node);
}
@@ -286,7 +288,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
man->use_tt = true;
man->func = &amdgpu_gtt_mgr_func;
- ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
+ ttm_resource_manager_init(man, &adev->mman.bdev,
+ gtt_size >> PAGE_SHIFT);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b7fb72bff2c1..be48487e2ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned count;
int r;
- r = dma_resv_get_fences(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences(resv, true, &count, &fences);
if (r)
goto fallback;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6043bf6fd414..902235fae4cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -33,7 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
@@ -44,7 +44,7 @@
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include "modules/inc/mod_freesync.h"
#include "amdgpu_dm_irq_params.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index 786afe4f58f9..0d85c2096ab5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -95,6 +95,7 @@ static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
atomic64_sub(res->num_pages, &mgr->used);
+ ttm_resource_fini(man, res);
kfree(res);
}
@@ -152,7 +153,7 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
man->use_tt = true;
man->func = &amdgpu_preempt_mgr_func;
- ttm_resource_manager_init(man, (1 << 30));
+ ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
atomic64_set(&mgr->used, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c3f24069f2a..d178fbec7048 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2087,7 +2087,7 @@ static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
@@ -2105,7 +2105,7 @@ static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
TTM_PL_TT);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
@@ -2116,7 +2116,7 @@ static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
AMDGPU_PL_GDS);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
@@ -2127,7 +2127,7 @@ static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
AMDGPU_PL_GWS);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
@@ -2138,7 +2138,7 @@ static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
AMDGPU_PL_OA);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7a2b487db57c..7442095f089c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -472,6 +472,7 @@ error_free:
while (i--)
drm_mm_remove_node(&node->mm_nodes[i]);
spin_unlock(&mgr->lock);
+ ttm_resource_fini(man, &node->base);
kvfree(node);
error_sub:
@@ -511,6 +512,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
+ ttm_resource_fini(man, res);
kvfree(node);
}
@@ -689,7 +691,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct ttm_resource_manager *man = &mgr->manager;
- ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
+ ttm_resource_manager_init(man, &adev->mman.bdev,
+ adev->gmc.real_vram_size >> PAGE_SHIFT);
man->func = &amdgpu_vram_mgr_func;
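
The recurring pattern in the amdgpu hunks above reflects a single TTM contract change: ttm_resource_manager_init() now takes the ttm_device, and a manager must call ttm_resource_fini() before freeing a resource. A sketch of a del() callback under the new rules, with a hypothetical manager name:

static void my_mgr_del(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	/* driver bookkeeping (usage counters, mm nodes) comes first */
	ttm_resource_fini(man, res);	/* balances ttm_resource_init() */
	kfree(res);
}
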
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index f327becb022f..49a2f594fb2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -34,7 +34,7 @@
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_atombios.h"
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7f9773f8dab6..526076e4bde3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -76,7 +76,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
@@ -5856,7 +5856,7 @@ static void fill_stream_properties_from_drm_display_mode(
else if (drm_mode_is_420_also(info, mode_in)
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
- else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
else
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b9a69b0cef23..bee806ae3e52 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -29,7 +29,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_plane.h>
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index cc34a35d0bcb..35c944a8e74d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -25,8 +25,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_mst_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 7f25c11f4248..48a18766f002 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -27,7 +27,7 @@
#include <dc_link.h>
#include <inc/link_hwss.h>
#include <inc/link_dpcd.h>
-#include "drm/drm_dp_helper.h"
+#include <drm/dp/drm_dp_helper.h>
#include <dc_dp_types.h>
#include "dm_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 9c74564cbd8d..efc2339f1fa0 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -25,7 +25,7 @@
#include <drm/drm_dsc.h>
#include "dc_hw_types.h"
#include "dsc.h"
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "dc.h"
#include "rc_calc.h"
#include "fixed31_32.h"
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 5df1d80c8341..17d05071b809 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -36,7 +36,7 @@
#include <asm/byteorder.h>
#include <drm/drm_print.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "cgs_common.h"
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index ffd0df1701e6..270260e82b61 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DPCD_DEFS_H__
#define __DAL_DPCD_DEFS_H__
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
#define DP_SINK_HW_REVISION_START 0x409
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 399fbca8947b..8502263d2968 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -30,7 +30,7 @@
#include "hdcp_log.h"
#include <drm/drm_hdcp.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
enum mod_hdcp_trans_input_result {
UNKNOWN = 0,
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 8a02ade369db..42510fdea27e 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -1078,11 +1078,11 @@ static void d71_improc_update(struct komeda_component *c,
mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
/* config color format */
- if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+ if (st->color_format == DRM_COLOR_FORMAT_YCBCR420)
ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
- else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+ else if (st->color_format == DRM_COLOR_FORMAT_YCBCR422)
ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
- else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+ else if (st->color_format == DRM_COLOR_FORMAT_YCBCR444)
ctrl |= IPS_CTRL_YUV;
malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
@@ -1144,11 +1144,11 @@ static int d71_improc_init(struct d71_dev *d71,
improc = to_improc(c);
improc->supported_color_depths = BIT(8) | BIT(10);
improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 |
- DRM_COLOR_FORMAT_YCRCB444 |
- DRM_COLOR_FORMAT_YCRCB422;
+ DRM_COLOR_FORMAT_YCBCR444 |
+ DRM_COLOR_FORMAT_YCBCR422;
value = malidp_read32(reg, BLK_INFO);
if (value & IPS_INFO_CHD420)
- improc->supported_color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ improc->supported_color_formats |= DRM_COLOR_FORMAT_YCBCR420;
improc->supports_csc = true;
improc->supports_gamma = true;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index e7933930a657..51e51ff299b7 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
@@ -198,7 +199,7 @@ static struct platform_driver komeda_platform_driver = {
},
};
-module_platform_driver(komeda_platform_driver);
+drm_module_platform_driver(komeda_platform_driver);
MODULE_AUTHOR("James.Qian.Wang <james.qian.wang@arm.com>");
MODULE_DESCRIPTION("Komeda KMS driver");
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 479c2422a2e0..e89ae0ec60eb 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -30,6 +30,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -434,7 +435,7 @@ static struct platform_driver hdlcd_platform_driver = {
},
};
-module_platform_driver(hdlcd_platform_driver);
+drm_module_platform_driver(hdlcd_platform_driver);
MODULE_AUTHOR("Liviu Dudau");
MODULE_DESCRIPTION("ARM HDLCD DRM driver");
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 78d15b04b105..d5aef21426cf 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -1008,7 +1009,7 @@ static struct platform_driver malidp_platform_driver = {
},
};
-module_platform_driver(malidp_platform_driver);
+drm_module_platform_driver(malidp_platform_driver);
MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
MODULE_DESCRIPTION("ARM Mali DP DRM driver");
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6d8613f6fe1c..7465c4f0156a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -230,22 +231,7 @@ static struct pci_driver ast_pci_driver = {
.driver.pm = &ast_pm_ops,
};
-static int __init ast_init(void)
-{
- if (drm_firmware_drivers_only() && ast_modeset == -1)
- return -EINVAL;
-
- if (ast_modeset == 0)
- return -EINVAL;
- return pci_register_driver(&ast_pci_driver);
-}
-static void __exit ast_exit(void)
-{
- pci_unregister_driver(&ast_pci_driver);
-}
-
-module_init(ast_init);
-module_exit(ast_exit);
+drm_module_pci_driver_if_modeset(ast_pci_driver, ast_modeset);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
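
For reference, the open-coded init/exit pair deleted above is exactly the boilerplate the new drm_module_pci_driver_if_modeset() macro absorbs (the komeda/hdlcd/malidp conversions earlier do the same for platform drivers). Below is a sketch of what such a macro could expand to, mirroring the removed ast code; the name and expansion are illustrative, see <drm/drm_module.h> for the real definition:

#include <drm/drm_drv.h>
#include <linux/module.h>
#include <linux/pci.h>

/* Illustrative expansion only; the in-tree macro lives in <drm/drm_module.h>. */
#define my_drm_module_pci_driver_if_modeset(pci_drv, modeset)		\
static int __init pci_drv##_init(void)					\
{									\
	if (drm_firmware_drivers_only() && (modeset) == -1)		\
		return -EINVAL;						\
	if ((modeset) == 0)						\
		return -EINVAL;						\
	return pci_register_driver(&(pci_drv));			\
}									\
module_init(pci_drv##_init);						\
static void __exit pci_drv##_exit(void)				\
{									\
	pci_unregister_driver(&(pci_drv));				\
}									\
module_exit(pci_drv##_exit)
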
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 79a361867955..22e9e2d3c71a 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -209,6 +209,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
if (ast->chip == AST2500 &&
scu_rev == 0x100) /* ast2510 */
ast->support_wide_screen = true;
+ if (ast->chip == AST2600) /* ast2600 */
+ ast->support_wide_screen = true;
}
break;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 400845d4144c..2c7115a4d81f 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -471,7 +471,10 @@ static void ast_set_color_reg(struct ast_private *ast,
static void ast_set_crtthd_reg(struct ast_private *ast)
{
/* Set Threshold */
- if (ast->chip == AST2300 || ast->chip == AST2400 ||
+ if (ast->chip == AST2600) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
+ } else if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 61db5a66b493..fcd93f1aec90 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -30,6 +30,7 @@ config DRM_CDNS_DSI
config DRM_CHIPONE_ICN6211
tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge"
depends on OF
+ depends on DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
help
@@ -183,6 +184,7 @@ config DRM_PARADE_PS8640
tristate "Parade PS8640 MIPI DSI to eDP Converter"
depends on OF
select DRM_DP_AUX_BUS
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
@@ -253,6 +255,7 @@ config DRM_TOSHIBA_TC358764
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on OF
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
@@ -272,6 +275,7 @@ config DRM_TOSHIBA_TC358768
config DRM_TOSHIBA_TC358775
tristate "Toshiba TC358775 DSI/LVDS bridge"
depends on OF
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
@@ -299,6 +303,7 @@ config DRM_TI_SN65DSI83
config DRM_TI_SN65DSI86
tristate "TI SN65DSI86 DSI to eDP bridge"
depends on OF
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 592ecfcf00ca..6a882891d91c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -169,6 +169,7 @@
#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
+#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6)
#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index f8e5da148599..005bf18682ff 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -223,7 +223,7 @@ static void adv7511_set_config_csc(struct adv7511 *adv7511,
config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
if ((connector->display_info.color_formats &
- DRM_COLOR_FORMAT_YCRCB422) &&
+ DRM_COLOR_FORMAT_YCBCR422) &&
config.hdmi_mode) {
config.csc_enable = false;
config.avi_infoframe.colorspace =
@@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
* from standby or are enabled. When the HPD goes low the adv7511 is
* reset and the outputs are disabled which might cause the monitor to
* go to standby again. To avoid this we ignore the HPD pin for the
- * first few seconds after enabling the output.
+	 * first few seconds after enabling the output. On the other
+	 * hand, the ADV7535 requires the HPD Override bit to be set
+	 * for HPD to work properly.
*/
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HPD_SRC_MASK,
- ADV7511_REG_POWER2_HPD_SRC_NONE);
+ if (adv7511->type == ADV7535)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7535_REG_POWER2_HPD_OVERRIDE,
+ ADV7535_REG_POWER2_HPD_OVERRIDE);
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
}
static void adv7511_power_on(struct adv7511 *adv7511)
@@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511)
static void __adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
+ if (adv7511->type == ADV7535)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7535_REG_POWER2_HPD_OVERRIDE, 0);
+
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
@@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
status = connector_status_disconnected;
} else {
		/* Re-enable HPD sensing */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HPD_SRC_MASK,
- ADV7511_REG_POWER2_HPD_SRC_BOTH);
+ if (adv7511->type == ADV7535)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7535_REG_POWER2_HPD_OVERRIDE,
+ ADV7535_REG_POWER2_HPD_OVERRIDE);
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_BOTH);
}
adv7511->status = status;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index eb7579dec40a..ef6270806d1d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -29,7 +29,7 @@ static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
- u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+ static const u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
hsw = mode->hsync_end - mode->hsync_start;
hfp = mode->hsync_start - mode->hdisplay;
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index 2ef6eb2b786c..319ba0df57be 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -3,6 +3,7 @@ config DRM_ANALOGIX_ANX6345
tristate "Analogix ANX6345 bridge"
depends on OF
select DRM_ANALOGIX_DP
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
help
@@ -14,6 +15,7 @@ config DRM_ANALOGIX_ANX6345
config DRM_ANALOGIX_ANX78XX
tristate "Analogix ANX78XX bridge"
select DRM_ANALOGIX_DP
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
help
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index e33cd077595a..94e56a2e91f2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -22,7 +22,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 5e6a0ed39199..2768b41c48e9 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -21,7 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
index fe40bab21530..e8297168bfef 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -8,7 +8,7 @@
#include <linux/regmap.h>
#include <drm/drm.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_print.h>
#include "analogix-i2c-dptx.h"
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index b7d2e4449cfa..eb590fb8e8d0 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1537,9 +1537,9 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
video->color_depth = COLOR_8;
break;
}
- if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
video->color_space = COLOR_YCBCR444;
- else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ else if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
video->color_space = COLOR_YCBCR422;
else
video->color_space = COLOR_RGB;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index c051502d7fbf..32665203a6ae 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -10,7 +10,7 @@
#define _ANALOGIX_DP_CORE_H
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 2346dbcc505f..76662fce4ce6 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -24,8 +24,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_hdcp.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -213,6 +214,65 @@ static int wait_aux_op_finish(struct anx7625_data *ctx)
return 0;
}
+static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
+ u32 address, u8 len, u8 *buf)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+ u8 addrh, addrm, addrl;
+ u8 cmd;
+
+ if (len > MAX_DPCD_BUFFER_SIZE) {
+		dev_err(dev, "read length exceeds aux buffer size.\n");
+ return -EINVAL;
+ }
+
+ addrl = address & 0xFF;
+ addrm = (address >> 8) & 0xFF;
+ addrh = (address >> 16) & 0xFF;
+
+	cmd = DPCD_CMD(len, DPCD_READ);
+
+ /* Set command and length */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, cmd);
+
+ /* Set aux access address */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, addrl);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_15_8, addrm);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_19_16, addrh);
+
+ /* Enable aux access */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+
+ if (ret < 0) {
+ dev_err(dev, "cannot access aux related register.\n");
+ return -EIO;
+ }
+
+ usleep_range(2000, 2100);
+
+ ret = wait_aux_op_finish(ctx);
+ if (ret) {
+ dev_err(dev, "aux IO error: wait aux op finish.\n");
+ return ret;
+ }
+
+ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START, len, buf);
+ if (ret < 0) {
+ dev_err(dev, "read dpcd register failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
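
For reference, the AP_AUX_COMMAND byte written above packs the transfer length and the AUX opcode into a single register; the DPCD_CMD() macro added in anx7625.h does the encoding (length minus one in the high nibble, opcode in the low one). A worked example, with values derived from the macro definition; the static_asserts are illustrative, not part of the patch:

#include <linux/build_bug.h>

#define LENGTH_SHIFT	4
#define DPCD_READ	0x09
#define DPCD_CMD(len, cmd)	((((len) - 1) << LENGTH_SHIFT) | (cmd))

static_assert(DPCD_CMD(1, DPCD_READ) == 0x09);	/* 1-byte DPCD read */
static_assert(DPCD_CMD(16, DPCD_READ) == 0xf9);	/* 16-byte DPCD read */
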
static int anx7625_video_mute_control(struct anx7625_data *ctx,
u8 status)
{
@@ -669,6 +729,165 @@ static int anx7625_dpi_config(struct anx7625_data *ctx)
return ret;
}
+static int anx7625_read_flash_status(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, R_RAM_CTRL);
+}
+
+static int anx7625_hdcp_key_probe(struct anx7625_data *ctx)
+{
+ int ret, val;
+ struct device *dev = &ctx->client->dev;
+ u8 ident[FLASH_BUF_LEN];
+
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ FLASH_ADDR_HIGH, 0x91);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ FLASH_ADDR_LOW, 0xA0);
+ if (ret < 0) {
+ dev_err(dev, "IO error : set key flash address.\n");
+ return ret;
+ }
+
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ FLASH_LEN_HIGH, (FLASH_BUF_LEN - 1) >> 8);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ FLASH_LEN_LOW, (FLASH_BUF_LEN - 1) & 0xFF);
+ if (ret < 0) {
+ dev_err(dev, "IO error : set key flash len.\n");
+ return ret;
+ }
+
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_FLASH_RW_CTRL, FLASH_READ);
+ ret |= readx_poll_timeout(anx7625_read_flash_status,
+ ctx, val,
+ ((val & FLASH_DONE) || (val < 0)),
+ 2000,
+ 2000 * 150);
+ if (ret) {
+ dev_err(dev, "flash read access fail!\n");
+ return -EIO;
+ }
+
+ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+ FLASH_BUF_BASE_ADDR,
+ FLASH_BUF_LEN, ident);
+ if (ret < 0) {
+ dev_err(dev, "read flash data fail!\n");
+ return -EIO;
+ }
+
+ if (ident[29] == 0xFF && ident[30] == 0xFF && ident[31] == 0xFF)
+ return -EINVAL;
+
+ return 0;
+}
+
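The readx_poll_timeout() call in anx7625_hdcp_key_probe() above comes from <linux/iopoll.h>: it repeatedly evaluates val = op(arg) until the condition holds or the timeout expires, sleeping between reads, and returns 0 on success or -ETIMEDOUT. A minimal sketch of the pattern, with my_read_status() as a hypothetical accessor standing in for anx7625_read_flash_status():

#include <linux/bits.h>
#include <linux/iopoll.h>

/* Hypothetical status accessor; returns a register value or -errno. */
static int my_read_status(void *priv);

static int my_wait_flash_done(void *priv)
{
	int val;

	/* Poll every 2 ms; give up after 300 ms or on a read error. */
	return readx_poll_timeout(my_read_status, priv, val,
				  (val & BIT(7)) || (val < 0),
				  2000, 2000 * 150);
}
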
+static int anx7625_hdcp_key_load(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Select HDCP 1.4 KEY */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_BOOT_RETRY, 0x12);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ FLASH_ADDR_HIGH, HDCP14KEY_START_ADDR >> 8);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ FLASH_ADDR_LOW, HDCP14KEY_START_ADDR & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_RAM_LEN_H, HDCP14KEY_SIZE >> 12);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_RAM_LEN_L, HDCP14KEY_SIZE >> 4);
+
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_RAM_ADDR_H, 0);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_RAM_ADDR_L, 0);
+ /* Enable HDCP 1.4 KEY load */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ R_RAM_CTRL, DECRYPT_EN | LOAD_START);
+ dev_dbg(dev, "load HDCP 1.4 key done\n");
+ return ret;
+}
+
+static int anx7625_hdcp_disable(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ dev_dbg(dev, "disable HDCP 1.4\n");
+
+ /* Disable HDCP */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
+ /* Try auth flag */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
+ /* Interrupt for DRM */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
+ if (ret < 0)
+ dev_err(dev, "fail to disable HDCP\n");
+
+ return anx7625_write_and(ctx, ctx->i2c.tx_p0_client,
+ TX_HDCP_CTRL0, ~HARD_AUTH_EN & 0xFF);
+}
+
+static int anx7625_hdcp_enable(struct anx7625_data *ctx)
+{
+ u8 bcap;
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ ret = anx7625_hdcp_key_probe(ctx);
+ if (ret) {
+		dev_dbg(dev, "no key found, skipping HDCP\n");
+ return ret;
+ }
+
+ /* Read downstream capability */
+	ret = anx7625_aux_dpcd_read(ctx, 0x68028, 1, &bcap);
+	if (ret < 0)
+		return ret;
+
+ if (!(bcap & 0x01)) {
+		pr_warn("downstream does not support HDCP 1.4, cap(%x).\n", bcap);
+ return 0;
+ }
+
+ dev_dbg(dev, "enable HDCP 1.4\n");
+
+ /* First clear HDCP state */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
+ TX_HDCP_CTRL0,
+ KSVLIST_VLD | BKSV_SRM_PASS | RE_AUTHEN);
+ usleep_range(1000, 1100);
+ /* Second clear HDCP state */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
+ TX_HDCP_CTRL0,
+ KSVLIST_VLD | BKSV_SRM_PASS | RE_AUTHEN);
+
+ /* Set time for waiting KSVR */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
+ SP_TX_WAIT_KSVR_TIME, 0xc8);
+ /* Set time for waiting R0 */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
+ SP_TX_WAIT_R0_TIME, 0xb0);
+ ret |= anx7625_hdcp_key_load(ctx);
+ if (ret) {
+		pr_warn("failed to prepare HDCP key.\n");
+ return ret;
+ }
+
+ ret = anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xee, 0x20);
+
+ /* Try auth flag */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
+ /* Interrupt for DRM */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
+ if (ret < 0)
+ dev_err(dev, "fail to enable HDCP\n");
+
+ return anx7625_write_or(ctx, ctx->i2c.tx_p0_client,
+ TX_HDCP_CTRL0, HARD_AUTH_EN);
+}
+
static void anx7625_dp_start(struct anx7625_data *ctx)
{
int ret;
@@ -679,6 +898,9 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
return;
}
+ /* Disable HDCP */
+ anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
+
if (ctx->pdata.is_dpi)
ret = anx7625_dpi_config(ctx);
else
@@ -686,6 +908,10 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
if (ret < 0)
DRM_DEV_ERROR(dev, "MIPI phy setup error.\n");
+
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+
+ ctx->dp_en = 1;
}
static void anx7625_dp_stop(struct anx7625_data *ctx)
@@ -705,6 +931,10 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
ret |= anx7625_video_mute_control(ctx, 1);
if (ret < 0)
DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
+
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+
+ ctx->dp_en = 0;
}
static int sp_tx_rst_aux(struct anx7625_data *ctx)
@@ -1098,9 +1328,18 @@ static void anx7625_init_gpio(struct anx7625_data *platform)
/* Gpio for chip power enable */
platform->pdata.gpio_p_on =
devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(platform->pdata.gpio_p_on)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "no enable gpio found\n");
+ platform->pdata.gpio_p_on = NULL;
+ }
+
/* Gpio for chip reset */
platform->pdata.gpio_reset =
devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(platform->pdata.gpio_reset)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "no reset gpio found\n");
+ platform->pdata.gpio_reset = NULL;
+ }
if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
platform->pdata.low_power_mode = 1;
@@ -1601,9 +1840,27 @@ static int anx7625_audio_hook_plugged_cb(struct device *dev, void *data,
return 0;
}
+static int anx7625_audio_get_eld(struct device *dev, void *data,
+ u8 *buf, size_t len)
+{
+ struct anx7625_data *ctx = dev_get_drvdata(dev);
+
+ if (!ctx->connector) {
+		dev_err(dev, "connector not initialized\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "audio copy eld\n");
+ memcpy(buf, ctx->connector->eld,
+ min(sizeof(ctx->connector->eld), len));
+
+ return 0;
+}
+
static const struct hdmi_codec_ops anx7625_codec_ops = {
.hw_params = anx7625_audio_hw_params,
.audio_shutdown = anx7625_audio_shutdown,
+ .get_eld = anx7625_audio_get_eld,
.get_dai_id = anx7625_hdmi_i2s_get_dai_id,
.hook_plugged_cb = anx7625_audio_hook_plugged_cb,
};
@@ -1660,7 +1917,7 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
if (!host) {
DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
- return -EINVAL;
+ return -EPROBE_DEFER;
}
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
@@ -1688,6 +1945,83 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
return 0;
}
+static void hdcp_check_work_func(struct work_struct *work)
+{
+ u8 status;
+ struct delayed_work *dwork;
+ struct anx7625_data *ctx;
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ dwork = to_delayed_work(work);
+ ctx = container_of(dwork, struct anx7625_data, hdcp_work);
+ dev = &ctx->client->dev;
+
+ if (!ctx->connector) {
+		dev_err(dev, "HDCP connector is null!\n");
+ return;
+ }
+
+ drm_dev = ctx->connector->dev;
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&ctx->hdcp_wq_lock);
+
+ status = anx7625_reg_read(ctx, ctx->i2c.tx_p0_client, 0);
+ dev_dbg(dev, "sink HDCP status check: %.02x\n", status);
+ if (status & BIT(1)) {
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ drm_hdcp_update_content_protection(ctx->connector,
+ ctx->hdcp_cp);
+ dev_dbg(dev, "update CP to ENABLE\n");
+ }
+
+ mutex_unlock(&ctx->hdcp_wq_lock);
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+}
+
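hdcp_check_work_func() above uses the standard delayed-work idiom: to_delayed_work() converts the callback's work_struct argument, and container_of() recovers the driver context that embeds it. A self-contained sketch of that idiom (struct my_ctx is hypothetical):

#include <linux/workqueue.h>

struct my_ctx {
	struct delayed_work hdcp_work;
	/* ... driver state ... */
};

static void my_work_func(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_ctx *ctx = container_of(dwork, struct my_ctx, hdcp_work);

	/* ctx now points at the structure embedding hdcp_work */
}

/* Armed elsewhere with:
 *	queue_delayed_work(wq, &ctx->hdcp_work, msecs_to_jiffies(2000));
 */
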
+static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
+ struct drm_connector_state *state)
+{
+ struct device *dev = &ctx->client->dev;
+ int cp;
+
+ dev_dbg(dev, "hdcp state check\n");
+ cp = state->content_protection;
+
+ if (cp == ctx->hdcp_cp)
+ return 0;
+
+ if (cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (ctx->dp_en) {
+ dev_dbg(dev, "enable HDCP\n");
+ anx7625_hdcp_enable(ctx);
+
+ queue_delayed_work(ctx->hdcp_workqueue,
+ &ctx->hdcp_work,
+ msecs_to_jiffies(2000));
+ }
+ }
+
+ if (cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ if (ctx->hdcp_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ dev_err(dev, "current CP is not ENABLED\n");
+ return -EINVAL;
+ }
+ anx7625_hdcp_disable(ctx);
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ drm_hdcp_update_content_protection(ctx->connector,
+ ctx->hdcp_cp);
+		dev_dbg(dev, "update CP to UNDESIRED\n");
+ }
+
+ if (cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+		dev_err(dev, "userspace may not set content protection to ENABLED\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
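The checks above implement the DRM "Content Protection" property contract: userspace may only request DESIRED or UNDESIRED, while ENABLED is reported by the driver alone (here via drm_hdcp_update_content_protection() once authentication succeeds). A reduced sketch of that validation:

#include <linux/errno.h>
#include <drm/drm_connector.h>

static int my_validate_cp_request(u64 requested)
{
	switch (requested) {
	case DRM_MODE_CONTENT_PROTECTION_DESIRED:	/* start authentication */
	case DRM_MODE_CONTENT_PROTECTION_UNDESIRED:	/* tear HDCP down */
		return 0;
	case DRM_MODE_CONTENT_PROTECTION_ENABLED:	/* driver-only state */
	default:
		return -EINVAL;
	}
}
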
static int anx7625_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -1902,25 +2236,57 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static void anx7625_bridge_enable(struct drm_bridge *bridge)
+static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ dev_dbg(dev, "drm bridge atomic check\n");
+
+ anx7625_bridge_mode_fixup(bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+
+ return anx7625_connector_atomic_check(ctx, conn_state);
+}
+
+static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = &ctx->client->dev;
+ struct drm_connector *connector;
- DRM_DEV_DEBUG_DRIVER(dev, "drm enable\n");
+ dev_dbg(dev, "drm atomic enable\n");
+
+ if (!bridge->encoder) {
+		dev_err(dev, "Parent encoder object not found\n");
+ return;
+ }
+
+ connector = drm_atomic_get_new_connector_for_encoder(state->base.state,
+ bridge->encoder);
+ if (!connector)
+ return;
+
+ ctx->connector = connector;
pm_runtime_get_sync(dev);
anx7625_dp_start(ctx);
}
-static void anx7625_bridge_disable(struct drm_bridge *bridge)
+static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = &ctx->client->dev;
- DRM_DEV_DEBUG_DRIVER(dev, "drm disable\n");
+ dev_dbg(dev, "drm atomic disable\n");
+ ctx->connector = NULL;
anx7625_dp_stop(ctx);
pm_runtime_put_sync(dev);
@@ -1950,11 +2316,14 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
- .disable = anx7625_bridge_disable,
.mode_valid = anx7625_bridge_mode_valid,
.mode_set = anx7625_bridge_mode_set,
- .mode_fixup = anx7625_bridge_mode_fixup,
- .enable = anx7625_bridge_enable,
+ .atomic_check = anx7625_bridge_atomic_check,
+ .atomic_enable = anx7625_bridge_atomic_enable,
+ .atomic_disable = anx7625_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
.get_edid = anx7625_bridge_get_edid,
};
@@ -1962,40 +2331,54 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
struct i2c_client *client)
{
+ int err = 0;
+
ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter,
TX_P0_ADDR >> 1);
- if (!ctx->i2c.tx_p0_client)
- return -ENOMEM;
+ if (IS_ERR(ctx->i2c.tx_p0_client))
+ return PTR_ERR(ctx->i2c.tx_p0_client);
ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter,
TX_P1_ADDR >> 1);
- if (!ctx->i2c.tx_p1_client)
+ if (IS_ERR(ctx->i2c.tx_p1_client)) {
+ err = PTR_ERR(ctx->i2c.tx_p1_client);
goto free_tx_p0;
+ }
ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter,
TX_P2_ADDR >> 1);
- if (!ctx->i2c.tx_p2_client)
+ if (IS_ERR(ctx->i2c.tx_p2_client)) {
+ err = PTR_ERR(ctx->i2c.tx_p2_client);
goto free_tx_p1;
+ }
ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter,
RX_P0_ADDR >> 1);
- if (!ctx->i2c.rx_p0_client)
+ if (IS_ERR(ctx->i2c.rx_p0_client)) {
+ err = PTR_ERR(ctx->i2c.rx_p0_client);
goto free_tx_p2;
+ }
ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter,
RX_P1_ADDR >> 1);
- if (!ctx->i2c.rx_p1_client)
+ if (IS_ERR(ctx->i2c.rx_p1_client)) {
+ err = PTR_ERR(ctx->i2c.rx_p1_client);
goto free_rx_p0;
+ }
ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter,
RX_P2_ADDR >> 1);
- if (!ctx->i2c.rx_p2_client)
+ if (IS_ERR(ctx->i2c.rx_p2_client)) {
+ err = PTR_ERR(ctx->i2c.rx_p2_client);
goto free_rx_p1;
+ }
ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter,
TCPC_INTERFACE_ADDR >> 1);
- if (!ctx->i2c.tcpc_client)
+ if (IS_ERR(ctx->i2c.tcpc_client)) {
+ err = PTR_ERR(ctx->i2c.tcpc_client);
goto free_rx_p2;
+ }
return 0;
@@ -2012,7 +2395,7 @@ free_tx_p1:
free_tx_p0:
i2c_unregister_device(ctx->i2c.tx_p0_client);
- return -ENOMEM;
+ return err;
}
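
The hunk above reflects that i2c_new_dummy_device() returns either a valid client or an ERR_PTR()-encoded errno, never NULL, so the old NULL checks could never fire. A minimal sketch of the ERR_PTR convention the code switches to:

#include <linux/err.h>
#include <linux/i2c.h>

static int my_claim_dummy(struct i2c_client *client, u16 addr,
			  struct i2c_client **out)
{
	struct i2c_client *dummy = i2c_new_dummy_device(client->adapter, addr);

	if (IS_ERR(dummy))
		return PTR_ERR(dummy);	/* propagate the encoded errno */

	*out = dummy;
	return 0;
}
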
static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
@@ -2134,6 +2517,15 @@ static int anx7625_i2c_probe(struct i2c_client *client,
anx7625_init_gpio(platform);
mutex_init(&platform->lock);
+ mutex_init(&platform->hdcp_wq_lock);
+
+ INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
+ platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
+ if (!platform->hdcp_workqueue) {
+ dev_err(dev, "fail to create work queue\n");
+ ret = -ENOMEM;
+ goto free_platform;
+ }
platform->pdata.intp_irq = client->irq;
if (platform->pdata.intp_irq) {
@@ -2143,7 +2535,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
if (!platform->workqueue) {
DRM_DEV_ERROR(dev, "fail to create work queue\n");
ret = -ENOMEM;
- goto free_platform;
+ goto free_hdcp_wq;
}
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
@@ -2213,6 +2605,10 @@ free_wq:
if (platform->workqueue)
destroy_workqueue(platform->workqueue);
+free_hdcp_wq:
+ if (platform->hdcp_workqueue)
+ destroy_workqueue(platform->hdcp_workqueue);
+
free_platform:
kfree(platform);
@@ -2228,6 +2624,12 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.intp_irq)
destroy_workqueue(platform->workqueue);
+ if (platform->hdcp_workqueue) {
+ cancel_delayed_work(&platform->hdcp_work);
+		flush_workqueue(platform->hdcp_workqueue);
+		destroy_workqueue(platform->hdcp_workqueue);
+ }
+
if (!platform->pdata.low_power_mode)
pm_runtime_put_sync_suspend(&client->dev);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 3d79b6fb13c8..56165f5b254c 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -59,10 +59,23 @@
/***************************************************************/
/* Register definition of device address 0x70 */
-#define I2C_ADDR_70_DPTX 0x70
-
-#define SP_TX_LINK_BW_SET_REG 0xA0
-#define SP_TX_LANE_COUNT_SET_REG 0xA1
+#define TX_HDCP_CTRL0 0x01
+#define STORE_AN BIT(7)
+#define RX_REPEATER BIT(6)
+#define RE_AUTHEN BIT(5)
+#define SW_AUTH_OK BIT(4)
+#define HARD_AUTH_EN BIT(3)
+#define ENC_EN BIT(2)
+#define BKSV_SRM_PASS BIT(1)
+#define KSVLIST_VLD BIT(0)
+
+#define SP_TX_WAIT_R0_TIME 0x40
+#define SP_TX_WAIT_KSVR_TIME 0x42
+#define SP_TX_SYS_CTRL1_REG 0x80
+#define HDCP2TX_FW_EN BIT(4)
+
+#define SP_TX_LINK_BW_SET_REG 0xA0
+#define SP_TX_LANE_COUNT_SET_REG 0xA1
#define M_VID_0 0xC0
#define M_VID_1 0xC1
@@ -71,6 +84,12 @@
#define N_VID_1 0xC4
#define N_VID_2 0xC5
+#define KEY_START_ADDR 0x9000
+#define KEY_RESERVED 416
+
+#define HDCP14KEY_START_ADDR (KEY_START_ADDR + KEY_RESERVED)
+#define HDCP14KEY_SIZE 624
+
/***************************************************************/
/* Register definition of device address 0x72 */
#define AUX_RST 0x04
@@ -155,9 +174,43 @@
#define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E
+#define R_BOOT_RETRY 0x00
+#define R_RAM_ADDR_H 0x01
+#define R_RAM_ADDR_L 0x02
+#define R_RAM_LEN_H 0x03
+#define R_RAM_LEN_L 0x04
#define FLASH_LOAD_STA 0x05
#define FLASH_LOAD_STA_CHK BIT(7)
+#define R_RAM_CTRL 0x05
+/* bit positions */
+#define FLASH_DONE BIT(7)
+#define BOOT_LOAD_DONE BIT(6)
+#define CRC_OK BIT(5)
+#define LOAD_DONE BIT(4)
+#define O_RW_DONE BIT(3)
+#define FUSE_BUSY BIT(2)
+#define DECRYPT_EN BIT(1)
+#define LOAD_START BIT(0)
+
+#define FLASH_ADDR_HIGH 0x0F
+#define FLASH_ADDR_LOW 0x10
+#define FLASH_LEN_HIGH 0x31
+#define FLASH_LEN_LOW 0x32
+#define R_FLASH_RW_CTRL 0x33
+/* bit positions */
+#define READ_DELAY_SELECT BIT(7)
+#define GENERAL_INSTRUCTION_EN BIT(6)
+#define FLASH_ERASE_EN BIT(5)
+#define RDID_READ_EN BIT(4)
+#define REMS_READ_EN BIT(3)
+#define WRITE_STATUS_EN BIT(2)
+#define FLASH_READ BIT(1)
+#define FLASH_WRITE BIT(0)
+
+#define FLASH_BUF_BASE_ADDR 0x60
+#define FLASH_BUF_LEN 0x20
+
#define XTAL_FRQ_SEL 0x3F
/* bit field positions */
#define XTAL_FRQ_SEL_POS 5
@@ -184,10 +237,15 @@
#define AP_AUX_CTRL_ADDRONLY 0x20
#define AP_AUX_BUFF_START 0x15
-#define PIXEL_CLOCK_L 0x25
-#define PIXEL_CLOCK_H 0x26
+#define PIXEL_CLOCK_L 0x25
+#define PIXEL_CLOCK_H 0x26
+
+#define AP_AUX_COMMAND 0x27 /* com+len */
+#define LENGTH_SHIFT 4
+#define DPCD_READ 0x09
+#define DPCD_WRITE 0x08
+#define DPCD_CMD(len, cmd) ((((len) - 1) << LENGTH_SHIFT) | (cmd))
-#define AP_AUX_COMMAND 0x27 /* com+len */
/* Bit 0&1: 3D video structure */
/* 0x01: frame packing, 0x02:Line alternative, 0x03:Side-by-side(full) */
#define AP_AV_STATUS 0x28
@@ -392,21 +450,29 @@ struct anx7625_data {
struct platform_device *audio_pdev;
int hpd_status;
int hpd_high_cnt;
+ int dp_en;
+ int hdcp_cp;
/* Lock for work queue */
struct mutex lock;
struct i2c_client *client;
struct anx7625_i2c_client i2c;
struct i2c_client *last_client;
+ struct timer_list hdcp_timer;
struct s_edid_data slimport_edid_p;
struct device *codec_dev;
hdmi_codec_plugged_cb plugged_cb;
struct work_struct work;
struct workqueue_struct *workqueue;
+ struct delayed_work hdcp_work;
+ struct workqueue_struct *hdcp_workqueue;
+ /* Lock for hdcp work queue */
+ struct mutex hdcp_wq_lock;
char edid_block;
struct display_timing dt;
u8 display_timing_valid;
struct drm_bridge bridge;
u8 bridge_attached;
+ struct drm_connector *connector;
struct mipi_dsi_device *dsi;
};
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index ef8c230e0f62..de697bade05e 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_CDNS_MHDP8546
tristate "Cadence DPI/DP bridge"
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
depends on OF
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 5530fbf64f1e..ac18e15aa167 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -41,7 +41,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
@@ -1553,13 +1553,13 @@ static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
switch (fmt->color_format) {
case DRM_COLOR_FORMAT_RGB444:
- case DRM_COLOR_FORMAT_YCRCB444:
+ case DRM_COLOR_FORMAT_YCBCR444:
bpp = fmt->bpc * 3;
break;
- case DRM_COLOR_FORMAT_YCRCB422:
+ case DRM_COLOR_FORMAT_YCBCR422:
bpp = fmt->bpc * 2;
break;
- case DRM_COLOR_FORMAT_YCRCB420:
+ case DRM_COLOR_FORMAT_YCBCR420:
bpp = fmt->bpc * 3 / 2;
break;
default:
@@ -1767,8 +1767,8 @@ static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
* If YCBCR supported and stream not SD, use ITU709
* Need to handle ITU version with YCBCR420 when supported
*/
- if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
- pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
+ if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
+ pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
@@ -1778,15 +1778,15 @@ static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
misc0 |= DP_COLOR_FORMAT_RGB;
break;
- case DRM_COLOR_FORMAT_YCRCB444:
+ case DRM_COLOR_FORMAT_YCBCR444:
pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
break;
- case DRM_COLOR_FORMAT_YCRCB422:
+ case DRM_COLOR_FORMAT_YCBCR422:
pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
break;
- case DRM_COLOR_FORMAT_YCRCB420:
+ case DRM_COLOR_FORMAT_YCBCR420:
pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
break;
default:
@@ -1882,7 +1882,7 @@ static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
if (mhdp->display_fmt.y_only)
misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
/* Use VSC SDP for Y420 */
- if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
+ if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
misc1 = CDNS_DP_TEST_VSC_SDP;
cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
index c74439d0b1a7..fc77f987c835 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -17,7 +17,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
struct clk;
struct device;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index a6151db95586..e8f36dca56b3 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -4,6 +4,7 @@
* Author: Jagan Teki <jagan@amarulasolutions.com>
*/
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_mipi_dsi.h>
@@ -30,6 +31,7 @@
struct chipone {
struct device *dev;
struct drm_bridge bridge;
+ struct drm_display_mode mode;
struct drm_bridge *panel_bridge;
struct gpio_desc *enable_gpio;
struct regulator *vdd1;
@@ -42,11 +44,6 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
return container_of(bridge, struct chipone, bridge);
}
-static struct drm_display_mode *bridge_to_mode(struct drm_bridge *bridge)
-{
- return &bridge->encoder->crtc->state->adjusted_mode;
-}
-
static inline int chipone_dsi_write(struct chipone *icn, const void *seq,
size_t len)
{
@@ -61,10 +58,11 @@ static inline int chipone_dsi_write(struct chipone *icn, const void *seq,
chipone_dsi_write(icn, d, ARRAY_SIZE(d)); \
}
-static void chipone_enable(struct drm_bridge *bridge)
+static void chipone_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
- struct drm_display_mode *mode = bridge_to_mode(bridge);
+ struct drm_display_mode *mode = &icn->mode;
ICN6211_DSI(icn, 0x7a, 0xc1);
@@ -114,7 +112,8 @@ static void chipone_enable(struct drm_bridge *bridge)
usleep_range(10000, 11000);
}
-static void chipone_pre_enable(struct drm_bridge *bridge)
+static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
int ret;
@@ -145,7 +144,8 @@ static void chipone_pre_enable(struct drm_bridge *bridge)
usleep_range(10000, 11000);
}
-static void chipone_post_disable(struct drm_bridge *bridge)
+static void chipone_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
@@ -161,6 +161,15 @@ static void chipone_post_disable(struct drm_bridge *bridge)
gpiod_set_value(icn->enable_gpio, 0);
}
+static void chipone_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct chipone *icn = bridge_to_chipone(bridge);
+
+ drm_mode_copy(&icn->mode, adjusted_mode);
+}
+
static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
@@ -169,10 +178,14 @@ static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flag
}
static const struct drm_bridge_funcs chipone_bridge_funcs = {
- .attach = chipone_attach,
- .post_disable = chipone_post_disable,
- .pre_enable = chipone_pre_enable,
- .enable = chipone_enable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = chipone_atomic_pre_enable,
+ .atomic_enable = chipone_atomic_enable,
+ .atomic_post_disable = chipone_atomic_post_disable,
+ .mode_set = chipone_mode_set,
+ .attach = chipone_attach,
};
static int chipone_parse_dt(struct chipone *icn)
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 06b59b422c69..69288cf894b9 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -936,9 +936,6 @@ static int it66121_probe(struct i2c_client *client,
return -EPROBE_DEFER;
}
- if (!ctx->next_bridge)
- return -EPROBE_DEFER;
-
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index dafb1b47c15f..feb128a4557d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -1090,7 +1090,7 @@ static int lt9611_probe(struct i2c_client *client,
if (!lt9611)
return -ENOMEM;
- lt9611->dev = &client->dev;
+ lt9611->dev = dev;
lt9611->client = client;
lt9611->sleep = false;
@@ -1100,7 +1100,7 @@ static int lt9611_probe(struct i2c_client *client,
return PTR_ERR(lt9611->regmap);
}
- ret = lt9611_parse_dt(&client->dev, lt9611);
+ ret = lt9611_parse_dt(dev, lt9611);
if (ret) {
dev_err(dev, "failed to parse device tree\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 33f9716da0ee..3d62e6bf6892 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -860,7 +860,7 @@ static int lt9611uxc_probe(struct i2c_client *client,
if (!lt9611uxc)
return -ENOMEM;
- lt9611uxc->dev = &client->dev;
+ lt9611uxc->dev = dev;
lt9611uxc->client = client;
mutex_init(&lt9611uxc->ocm_lock);
@@ -870,7 +870,7 @@ static int lt9611uxc_probe(struct i2c_client *client,
return PTR_ERR(lt9611uxc->regmap);
}
- ret = lt9611uxc_parse_dt(&client->dev, lt9611uxc);
+ ret = lt9611uxc_parse_dt(dev, lt9611uxc);
if (ret) {
dev_err(dev, "failed to parse device tree\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index a7389a0facfb..9282e61dfbf0 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -65,7 +65,6 @@ struct nwl_dsi_transfer {
struct nwl_dsi {
struct drm_bridge bridge;
struct mipi_dsi_host dsi_host;
- struct drm_bridge *panel_bridge;
struct device *dev;
struct phy *phy;
union phy_configure_opts phy_cfg;
@@ -924,13 +923,11 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
}
- dsi->panel_bridge = panel_bridge;
- if (!dsi->panel_bridge)
+ if (!panel_bridge)
return -EPROBE_DEFER;
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
- flags);
+ return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
}
static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
@@ -1206,6 +1203,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
ret = nwl_dsi_select_input(dsi);
if (ret < 0) {
+ pm_runtime_disable(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 818704bf5e86..3f17337ee389 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -14,8 +14,8 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -102,6 +102,7 @@ struct ps8640 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
+ struct device_link *link;
bool pre_enabled;
};
@@ -456,14 +457,36 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
return ret;
}
+ ps_bridge->link = device_link_add(bridge->dev->dev, dev, DL_FLAG_STATELESS);
+ if (!ps_bridge->link) {
+		dev_err(dev, "failed to create device link\n");
+ ret = -EINVAL;
+ goto err_devlink;
+ }
+
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
- &ps_bridge->bridge, flags);
+ ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ &ps_bridge->bridge, flags);
+ if (ret)
+ goto err_bridge_attach;
+
+ return 0;
+
+err_bridge_attach:
+ device_link_del(ps_bridge->link);
+err_devlink:
+ drm_dp_aux_unregister(&ps_bridge->aux);
+
+ return ret;
}
static void ps8640_bridge_detach(struct drm_bridge *bridge)
{
- drm_dp_aux_unregister(&bridge_to_ps8640(bridge)->aux);
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+
+ drm_dp_aux_unregister(&ps_bridge->aux);
+ if (ps_bridge->link)
+ device_link_del(ps_bridge->link);
}
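
The device link created in ps8640_bridge_attach() above makes the DRM device a consumer of the bridge, so the bridge is suspended after and resumed before the display device; DL_FLAG_STATELESS means the driver must delete the link itself, as the detach path now does. A minimal sketch of the pairing:

#include <linux/device.h>

static struct device_link *my_link;	/* hypothetical; normally in driver state */

static int my_attach(struct device *consumer, struct device *supplier)
{
	my_link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	return my_link ? 0 : -EINVAL;
}

static void my_detach(void)
{
	if (my_link) {
		device_link_del(my_link);
		my_link = NULL;
	}
}
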
static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 89558e581530..65549fbfdc87 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -166,10 +166,12 @@ struct sii902x {
struct i2c_client *i2c;
struct regmap *regmap;
struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
struct regulator_bulk_data supplies[2];
+ bool sink_is_hdmi;
/*
* Mutex protects audio and video functions from interfering
* each other, by keeping their i2c command sequences atomic.
@@ -245,10 +247,8 @@ static void sii902x_reset(struct sii902x *sii902x)
gpiod_set_value(sii902x->reset_gpio, 0);
}
-static enum drm_connector_status
-sii902x_connector_detect(struct drm_connector *connector, bool force)
+static enum drm_connector_status sii902x_detect(struct sii902x *sii902x)
{
- struct sii902x *sii902x = connector_to_sii902x(connector);
unsigned int status;
mutex_lock(&sii902x->mutex);
@@ -261,6 +261,14 @@ sii902x_connector_detect(struct drm_connector *connector, bool force)
connector_status_connected : connector_status_disconnected;
}
+static enum drm_connector_status
+sii902x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+
+ return sii902x_detect(sii902x);
+}
+
static const struct drm_connector_funcs sii902x_connector_funcs = {
.detect = sii902x_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -270,42 +278,40 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int sii902x_get_modes(struct drm_connector *connector)
+static struct edid *sii902x_get_edid(struct sii902x *sii902x,
+ struct drm_connector *connector)
{
- struct sii902x *sii902x = connector_to_sii902x(connector);
- u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct edid *edid;
- int num = 0, ret;
mutex_lock(&sii902x->mutex);
edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
- drm_connector_update_edid_property(connector, edid);
if (edid) {
if (drm_detect_hdmi_monitor(edid))
- output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
-
- num = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ sii902x->sink_is_hdmi = true;
+ else
+ sii902x->sink_is_hdmi = false;
}
- ret = drm_display_info_set_bus_formats(&connector->display_info,
- &bus_format, 1);
- if (ret)
- goto error_out;
+ mutex_unlock(&sii902x->mutex);
- ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
- if (ret)
- goto error_out;
+ return edid;
+}
- ret = num;
+static int sii902x_get_modes(struct drm_connector *connector)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+ struct edid *edid;
+ int num = 0;
-error_out:
- mutex_unlock(&sii902x->mutex);
+ edid = sii902x_get_edid(sii902x, connector);
+ drm_connector_update_edid_property(connector, edid);
+ if (edid) {
+ num = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
- return ret;
+ return num;
}
static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
@@ -354,12 +360,16 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *adj)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
+ if (sii902x->sink_is_hdmi)
+ output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
+
buf[0] = pixel_clock_10kHz & 0xff;
buf[1] = pixel_clock_10kHz >> 8;
buf[2] = drm_mode_vrefresh(adj);
@@ -375,6 +385,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&sii902x->mutex);
+ ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
+ if (ret)
+ goto out;
+
ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
if (ret)
goto out;
@@ -405,13 +420,13 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct drm_device *drm = bridge->dev;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ bridge, flags);
drm_connector_helper_add(&sii902x->connector,
&sii902x_connector_helper_funcs);
@@ -433,16 +448,38 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
else
sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+ ret = drm_display_info_set_bus_formats(&sii902x->connector.display_info,
+ &bus_format, 1);
+ if (ret)
+ return ret;
+
drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
return 0;
}
+static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ return sii902x_detect(sii902x);
+}
+
+static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ return sii902x_get_edid(sii902x, connector);
+}
+
static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.attach = sii902x_bridge_attach,
.mode_set = sii902x_bridge_mode_set,
.disable = sii902x_bridge_disable,
.enable = sii902x_bridge_enable,
+ .detect = sii902x_bridge_detect,
+ .get_edid = sii902x_bridge_get_edid,
};
static int sii902x_mute(struct sii902x *sii902x, bool mute)
@@ -829,8 +866,12 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
mutex_unlock(&sii902x->mutex);
- if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
+ if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev) {
drm_helper_hpd_irq_event(sii902x->bridge.dev);
+ drm_bridge_hpd_notify(&sii902x->bridge, (status & SII902X_PLUGGED_STATUS)
+ ? connector_status_connected
+ : connector_status_disconnected);
+ }
return IRQ_HANDLED;
}
@@ -1001,6 +1042,11 @@ static int sii902x_init(struct sii902x *sii902x)
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
+ sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+
+ if (sii902x->i2c->irq > 0)
+ sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev);
@@ -1022,6 +1068,7 @@ static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ struct device_node *endpoint;
struct sii902x *sii902x;
int ret;
@@ -1049,6 +1096,28 @@ static int sii902x_probe(struct i2c_client *client,
return PTR_ERR(sii902x->reset_gpio);
}
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+ if (endpoint) {
+ struct device_node *remote = of_graph_get_remote_port_parent(endpoint);
+
+ of_node_put(endpoint);
+ if (!remote) {
+ dev_err(dev, "Endpoint in port@1 unconnected\n");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(remote)) {
+ dev_err(dev, "port@1 remote device is disabled\n");
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
+ sii902x->next_bridge = of_drm_find_bridge(remote);
+ of_node_put(remote);
+ if (!sii902x->next_bridge)
+ return -EPROBE_DEFER;
+ }
+
mutex_init(&sii902x->mutex);
sii902x->supplies[0].supply = "iovcc";
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 843265d7f1b1..ec7745c31da0 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
if (ret) {
dev_err(ctx->dev, "Failed to register RC device\n");
ctx->error = ret;
- rc_free_device(ctx->rc_dev);
+ rc_free_device(rc_dev);
return;
}
ctx->rc_dev = rc_dev;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 54d8fdad395f..b0d8110dd412 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2540,7 +2540,7 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_display_mode *mode = &crtc_state->mode;
u8 max_bpc = conn_state->max_requested_bpc;
bool is_hdmi2_sink = info->hdmi.scdc.supported ||
- (info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+ (info->color_formats & DRM_COLOR_FORMAT_YCBCR420);
u32 *output_fmts;
unsigned int i = 0;
@@ -2594,36 +2594,36 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
*/
if (max_bpc >= 16 && info->bpc == 16) {
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
}
if (max_bpc >= 12 && info->bpc >= 12) {
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
}
if (max_bpc >= 10 && info->bpc >= 10) {
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
}
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
- if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
/* Default 8bit RGB fallback */
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index e44e18a0112a..11d20b8638cd 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -871,7 +871,8 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
dsi_write(dsi, DSI_INT_MSK1, 0);
}
-static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
+static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
@@ -978,7 +979,8 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
}
-static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
+static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -998,7 +1000,10 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
enum drm_mode_status mode_status = MODE_OK;
if (pdata->mode_valid)
- mode_status = pdata->mode_valid(pdata->priv_data, mode);
+ mode_status = pdata->mode_valid(pdata->priv_data, mode,
+ dsi->mode_flags,
+ dw_mipi_dsi_get_lanes(dsi),
+ dsi->format);
return mode_status;
}
@@ -1032,11 +1037,14 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
- .mode_set = dw_mipi_dsi_bridge_mode_set,
- .enable = dw_mipi_dsi_bridge_enable,
- .post_disable = dw_mipi_dsi_bridge_post_disable,
- .mode_valid = dw_mipi_dsi_bridge_mode_valid,
- .attach = dw_mipi_dsi_bridge_attach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = dw_mipi_dsi_bridge_atomic_enable,
+ .atomic_post_disable = dw_mipi_dsi_bridge_post_atomic_disable,
+ .mode_set = dw_mipi_dsi_bridge_mode_set,
+ .mode_valid = dw_mipi_dsi_bridge_mode_valid,
+ .attach = dw_mipi_dsi_bridge_attach,
};
#ifdef CONFIG_DEBUG_FS
@@ -1199,6 +1207,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ pm_runtime_disable(dev);
dw_mipi_dsi_debugfs_remove(dsi);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 23a6f90b694b..c23e0abc65e8 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -27,7 +27,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 2c76331b251d..695af3badcc7 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -22,7 +22,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -241,7 +241,7 @@ static inline u32 TC358775_LVCFG_PCLKDIV(uint32_t val)
}
#define TC358775_LVCFG_LVDLINK__MASK 0x00000002
-#define TC358775_LVCFG_LVDLINK__SHIFT 0
+#define TC358775_LVCFG_LVDLINK__SHIFT 1
static inline u32 TC358775_LVCFG_LVDLINK(uint32_t val)
{
return ((val) << TC358775_LVCFG_LVDLINK__SHIFT) &
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 945f08de45f1..19daaddd29a4 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -33,6 +33,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -143,6 +144,7 @@ struct sn65dsi83 {
struct mipi_dsi_device *dsi;
struct drm_bridge *panel_bridge;
struct gpio_desc *enable_gpio;
+ struct regulator *vcc;
int dsi_lanes;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
@@ -337,6 +339,12 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
u16 val;
int ret;
+ ret = regulator_enable(ctx->vcc);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable vcc: %d\n", ret);
+ return;
+ }
+
/* Deassert reset */
gpiod_set_value(ctx->enable_gpio, 1);
usleep_range(1000, 1100);
@@ -486,11 +494,16 @@ static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ int ret;
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
gpiod_set_value(ctx->enable_gpio, 0);
usleep_range(10000, 11000);
+ ret = regulator_disable(ctx->vcc);
+ if (ret)
+ dev_err(ctx->dev, "Failed to disable vcc: %d\n", ret);
+
regcache_mark_dirty(ctx->regmap);
}
@@ -560,10 +573,14 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ctx->host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
- return -EINVAL;
- if (!ctx->host_node)
- return -ENODEV;
+ if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+ if (!ctx->host_node) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -590,16 +607,27 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
if (ret < 0)
- return ret;
+ goto err_put_node;
if (panel) {
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ if (IS_ERR(panel_bridge)) {
+ ret = PTR_ERR(panel_bridge);
+ goto err_put_node;
+ }
}
ctx->panel_bridge = panel_bridge;
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc),
+ "Failed to get supply 'vcc'\n");
+
return 0;
+
+err_put_node:
+ of_node_put(ctx->host_node);
+ return ret;
}
static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
@@ -662,7 +690,8 @@ static int sn65dsi83_probe(struct i2c_client *client,
}
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
- ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
+ ctx->enable_gpio = devm_gpiod_get_optional(ctx->dev, "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(ctx->enable_gpio))
return PTR_ERR(ctx->enable_gpio);
@@ -673,8 +702,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
return ret;
ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
- if (IS_ERR(ctx->regmap))
- return PTR_ERR(ctx->regmap);
+ if (IS_ERR(ctx->regmap)) {
+ ret = PTR_ERR(ctx->regmap);
+ goto err_put_node;
+ }
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
@@ -691,6 +722,8 @@ static int sn65dsi83_probe(struct i2c_client *client,
err_remove_bridge:
drm_bridge_remove(&ctx->bridge);
+err_put_node:
+ of_node_put(ctx->host_node);
return ret;
}
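The dev_err_probe() call above keeps -EPROBE_DEFER quiet while recording it as the deferral reason; it stands in for roughly this open-coded pattern (a sketch, minus the deferral-reason bookkeeping):

	ctx->vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(ctx->vcc)) {
		ret = PTR_ERR(ctx->vcc);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get supply 'vcc': %d\n", ret);
		return ret;
	}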
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index dab8f76618f3..ba136a188be7 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -26,8 +26,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/dp/Makefile b/drivers/gpu/drm/dp/Makefile
new file mode 100644
index 000000000000..75faffc706b1
--- /dev/null
+++ b/drivers/gpu/drm/dp/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+
+obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
+
+drm_dp_helper-y := drm_dp.o drm_dp_dual_mode_helper.o drm_dp_helper_mod.o drm_dp_mst_topology.o
+drm_dp_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_dp_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
+
+obj-$(CONFIG_DRM_DP_HELPER) += drm_dp_helper.o
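Everything listed in drm_dp_helper-y links into a single drm_dp_helper.ko, with the AUX character device and CEC glue folded in only when CONFIG_DRM_DP_AUX_CHARDEV and CONFIG_DRM_DP_CEC are set; drm_dp_aux_bus stays a module of its own.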
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/dp/drm_dp.c
index 4a5527cbd448..a20b0f8f24b8 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/dp/drm_dp.c
@@ -29,13 +29,13 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_panel.h>
-#include "drm_crtc_helper_internal.h"
+#include "drm_dp_helper_internal.h"
struct dp_aux_backlight {
struct backlight_device *base;
diff --git a/drivers/gpu/drm/drm_dp_aux_bus.c b/drivers/gpu/drm/dp/drm_dp_aux_bus.c
index 298ea7a49591..415afce3cf96 100644
--- a/drivers/gpu/drm/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/dp/drm_dp_aux_bus.c
@@ -19,8 +19,8 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
+#include <drm/dp/drm_dp_helper.h>
/**
* dp_aux_ep_match() - The match function for the dp_aux_bus.
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/dp/drm_dp_aux_dev.c
index 06b374cae956..53ad4e72790b 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/dp/drm_dp_aux_dev.c
@@ -36,11 +36,11 @@
#include <linux/uio.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
-#include "drm_crtc_helper_internal.h"
+#include "drm_dp_helper_internal.h"
struct drm_dp_aux_dev {
unsigned index;
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/dp/drm_dp_cec.c
index 3ab2609f9ec7..f9e927355879 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/dp/drm_dp_cec.c
@@ -13,7 +13,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/dp/drm_dp_dual_mode_helper.c
index 9faf49354cab..2049cb0f7ed0 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/dp/drm_dp_dual_mode_helper.c
@@ -28,7 +28,7 @@
#include <linux/string.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/dp/drm_dp_dual_mode_helper.h>
#include <drm/drm_print.h>
/**
diff --git a/drivers/gpu/drm/dp/drm_dp_helper_internal.h b/drivers/gpu/drm/dp/drm_dp_helper_internal.h
new file mode 100644
index 000000000000..8917fc3af9ec
--- /dev/null
+++ b/drivers/gpu/drm/dp/drm_dp_helper_internal.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DP_HELPER_INTERNAL_H
+#define DRM_DP_HELPER_INTERNAL_H
+
+struct drm_dp_aux;
+
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+int drm_dp_aux_dev_init(void);
+void drm_dp_aux_dev_exit(void);
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
+#else
+static inline int drm_dp_aux_dev_init(void)
+{
+ return 0;
+}
+
+static inline void drm_dp_aux_dev_exit(void)
+{
+}
+
+static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/dp/drm_dp_helper_mod.c b/drivers/gpu/drm/dp/drm_dp_helper_mod.c
new file mode 100644
index 000000000000..db753de24000
--- /dev/null
+++ b/drivers/gpu/drm/dp/drm_dp_helper_mod.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/module.h>
+
+#include "drm_dp_helper_internal.h"
+
+MODULE_DESCRIPTION("DRM DisplayPort helper");
+MODULE_LICENSE("GPL and additional rights");
+
+static int __init drm_dp_helper_module_init(void)
+{
+ return drm_dp_aux_dev_init();
+}
+
+static void __exit drm_dp_helper_module_exit(void)
+{
+ /* Call exit functions from specific dp helpers here */
+ drm_dp_aux_dev_exit();
+}
+
+module_init(drm_dp_helper_module_init);
+module_exit(drm_dp_helper_module_exit);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/dp/drm_dp_mst_topology.c
index 8b3822142fed..11300b53d24f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/dp/drm_dp_mst_topology.c
@@ -38,14 +38,14 @@
#include <linux/math64.h>
#endif
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "drm_crtc_helper_internal.h"
+#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"
/**
@@ -4196,7 +4196,7 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
int ret = 0;
int sc;
*handled = false;
- sc = esi[0] & 0x3f;
+ sc = DP_GET_SINK_COUNT(esi[0]);
if (sc != mgr->sink_count) {
mgr->sink_count = sc;
@@ -4811,7 +4811,7 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
list_for_each_entry(port, &mstb->ports, next) {
- seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
+ seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
prefix,
port->port_num,
port,
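DP_GET_SINK_COUNT() also folds in bit 7 of DP_SINK_COUNT, which DP 1.2 defines as bit 6 of the count and which the open-coded mask dropped:

/*
 * DP_GET_SINK_COUNT(0x81) == ((0x81 & 0x80) >> 1) | (0x81 & 0x3f) == 0x41,
 * i.e. 65 sinks, where "esi[0] & 0x3f" would have reported only 1.
 */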
diff --git a/drivers/gpu/drm/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/dp/drm_dp_mst_topology_internal.h
index eeda9a61c657..401953b59d45 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology_internal.h
+++ b/drivers/gpu/drm/dp/drm_dp_mst_topology_internal.h
@@ -10,7 +10,7 @@
#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
#define _DRM_DP_MST_HELPER_INTERNAL_H_
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
new file mode 100644
index 000000000000..d60878bc9c20
--- /dev/null
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_buddy.h>
+
+static struct kmem_cache *slab_blocks;
+
+static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
+ struct drm_buddy_block *parent,
+ unsigned int order,
+ u64 offset)
+{
+ struct drm_buddy_block *block;
+
+ BUG_ON(order > DRM_BUDDY_MAX_ORDER);
+
+ block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ block->header = offset;
+ block->header |= order;
+ block->parent = parent;
+
+ BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
+ return block;
+}
+
+static void drm_block_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ kmem_cache_free(slab_blocks, block);
+}
+
+static void mark_allocated(struct drm_buddy_block *block)
+{
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_ALLOCATED;
+
+ list_del(&block->link);
+}
+
+static void mark_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_FREE;
+
+ list_add(&block->link,
+ &mm->free_list[drm_buddy_block_order(block)]);
+}
+
+static void mark_split(struct drm_buddy_block *block)
+{
+ block->header &= ~DRM_BUDDY_HEADER_STATE;
+ block->header |= DRM_BUDDY_SPLIT;
+
+ list_del(&block->link);
+}
+
+/**
+ * drm_buddy_init - init memory manager
+ *
+ * @mm: DRM buddy manager to initialize
+ * @size: size in bytes to manage
+ * @chunk_size: minimum page size in bytes for our allocations
+ *
+ * Initializes the memory manager and its resources.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+{
+ unsigned int i;
+ u64 offset;
+
+ if (size < chunk_size)
+ return -EINVAL;
+
+ if (chunk_size < PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(chunk_size))
+ return -EINVAL;
+
+ size = round_down(size, chunk_size);
+
+ mm->size = size;
+ mm->avail = size;
+ mm->chunk_size = chunk_size;
+ mm->max_order = ilog2(size) - ilog2(chunk_size);
+
+ BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
+
+ mm->free_list = kmalloc_array(mm->max_order + 1,
+ sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!mm->free_list)
+ return -ENOMEM;
+
+ for (i = 0; i <= mm->max_order; ++i)
+ INIT_LIST_HEAD(&mm->free_list[i]);
+
+ mm->n_roots = hweight64(size);
+
+ mm->roots = kmalloc_array(mm->n_roots,
+ sizeof(struct drm_buddy_block *),
+ GFP_KERNEL);
+ if (!mm->roots)
+ goto out_free_list;
+
+ offset = 0;
+ i = 0;
+
+ /*
+ * Split into power-of-two blocks, in case we are given a size that is
+ * not itself a power-of-two.
+ */
+ do {
+ struct drm_buddy_block *root;
+ unsigned int order;
+ u64 root_size;
+
+ root_size = rounddown_pow_of_two(size);
+ order = ilog2(root_size) - ilog2(chunk_size);
+
+ root = drm_block_alloc(mm, NULL, order, offset);
+ if (!root)
+ goto out_free_roots;
+
+ mark_free(mm, root);
+
+ BUG_ON(i > mm->max_order);
+ BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
+
+ mm->roots[i] = root;
+
+ offset += root_size;
+ size -= root_size;
+ i++;
+ } while (size);
+
+ return 0;
+
+out_free_roots:
+ while (i--)
+ drm_block_free(mm, mm->roots[i]);
+ kfree(mm->roots);
+out_free_list:
+ kfree(mm->free_list);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buddy_init);
+
+/**
+ * drm_buddy_fini - tear down the memory manager
+ *
+ * @mm: DRM buddy manager to free
+ *
+ * Cleanup memory manager resources and the freelist
+ */
+void drm_buddy_fini(struct drm_buddy *mm)
+{
+ int i;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
+ drm_block_free(mm, mm->roots[i]);
+ }
+
+ WARN_ON(mm->avail != mm->size);
+
+ kfree(mm->roots);
+ kfree(mm->free_list);
+}
+EXPORT_SYMBOL(drm_buddy_fini);
+
+static int split_block(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ unsigned int block_order = drm_buddy_block_order(block) - 1;
+ u64 offset = drm_buddy_block_offset(block);
+
+ BUG_ON(!drm_buddy_block_is_free(block));
+ BUG_ON(!drm_buddy_block_order(block));
+
+ block->left = drm_block_alloc(mm, block, block_order, offset);
+ if (!block->left)
+ return -ENOMEM;
+
+ block->right = drm_block_alloc(mm, block, block_order,
+ offset + (mm->chunk_size << block_order));
+ if (!block->right) {
+ drm_block_free(mm, block->left);
+ return -ENOMEM;
+ }
+
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
+
+ mark_split(block);
+
+ return 0;
+}
+
+static struct drm_buddy_block *
+get_buddy(struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *parent;
+
+ parent = block->parent;
+ if (!parent)
+ return NULL;
+
+ if (parent->left == block)
+ return parent->right;
+
+ return parent->left;
+}
+
+static void __drm_buddy_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *parent;
+
+ while ((parent = block->parent)) {
+ struct drm_buddy_block *buddy;
+
+ buddy = get_buddy(block);
+
+ if (!drm_buddy_block_is_free(buddy))
+ break;
+
+ list_del(&buddy->link);
+
+ drm_block_free(mm, block);
+ drm_block_free(mm, buddy);
+
+ block = parent;
+ }
+
+ mark_free(mm, block);
+}
+
+/**
+ * drm_buddy_free_block - free a block
+ *
+ * @mm: DRM buddy manager
+ * @block: block to be freed
+ */
+void drm_buddy_free_block(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ BUG_ON(!drm_buddy_block_is_allocated(block));
+ mm->avail += drm_buddy_block_size(mm, block);
+ __drm_buddy_free(mm, block);
+}
+EXPORT_SYMBOL(drm_buddy_free_block);
+
+/**
+ * drm_buddy_free_list - free blocks
+ *
+ * @mm: DRM buddy manager
+ * @objects: input list head to free blocks
+ */
+void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
+{
+ struct drm_buddy_block *block, *on;
+
+ list_for_each_entry_safe(block, on, objects, link) {
+ drm_buddy_free_block(mm, block);
+ cond_resched();
+ }
+ INIT_LIST_HEAD(objects);
+}
+EXPORT_SYMBOL(drm_buddy_free_list);
+
+/**
+ * drm_buddy_alloc_blocks - allocate power-of-two blocks
+ *
+ * @mm: DRM buddy manager to allocate from
+ * @order: size of the allocation
+ *
+ * The order value here translates to:
+ *
+ * 0 = 2^0 * mm->chunk_size
+ * 1 = 2^1 * mm->chunk_size
+ * 2 = 2^2 * mm->chunk_size
+ *
+ * Returns:
+ * allocated ptr to the &drm_buddy_block on success
+ */
+struct drm_buddy_block *
+drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
+{
+ struct drm_buddy_block *block = NULL;
+ unsigned int i;
+ int err;
+
+ for (i = order; i <= mm->max_order; ++i) {
+ block = list_first_entry_or_null(&mm->free_list[i],
+ struct drm_buddy_block,
+ link);
+ if (block)
+ break;
+ }
+
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+
+ BUG_ON(!drm_buddy_block_is_free(block));
+
+ while (i != order) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto out_free;
+
+ /* Go low */
+ block = block->left;
+ i--;
+ }
+
+ mark_allocated(block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ kmemleak_update_trace(block);
+ return block;
+
+out_free:
+ if (i != order)
+ __drm_buddy_free(mm, block);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(drm_buddy_alloc_blocks);
+
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+/**
+ * drm_buddy_alloc_range - allocate range
+ *
+ * @mm: DRM buddy manager to allocate from
+ * @blocks: output list head to add allocated blocks
+ * @start: start of the allowed range for this block
+ * @size: size of the allocation
+ *
+ * Intended for pre-allocating portions of the address space, for example to
+ * reserve a block for the initial framebuffer or similar, hence the expectation
+ * here is that drm_buddy_alloc_blocks() is still the main vehicle for
+ * allocations, so if that's not the case then the drm_mm range allocator is
+ * probably a much better fit, and so you should probably go use that instead.
+ *
+ * Note that it's safe to chain together multiple alloc_ranges
+ * with the same blocks list.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_alloc_range(struct drm_buddy *mm,
+ struct list_head *blocks,
+ u64 start, u64 size)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *buddy;
+ LIST_HEAD(allocated);
+ LIST_HEAD(dfs);
+ u64 end;
+ int err;
+ int i;
+
+ if (size < mm->chunk_size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size | start, mm->chunk_size))
+ return -EINVAL;
+
+ if (range_overflows(start, size, mm->size))
+ return -EINVAL;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ end = start + size - 1;
+
+ do {
+ u64 block_start;
+ u64 block_end;
+
+ block = list_first_entry_or_null(&dfs,
+ struct drm_buddy_block,
+ tmp_link);
+ if (!block)
+ break;
+
+ list_del(&block->tmp_link);
+
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+ if (!overlaps(start, end, block_start, block_end))
+ continue;
+
+ if (drm_buddy_block_is_allocated(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ if (contains(start, end, block_start, block_end)) {
+ if (!drm_buddy_block_is_free(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ mark_allocated(block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ list_add_tail(&block->link, &allocated);
+ continue;
+ }
+
+ if (!drm_buddy_block_is_split(block)) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+ }
+
+ list_add(&block->right->tmp_link, &dfs);
+ list_add(&block->left->tmp_link, &dfs);
+ } while (1);
+
+ list_splice_tail(&allocated, blocks);
+ return 0;
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = get_buddy(block);
+ if (buddy &&
+ (drm_buddy_block_is_free(block) &&
+ drm_buddy_block_is_free(buddy)))
+ __drm_buddy_free(mm, block);
+
+err_free:
+ drm_buddy_free_list(mm, &allocated);
+ return err;
+}
+EXPORT_SYMBOL(drm_buddy_alloc_range);
+
+/**
+ * drm_buddy_block_print - print block information
+ *
+ * @mm: DRM buddy manager
+ * @block: DRM buddy block
+ * @p: DRM printer to use
+ */
+void drm_buddy_block_print(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ struct drm_printer *p)
+{
+ u64 start = drm_buddy_block_offset(block);
+ u64 size = drm_buddy_block_size(mm, block);
+
+ drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
+}
+EXPORT_SYMBOL(drm_buddy_block_print);
+
+/**
+ * drm_buddy_print - print allocator state
+ *
+ * @mm: DRM buddy manager
+ * @p: DRM printer to use
+ */
+void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
+{
+ int order;
+
+ drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
+
+ for (order = mm->max_order; order >= 0; order--) {
+ struct drm_buddy_block *block;
+ u64 count = 0, free;
+
+ list_for_each_entry(block, &mm->free_list[order], link) {
+ BUG_ON(!drm_buddy_block_is_free(block));
+ count++;
+ }
+
+ drm_printf(p, "order-%d ", order);
+
+ free = count * (mm->chunk_size << order);
+ if (free < SZ_1M)
+ drm_printf(p, "free: %lluKiB", free >> 10);
+ else
+ drm_printf(p, "free: %lluMiB", free >> 20);
+
+ drm_printf(p, ", pages: %llu\n", count);
+ }
+}
+EXPORT_SYMBOL(drm_buddy_print);
+
+static void drm_buddy_module_exit(void)
+{
+ kmem_cache_destroy(slab_blocks);
+}
+
+static int __init drm_buddy_module_init(void)
+{
+ slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
+ if (!slab_blocks)
+ return -ENOMEM;
+
+ return 0;
+}
+
+module_init(drm_buddy_module_init);
+module_exit(drm_buddy_module_exit);
+
+MODULE_DESCRIPTION("DRM Buddy Allocator");
+MODULE_LICENSE("Dual MIT/GPL");
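A minimal consumer sketch of the API above, with illustrative sizes (1 GiB managed in 4 KiB chunks, a 16 MiB reserved range plus one order-9 block):

static int buddy_example(void)
{
	struct drm_buddy mm;
	struct drm_buddy_block *block;
	LIST_HEAD(reserved);
	int err;

	err = drm_buddy_init(&mm, SZ_1G, SZ_4K);
	if (err)
		return err;

	/* Pre-allocate the first 16 MiB, e.g. for a firmware framebuffer */
	err = drm_buddy_alloc_range(&mm, &reserved, 0, SZ_16M);
	if (err)
		goto out_fini;

	/* order = ilog2(alloc_size) - ilog2(chunk_size); 2 MiB -> 21 - 12 = 9 */
	block = drm_buddy_alloc_blocks(&mm, ilog2(SZ_2M) - ilog2(SZ_4K));
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto out_free;
	}

	drm_buddy_free_block(&mm, block);
out_free:
	drm_buddy_free_list(&mm, &reserved);
out_fini:
	drm_buddy_fini(&mm);
	return err;
}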
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index bb14f488c8f6..9079fbe21d2f 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -82,6 +82,10 @@
* driver boot-up state too. Drivers can access this blob through
* &drm_crtc_state.gamma_lut.
*
+ * Note that for mostly historical reasons stemming from Xorg heritage,
+ * this is also used to store the color map (also sometimes called the
+ * color LUT, CLUT or color palette) for indexed formats like DRM_FORMAT_C8.
+ *
* “GAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the GAMMA_LUT property (the size depends on the underlying hardware).
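For a DRM_FORMAT_C8 framebuffer the palette is therefore programmed through the gamma path; a userspace sketch via the legacy libdrm ioctl, which atomic drivers route into the GAMMA_LUT property (the grayscale ramp is illustrative):

#include <stdint.h>
#include <xf86drmMode.h>

static int set_c8_grayscale(int fd, uint32_t crtc_id)
{
	uint16_t r[256], g[256], b[256];
	int i;

	for (i = 0; i < 256; i++)
		r[i] = g[i] = b[i] = i << 8;	/* 8-bit index -> 16-bit level */

	return drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);
}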
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 61e09f8a8d0f..28e04e750130 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -28,36 +28,9 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
-/* drm_dp_aux_dev.c */
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
-int drm_dp_aux_dev_init(void);
-void drm_dp_aux_dev_exit(void);
-int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
-#else
-static inline int drm_dp_aux_dev_init(void)
-{
- return 0;
-}
-
-static inline void drm_dp_aux_dev_exit(void)
-{
-}
-
-static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
-{
- return 0;
-}
-
-static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
-{
-}
-#endif
-
/* drm_probe_helper.c */
enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 46a3c1b62463..fdd8d5f42622 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -12,7 +12,7 @@
#include <linux/errno.h>
#include <linux/byteorder/generic.h>
#include <drm/drm_print.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_dsc.h>
/**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 12893e7be89b..a504542238ed 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -93,6 +93,8 @@ static int oui(u8 first, u8 second, u8 third)
/* Non desktop display (i.e. HMD) */
#define EDID_QUIRK_NON_DESKTOP (1 << 12)
+#define MICROSOFT_IEEE_OUI 0xca125c
+
struct detailed_mode_closure {
struct drm_connector *connector;
struct edid *edid;
@@ -212,9 +214,7 @@ static const struct edid_quirk {
/* Windows Mixed Reality Headsets */
EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('H', 'P', 'N', 0x3515, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
- EDID_QUIRK('L', 'E', 'N', 0xb800, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
@@ -3776,7 +3776,7 @@ static int do_y420vdb_modes(struct drm_connector *connector,
}
if (modes > 0)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
return modes;
}
@@ -4222,6 +4222,17 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
return oui(db[3], db[2], db[1]) == HDMI_FORUM_IEEE_OUI;
}
+static bool cea_db_is_microsoft_vsdb(const u8 *db)
+{
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) != 21)
+ return false;
+
+ return oui(db[3], db[2], db[1]) == MICROSOFT_IEEE_OUI;
+}
+
static bool cea_db_is_vcdb(const u8 *db)
{
if (cea_db_tag(db) != USE_EXTENDED_TAG)
@@ -4279,7 +4290,7 @@ static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
if (map_len == 0) {
/* All CEA modes support ycbcr420 sampling also.*/
hdmi->y420_cmdb_map = U64_MAX;
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
return;
}
@@ -4302,7 +4313,7 @@ static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
map |= (u64)db[2 + count] << (8 * count);
if (map)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
hdmi->y420_cmdb_map = map;
}
@@ -5075,21 +5086,21 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
dc_bpc = 10;
- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
+ info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30;
DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
dc_bpc = 12;
- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
+ info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36;
DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
dc_bpc = 16;
- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
+ info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48;
DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
connector->name);
}
@@ -5104,16 +5115,9 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
connector->name, dc_bpc);
info->bpc = dc_bpc;
- /*
- * Deep color support mandates RGB444 support for all video
- * modes and forbids YCRCB422 support for all video modes per
- * HDMI 1.3 spec.
- */
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
-
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes;
DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
connector->name);
}
@@ -5149,6 +5153,25 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
drm_parse_hdmi_deep_color_info(connector, db);
}
+/*
+ * See EDID extension for head-mounted and specialized monitors, specified at:
+ * https://docs.microsoft.com/en-us/windows-hardware/drivers/display/specialized-monitors-edid-extension
+ */
+static void drm_parse_microsoft_vsdb(struct drm_connector *connector,
+ const u8 *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ u8 version = db[4];
+ bool desktop_usage = db[5] & BIT(6);
+
+ /* Version 1 and 2 for HMDs, version 3 flags desktop usage explicitly */
+ if (version == 1 || version == 2 || (version == 3 && !desktop_usage))
+ info->non_desktop = true;
+
+ drm_dbg_kms(connector->dev, "HMD or specialized display VSDB version %u: 0x%02x\n",
+ version, db[5]);
+}
+
static void drm_parse_cea_ext(struct drm_connector *connector,
const struct edid *edid)
{
@@ -5165,9 +5188,9 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
/* The existence of a CEA block should imply RGB support */
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid_ext[3] & EDID_CEA_YCRCB444)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
if (edid_ext[3] & EDID_CEA_YCRCB422)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR422;
if (cea_db_offsets(edid_ext, &start, &end))
return;
@@ -5179,6 +5202,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
drm_parse_hdmi_vsdb_video(connector, db);
if (cea_db_is_hdmi_forum_vsdb(db))
drm_parse_hdmi_forum_vsdb(connector, db);
+ if (cea_db_is_microsoft_vsdb(db))
+ drm_parse_microsoft_vsdb(connector, db);
if (cea_db_is_y420cmdb(db))
drm_parse_y420cmdb_bitmap(connector, db);
if (cea_db_is_vcdb(db))
@@ -5333,17 +5358,13 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
- info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
-
drm_get_monitor_range(connector, edid);
- DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
-
if (edid->revision < 3)
- return quirks;
+ goto out;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
- return quirks;
+ goto out;
drm_parse_cea_ext(connector, edid);
@@ -5363,7 +5384,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
- return quirks;
+ goto out;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
@@ -5395,17 +5416,25 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR422;
drm_update_mso(connector, edid);
+out:
+ if (quirks & EDID_QUIRK_NON_DESKTOP) {
+ drm_dbg_kms(connector->dev, "Non-desktop display%s\n",
+ info->non_desktop ? " (redundant quirk)" : "");
+ info->non_desktop = true;
+ }
+
return quirks;
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
- struct displayid_detailed_timings_1 *timings)
+ struct displayid_detailed_timings_1 *timings,
+ bool type_7)
{
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
@@ -5426,7 +5455,8 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
if (!mode)
return NULL;
- mode->clock = pixel_clock * 10;
+ /* resolution is kHz for type VII, and 10 kHz for type I */
+ mode->clock = type_7 ? pixel_clock : pixel_clock * 10;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hsync;
mode->hsync_end = mode->hsync_start + hsync_width;
@@ -5457,6 +5487,7 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
int num_timings;
struct drm_display_mode *newmode;
int num_modes = 0;
+ bool type_7 = block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING;
/* blocks must be multiple of 20 bytes length */
if (block->num_bytes % 20)
return 0;
@@ -5465,7 +5496,7 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
for (i = 0; i < num_timings; i++) {
struct displayid_detailed_timings_1 *timings = &det->timings[i];
- newmode = drm_mode_displayid_detailed(connector->dev, timings);
+ newmode = drm_mode_displayid_detailed(connector->dev, timings, type_7);
if (!newmode)
continue;
@@ -5484,7 +5515,8 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
displayid_iter_edid_begin(edid, &iter);
displayid_iter_for_each(block, &iter) {
- if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING)
+ if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
+ block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
}
displayid_iter_end(&iter);
@@ -5652,7 +5684,7 @@ static bool is_hdmi2_sink(const struct drm_connector *connector)
return true;
return connector->display_info.hdmi.scdc.supported ||
- connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
+ connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR420;
}
static inline bool is_eotf_supported(u8 output_eotf, u8 sink_eotf)
@@ -5891,13 +5923,13 @@ static const u32 hdmi_colorimetry_val[] = {
#undef ACE
/**
- * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe
- * colorspace information
+ * drm_hdmi_avi_infoframe_colorimetry() - fill the HDMI AVI infoframe
+ * colorimetry information
* @frame: HDMI AVI infoframe
* @conn_state: connector state
*/
void
-drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
+drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
u32 colorimetry_val;
@@ -5916,7 +5948,7 @@ drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
frame->extended_colorimetry = (colorimetry_val >> 2) &
EXTENDED_COLORIMETRY_MASK;
}
-EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace);
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorimetry);
/**
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
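Worked through, the DisplayID clock fix above amounts to a units change: a type I (DisplayID 1.x) timing stores its pixel clock in 10 kHz steps, so a decoded value of 14850 means mode->clock = 148500 (148.5 MHz), while a type VII (DisplayID 2.0) timing stores the same 148.5 MHz directly as 148500 kHz.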
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 47e92400548d..8be20080cd8d 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -61,17 +61,3 @@ MODULE_PARM_DESC(edid_firmware,
"DEPRECATED. Use drm.edid_firmware module parameter instead.");
#endif
-
-static int __init drm_kms_helper_init(void)
-{
- return drm_dp_aux_dev_init();
-}
-
-static void __exit drm_kms_helper_exit(void)
-{
- /* Call exit functions from specific kms helpers here */
- drm_dp_aux_dev_exit();
-}
-
-module_init(drm_kms_helper_init);
-module_exit(drm_kms_helper_exit);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 82afb854141b..deeec60a3315 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -202,17 +202,13 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
memcpy(formats_ptr(blob_data), plane->format_types, formats_size);
- /* If we can't determine support, just bail */
- if (!plane->funcs->format_mod_supported)
- goto done;
-
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (plane->funcs->format_mod_supported(plane,
+ if (!plane->funcs->format_mod_supported ||
+ plane->funcs->format_mod_supported(plane,
plane->format_types[j],
plane->modifiers[i])) {
-
mod->formats |= 1ULL << j;
}
}
@@ -223,7 +219,6 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
-done:
drm_object_attach_property(&plane->base, config->modifiers_property,
blob->base.id);
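The inverted check changes what a driver without a .format_mod_supported hook advertises: instead of bailing out and leaving the blob's modifier entries unfilled, every listed modifier is now marked as supported for every listed format, so e.g. formats {XRGB8888, ARGB8888} with modifiers {LINEAR} get both format bits set in mod->formats.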
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index beaf99e9120a..03b149cc455b 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -387,7 +387,8 @@ static void drm_privacy_screen_device_release(struct device *dev)
* * An ERR_PTR(errno) on failure.
*/
struct drm_privacy_screen *drm_privacy_screen_register(
- struct device *parent, const struct drm_privacy_screen_ops *ops)
+ struct device *parent, const struct drm_privacy_screen_ops *ops,
+ void *data)
{
struct drm_privacy_screen *priv;
int ret;
@@ -404,6 +405,7 @@ struct drm_privacy_screen *drm_privacy_screen_register(
priv->dev.parent = parent;
priv->dev.release = drm_privacy_screen_device_release;
dev_set_name(&priv->dev, "privacy_screen-%s", dev_name(parent));
+ priv->drvdata = data;
priv->ops = ops;
priv->ops->get_hw_state(priv);
@@ -439,6 +441,7 @@ void drm_privacy_screen_unregister(struct drm_privacy_screen *priv)
mutex_unlock(&drm_privacy_screen_devs_lock);
mutex_lock(&priv->lock);
+ priv->drvdata = NULL;
priv->ops = NULL;
mutex_unlock(&priv->lock);
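A provider now hands its context over at registration and fetches it back in the ops; a sketch with hypothetical driver names, using the drm_privacy_screen_get_drvdata() accessor added alongside the new argument:

static void my_get_hw_state(struct drm_privacy_screen *priv)
{
	struct my_chip *chip = drm_privacy_screen_get_drvdata(priv);

	priv->hw_state = priv->sw_state = my_chip_read(chip) ?
		PRIVACY_SCREEN_ENABLED : PRIVACY_SCREEN_DISABLED;
}

static int my_set_sw_state(struct drm_privacy_screen *priv,
			   enum drm_privacy_screen_status sw_state)
{
	struct my_chip *chip = drm_privacy_screen_get_drvdata(priv);
	int ret;

	ret = my_chip_write(chip, sw_state == PRIVACY_SCREEN_ENABLED);
	if (ret)
		return ret;

	priv->hw_state = priv->sw_state = sw_state;
	return 0;
}

static const struct drm_privacy_screen_ops my_ops = {
	.set_sw_state = my_set_sw_state,
	.get_hw_state = my_get_hw_state,
};

/* at probe time, with @chip as the driver context: */
priv = drm_privacy_screen_register(dev, &my_ops, chip);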
diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c
index e7aa74ad0b24..72ed40e4997e 100644
--- a/drivers/gpu/drm/drm_privacy_screen_x86.c
+++ b/drivers/gpu/drm/drm_privacy_screen_x86.c
@@ -50,6 +50,13 @@ static bool __init detect_thinkpad_privacy_screen(void)
}
#endif
+#if IS_ENABLED(CONFIG_CHROMEOS_PRIVACY_SCREEN)
+static bool __init detect_chromeos_privacy_screen(void)
+{
+ return acpi_dev_present("GOOG0010", NULL, -1);
+}
+#endif
+
static const struct arch_init_data arch_init_data[] __initconst = {
#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
{
@@ -61,6 +68,16 @@ static const struct arch_init_data arch_init_data[] __initconst = {
.detect = detect_thinkpad_privacy_screen,
},
#endif
+#if IS_ENABLED(CONFIG_CHROMEOS_PRIVACY_SCREEN)
+ {
+ .lookup = {
+ .dev_id = NULL,
+ .con_id = NULL,
+ .provider = "privacy_screen-GOOG0010:00",
+ },
+ .detect = detect_chromeos_privacy_screen,
+ },
+#endif
};
void __init drm_privacy_screen_lookup_init(void)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index a17313282e8b..4eb00a0cb650 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,8 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences(robj, NULL,
- &bo->nr_shared,
+ ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
&bo->shared);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6a251e3aa779..f27cfd2a9726 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -66,6 +66,7 @@ config DRM_EXYNOS_DP
bool "Exynos specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
select DRM_ANALOGIX_DP
+ select DRM_DP_HELPER
default DRM_EXYNOS
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 32a36572b894..b7d0a4aead0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -258,6 +258,7 @@ struct exynos_dsi {
struct list_head bridge_chain;
struct drm_bridge *out_bridge;
struct device *dev;
+ struct drm_display_mode mode;
void __iomem *reg_base;
struct phy *phy;
@@ -881,7 +882,7 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
{
- struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode;
+ struct drm_display_mode *m = &dsi->mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
u32 reg;
@@ -1446,6 +1447,15 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
pm_runtime_put_sync(dsi->dev);
}
+static void exynos_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+
+ drm_mode_copy(&dsi->mode, adjusted_mode);
+}
+
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
@@ -1513,6 +1523,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.enable = exynos_dsi_enable,
.disable = exynos_dsi_disable,
+ .mode_set = exynos_dsi_mode_set,
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index ba6ad1466374..f562e91337c7 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -31,7 +31,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
@@ -82,7 +82,6 @@ i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_START;
- int ret;
if (reading)
mode |= MODE_I2C_READ;
@@ -90,8 +89,7 @@ i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
mode |= MODE_I2C_WRITE;
algo_data->address = address;
algo_data->running = true;
- ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- return ret;
+ return i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
}
/*
@@ -122,13 +120,11 @@ static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
if (!algo_data->running)
return -EIO;
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
- return ret;
+ return i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
}
/*
@@ -139,13 +135,11 @@ static int
i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
if (!algo_data->running)
return -EIO;
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
- return ret;
+ return i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
}
static int
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 99da3118131a..60ba7de59139 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -335,7 +335,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
struct psb_gem_object *pobj;
struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
struct drm_gem_object *obj;
- void *tmp_dst, *tmp_src;
+ void *tmp_dst;
int ret = 0, i, cursor_pages;
/* If we didn't get a handle then turn the cursor off */
@@ -400,9 +400,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* Copy the cursor to cursor mem */
tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
for (i = 0; i < cursor_pages; i++) {
- tmp_src = kmap(pobj->pages[i]);
- memcpy(tmp_dst, tmp_src, PAGE_SIZE);
- kunmap(pobj->pages[i]);
+ memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
tmp_dst += PAGE_SIZE;
}
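memcpy_from_page() from <linux/highmem.h> is essentially the open-coded sequence it replaces here, with the short-lived local mapping kept internal:

	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);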
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d5ca5f241974..ea7c16f33a0e 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -6,7 +6,7 @@
* Eric Anholt <eric@anholt.net>
*/
#include <drm/drm.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "intel_bios.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index fe9ace2a7967..a70b01ccdf70 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -184,17 +184,17 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
pd->invalid_pte = 0;
}
- v = kmap(pd->dummy_pt);
+ v = kmap_local_page(pd->dummy_pt);
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
v[i] = pd->invalid_pte;
- kunmap(pd->dummy_pt);
+ kunmap_local(v);
- v = kmap(pd->p);
+ v = kmap_local_page(pd->p);
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
v[i] = pd->invalid_pde;
- kunmap(pd->p);
+ kunmap_local(v);
clear_page(kmap(pd->dummy_page));
kunmap(pd->dummy_page);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 610fc8e135f9..fe4269c5aa0a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_module.h>
#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
@@ -379,7 +380,7 @@ static struct pci_driver hibmc_pci_driver = {
.driver.pm = &hibmc_pm_ops,
};
-module_pci_driver(hibmc_pci_driver);
+drm_module_pci_driver(hibmc_pci_driver);
MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
MODULE_AUTHOR("RongrongZou <zourongrong@huawei.com>");
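drm_module_pci_driver() behaves like module_pci_driver() except that it honours the nomodeset kernel parameter; conceptually the init side reduces to:

	if (drm_firmware_drivers_only())
		return -ENODEV;

	return pci_register_driver(&hibmc_pci_driver);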
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a4c94dc2e216..2ac220bfd0ed 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -9,6 +9,7 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
@@ -27,6 +28,7 @@ config DRM_I915
select CEC_CORE if CEC_NOTIFIER
select VMAP_PFN
select DRM_TTM
+ select DRM_BUDDY
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e9ce09620eb5..9d588d936e3d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -163,7 +163,6 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
- i915_buddy.o \
i915_cmd_parser.o \
i915_deps.o \
i915_gem_evict.o \
@@ -176,7 +175,7 @@ i915-y += \
i915_trace_points.o \
i915_ttm_buddy_manager.o \
i915_vma.o \
- i915_vma_snapshot.o \
+ i915_vma_resource.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index adf9fee51a46..40b5e7ed12c2 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,7 +25,7 @@
*
*/
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8e4e2fae2fb3..80b19c304c43 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -38,7 +38,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index b98115418ec5..b50d0e6efe21 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -31,10 +31,10 @@
#include <linux/pwm.h>
#include <linux/sched/clock.h>
+#include <drm/dp/drm_dp_dual_mode_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_dual_mode_helper.h>
-#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dsc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 5381b993a346..1046e7fe310a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -36,7 +36,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 540a669e01dd..82d024dafe7b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -6,8 +6,8 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index e50d550fdc72..dc1556b46b85 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_DP_LINK_TRAINING_H__
#define __INTEL_DP_LINK_TRAINING_H__
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
struct intel_crtc_state;
struct intel_dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 2b8e89477f48..05dd7dba3a5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -50,7 +50,7 @@ static void dpt_insert_page(struct i915_address_space *vm,
}
static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
@@ -66,8 +66,8 @@ static void dpt_insert_entries(struct i915_address_space *vm,
* not to allow the user to override access to a read only page.
*/
- i = vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ i = vma_res->start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
gen8_set_pte(&base[i++], pte_encode | addr);
}
@@ -78,35 +78,38 @@ static void dpt_clear_range(struct i915_address_space *vm,
static void dpt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
+ if (vma_res->bound_flags)
+ return;
+
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
+ if (vm->has_read_only && vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
+ if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+ vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
-static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static void dpt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
static void dpt_cleanup(struct i915_address_space *vm)
@@ -252,7 +255,11 @@ intel_dpt_create(struct intel_framebuffer *fb)
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(dpt_obj);
+ }
if (ret) {
i915_gem_object_put(dpt_obj);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index e60046d90124..a307b4993bcf 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -37,7 +37,11 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(obj);
+ }
if (ret) {
vma = ERR_PTR(ret);
goto err;
@@ -48,7 +52,7 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
goto err;
if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind(vma);
+ ret = i915_vma_unbind_unlocked(vma);
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 3ec34de35460..87f4af3fd523 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -605,7 +605,7 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
else if (DISPLAY_VER(i915) == 9)
skl_fbc_program_cfb_stride(fbc);
- if (i915->ggtt.num_fences)
+ if (to_gt(i915)->ggtt->num_fences)
snb_fbc_program_fence(fbc);
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 837484cbcd33..fd5bc7acf08d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -197,7 +197,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 347ff8a59ae3..1aa5bdc7b0dc 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -730,7 +730,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
else
frame->colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
+ drm_hdmi_avi_infoframe_colorimetry(frame, conn_state);
/* nonsense combination */
drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
@@ -1912,7 +1912,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
if (ycbcr420_output)
return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
else
- return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
+ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36;
case 10:
if (!has_hdmi_sink)
return false;
@@ -1920,7 +1920,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
if (ycbcr420_output)
return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
else
- return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30;
+ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30;
case 8:
return true;
default:
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 6cc91d731ab0..76357c9b76e4 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -24,7 +24,7 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/dp/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
#include "intel_de.h"
@@ -546,7 +546,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
/* Set the Colorspace as per the HDMI spec */
- drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+ drm_hdmi_avi_infoframe_colorimetry(&frame.avi, conn_state);
/* nonsense combination */
drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index f2fbb196d62a..d7b1de4cc205 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -46,17 +46,18 @@ static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
+ struct intel_memory_region *mem = i915->mm.stolen_region;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 base, size;
- if (plane_config->size == 0)
+ if (!mem || plane_config->size == 0)
return NULL;
base = round_down(plane_config->base,
I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
- I915_GTT_MIN_ALIGNMENT);
+ mem->min_page_size);
size -= base;
/*
@@ -94,7 +95,7 @@ initial_plane_vma(struct drm_i915_private *i915,
goto err_obj;
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err_obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 2958e2be4292..bc6d59df064d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -587,10 +587,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
struct intel_engine_cs **siblings = NULL;
intel_engine_mask_t prev_mask;
- /* FIXME: This is NIY for execlists */
- if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
- return -ENODEV;
-
if (get_user(slot, &ext->engine_index))
return -EFAULT;
@@ -600,6 +596,13 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
if (get_user(num_siblings, &ext->num_siblings))
return -EFAULT;
+ if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
+ num_siblings != 1) {
+ drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
+ num_siblings);
+ return -EINVAL;
+ }
+
if (slot >= set->num_engines) {
drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
slot, set->num_engines);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index babfecb17ad1..e5b0f66ea1fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -174,7 +174,7 @@ i915_gem_context_get_eb_vm(struct i915_gem_context *ctx)
vm = ctx->vm;
if (!vm)
- vm = &ctx->i915->ggtt.vm;
+ vm = &to_gt(ctx->i915)->ggtt->vm;
vm = i915_vm_get(vm);
return vm;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8e19b3baa684..d42f437149c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -32,7 +32,6 @@
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_vma_snapshot.h"
struct eb_vma {
struct i915_vma *vma;
@@ -444,7 +443,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
else
pin_flags = entry->offset & PIN_OFFSET_MASK;
- pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
+ pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE;
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
@@ -462,17 +461,15 @@ eb_pin_vma(struct i915_execbuffer *eb,
entry->pad_to_size,
entry->alignment,
eb_pin_flags(entry, ev->flags) |
- PIN_USER | PIN_NOEVICT);
+ PIN_USER | PIN_NOEVICT | PIN_VALIDATE);
if (unlikely(err))
return err;
}
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
- if (unlikely(err)) {
- i915_vma_unpin(vma);
+ if (unlikely(err))
return err;
- }
if (vma->fence)
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -488,13 +485,9 @@ eb_pin_vma(struct i915_execbuffer *eb,
static inline void
eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
- return;
-
if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
__i915_vma_unpin_fence(ev->vma);
- __i915_vma_unpin(ev->vma);
ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
@@ -676,10 +669,8 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
- if (unlikely(err)) {
- i915_vma_unpin(vma);
+ if (unlikely(err))
return err;
- }
if (vma->fence)
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -691,85 +682,95 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
return 0;
}
-static int eb_reserve(struct i915_execbuffer *eb)
+static bool eb_unbind(struct i915_execbuffer *eb, bool force)
{
const unsigned int count = eb->buffer_count;
- unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
+ unsigned int i;
struct list_head last;
+ bool unpinned = false;
+
+ /* Resort *all* the objects into priority order */
+ INIT_LIST_HEAD(&eb->unbound);
+ INIT_LIST_HEAD(&last);
+
+ for (i = 0; i < count; i++) {
+ struct eb_vma *ev = &eb->vma[i];
+ unsigned int flags = ev->flags;
+
+ if (!force && flags & EXEC_OBJECT_PINNED &&
+ flags & __EXEC_OBJECT_HAS_PIN)
+ continue;
+
+ unpinned = true;
+ eb_unreserve_vma(ev);
+
+ if (flags & EXEC_OBJECT_PINNED)
+ /* Pinned must have their slot */
+ list_add(&ev->bind_link, &eb->unbound);
+ else if (flags & __EXEC_OBJECT_NEEDS_MAP)
+			/* Map requires the lowest 256MiB (aperture) */
+ list_add_tail(&ev->bind_link, &eb->unbound);
+ else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ /* Prioritise 4GiB region for restricted bo */
+ list_add(&ev->bind_link, &last);
+ else
+ list_add_tail(&ev->bind_link, &last);
+ }
+
+ list_splice_tail(&last, &eb->unbound);
+ return unpinned;
+}
+
+static int eb_reserve(struct i915_execbuffer *eb)
+{
struct eb_vma *ev;
- unsigned int i, pass;
+ unsigned int pass;
int err = 0;
+ bool unpinned;
/*
* Attempt to pin all of the buffers into the GTT.
- * This is done in 3 phases:
+ * This is done in 2 phases:
*
- * 1a. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 1b. Increment pin count for already bound objects.
- * 2. Bind new objects.
- * 3. Decrement pin count.
+ * 1. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 2. Bind new objects.
*
 * This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
+ *
+ * Defragmenting is skipped if all objects are pinned at a fixed location.
*/
- pass = 0;
- do {
- list_for_each_entry(ev, &eb->unbound, bind_link) {
- err = eb_reserve_vma(eb, ev, pin_flags);
- if (err)
- break;
- }
- if (err != -ENOSPC)
- return err;
+ for (pass = 0; pass <= 2; pass++) {
+ int pin_flags = PIN_USER | PIN_VALIDATE;
- /* Resort *all* the objects into priority order */
- INIT_LIST_HEAD(&eb->unbound);
- INIT_LIST_HEAD(&last);
- for (i = 0; i < count; i++) {
- unsigned int flags;
+ if (pass == 0)
+ pin_flags |= PIN_NONBLOCK;
- ev = &eb->vma[i];
- flags = ev->flags;
- if (flags & EXEC_OBJECT_PINNED &&
- flags & __EXEC_OBJECT_HAS_PIN)
- continue;
-
- eb_unreserve_vma(ev);
+ if (pass >= 1)
+ unpinned = eb_unbind(eb, pass == 2);
- if (flags & EXEC_OBJECT_PINNED)
- /* Pinned must have their slot */
- list_add(&ev->bind_link, &eb->unbound);
- else if (flags & __EXEC_OBJECT_NEEDS_MAP)
- /* Map require the lowest 256MiB (aperture) */
- list_add_tail(&ev->bind_link, &eb->unbound);
- else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- /* Prioritise 4GiB region for restricted bo */
- list_add(&ev->bind_link, &last);
- else
- list_add_tail(&ev->bind_link, &last);
- }
- list_splice_tail(&last, &eb->unbound);
-
- switch (pass++) {
- case 0:
- break;
-
- case 1:
- /* Too fragmented, unbind everything and retry */
- mutex_lock(&eb->context->vm->mutex);
- err = i915_gem_evict_vm(eb->context->vm);
- mutex_unlock(&eb->context->vm->mutex);
+ if (pass == 2) {
+ err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ if (!err) {
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ mutex_unlock(&eb->context->vm->mutex);
+ }
if (err)
return err;
- break;
+ }
- default:
- return -ENOSPC;
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
+ if (err)
+ break;
}
- pin_flags = PIN_USER;
- } while (1);
+ if (err != -ENOSPC)
+ break;
+ }
+
+ return err;
}
static int eb_select_context(struct i915_execbuffer *eb)
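[Editor's note] The control-flow change above is easy to miss in the noise: the old open-coded do { } while (1) with a pass counter and switch becomes a bounded ladder of at most three passes. Pass 0 tries a non-blocking pin of only the currently unbound objects, pass 1 additionally unbinds anything whose placement no longer satisfies its constraints, and pass 2 force-unbinds even the pinned slots and evicts the whole VM before the final attempt. A minimal user-space model of that shape; try_pin_unbound(), unbind_mismatched() and evict_whole_vm() are hypothetical stand-ins, not i915 API, and the stubs exist only to make the sketch runnable:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-ins; the canned behaviour just exercises the loop. */
static int attempts;
static int try_pin_unbound(bool nonblock)
{
	(void)nonblock;
	return ++attempts < 3 ? -ENOSPC : 0;	/* succeed on the third pass */
}
static bool unbind_mismatched(bool force) { (void)force; return true; }
static int evict_whole_vm(void) { return 0; }

static int reserve_all(void)
{
	int pass, err = 0;

	for (pass = 0; pass <= 2; pass++) {
		if (pass >= 1)				/* pass 1+: make room first */
			unbind_mismatched(pass == 2);	/* pass 2: even pinned slots */
		if (pass == 2) {
			err = evict_whole_vm();		/* last resort: defragment */
			if (err)
				return err;
		}
		err = try_pin_unbound(pass == 0);	/* only pass 0 is non-blocking */
		if (err != -ENOSPC)			/* success or a hard error */
			break;
	}
	return err;					/* -ENOSPC if all passes failed */
}

int main(void) { return reserve_all() ? 1 : 0; }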
@@ -1098,7 +1099,7 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
struct drm_i915_private *i915 =
container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
- return &i915->ggtt;
+ return to_gt(i915)->ggtt;
}
static void reloc_cache_unmap(struct reloc_cache *cache)
@@ -1217,10 +1218,11 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
return vaddr;
}
-static void *reloc_iomap(struct drm_i915_gem_object *obj,
+static void *reloc_iomap(struct i915_vma *batch,
struct i915_execbuffer *eb,
unsigned long page)
{
+ struct drm_i915_gem_object *obj = batch->obj;
struct reloc_cache *cache = &eb->reloc_cache;
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
unsigned long offset;
@@ -1230,7 +1232,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
- struct i915_vma *vma;
+ struct i915_vma *vma = ERR_PTR(-ENODEV);
int err;
if (i915_gem_object_is_tiled(obj))
@@ -1243,10 +1245,23 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (err)
return ERR_PTR(err);
- vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
+ /*
+ * i915_gem_object_ggtt_pin_ww may attempt to remove the batch
+ * VMA from the object list because we no longer pin.
+ *
+ * Only attempt to pin the batch buffer to ggtt if the current batch
+ * is not inside ggtt, or the batch buffer is not misplaced.
+ */
+ if (!i915_is_ggtt(batch->vm)) {
+ vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ } else if (i915_vma_is_map_and_fenceable(batch)) {
+ __i915_vma_pin(batch);
+ vma = batch;
+ }
+
if (vma == ERR_PTR(-EDEADLK))
return vma;
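[Editor's note] Worth noting in the hunk above: vma is now seeded with ERR_PTR(-ENODEV), so when neither branch applies (the batch already lives in the GGTT but is not map-and-fenceable) the function reaches the IS_ERR checks and falls back to kmap without needing a separate flag. A self-contained sketch of that sentinel idiom; ERR_PTR()/IS_ERR() are re-implemented here for user space, kernel code gets them from <linux/err.h>:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int have_fast_path)
{
	void *res = ERR_PTR(-ENODEV);	/* sentinel: "no branch applied" */

	if (have_fast_path)
		res = "fast";		/* a real result from one of the branches */

	return res;			/* caller checks IS_ERR() and falls back */
}

int main(void)
{
	printf("%s\n", IS_ERR(lookup(0)) ? "fallback" : (char *)lookup(1));
	return 0;
}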
@@ -1284,7 +1299,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
return vaddr;
}
-static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+static void *reloc_vaddr(struct i915_vma *vma,
struct i915_execbuffer *eb,
unsigned long page)
{
@@ -1296,9 +1311,9 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
} else {
vaddr = NULL;
if ((cache->vaddr & KMAP) == 0)
- vaddr = reloc_iomap(obj, eb, page);
+ vaddr = reloc_iomap(vma, eb, page);
if (!vaddr)
- vaddr = reloc_kmap(obj, cache, page);
+ vaddr = reloc_kmap(vma->obj, cache, page);
}
return vaddr;
@@ -1339,7 +1354,7 @@ relocate_entry(struct i915_vma *vma,
void *vaddr;
repeat:
- vaddr = reloc_vaddr(vma->obj, eb,
+ vaddr = reloc_vaddr(vma, eb,
offset >> PAGE_SHIFT);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -1414,7 +1429,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
mutex_lock(&vma->vm->mutex);
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
- PIN_GLOBAL, NULL);
+ PIN_GLOBAL, NULL, NULL);
mutex_unlock(&vma->vm->mutex);
reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
if (err)
@@ -1944,7 +1959,6 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
unsigned int i = count, j;
- struct i915_vma_snapshot *vsnap;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
@@ -1954,11 +1968,6 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
if (!(flags & EXEC_OBJECT_CAPTURE))
continue;
- vsnap = i915_vma_snapshot_alloc(GFP_KERNEL);
- if (!vsnap)
- continue;
-
- i915_vma_snapshot_init(vsnap, vma, "user");
for_each_batch_create_order(eb, j) {
struct i915_capture_list *capture;
@@ -1967,10 +1976,9 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
continue;
capture->next = eb->capture_lists[j];
- capture->vma_snapshot = i915_vma_snapshot_get(vsnap);
+ capture->vma_res = i915_vma_resource_get(vma->resource);
eb->capture_lists[j] = capture;
}
- i915_vma_snapshot_put(vsnap);
}
}
@@ -2201,7 +2209,7 @@ shadow_batch_pin(struct i915_execbuffer *eb,
if (IS_ERR(vma))
return vma;
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
+ err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags | PIN_VALIDATE);
if (err)
return ERR_PTR(err);
@@ -2215,7 +2223,7 @@ static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i9
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (eb->batch_flags & I915_DISPATCH_SECURE)
- return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
+ return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, PIN_VALIDATE);
return NULL;
}
@@ -2266,13 +2274,12 @@ static int eb_parse(struct i915_execbuffer *eb)
err = i915_gem_object_lock(pool->obj, &eb->ww);
if (err)
- goto err;
+ return err;
shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
- if (IS_ERR(shadow)) {
- err = PTR_ERR(shadow);
- goto err;
- }
+ if (IS_ERR(shadow))
+ return PTR_ERR(shadow);
+
intel_gt_buffer_pool_mark_used(pool);
i915_gem_object_set_readonly(shadow->obj);
shadow->private = pool;
@@ -2284,25 +2291,21 @@ static int eb_parse(struct i915_execbuffer *eb)
shadow = shadow_batch_pin(eb, pool->obj,
&eb->gt->ggtt->vm,
PIN_GLOBAL);
- if (IS_ERR(shadow)) {
- err = PTR_ERR(shadow);
- shadow = trampoline;
- goto err_shadow;
- }
+ if (IS_ERR(shadow))
+ return PTR_ERR(shadow);
+
shadow->private = pool;
eb->batch_flags |= I915_DISPATCH_SECURE;
}
batch = eb_dispatch_secure(eb, shadow);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_trampoline;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
if (err)
- goto err_trampoline;
+ return err;
err = intel_engine_cmd_parser(eb->context->engine,
eb->batches[0]->vma,
@@ -2310,7 +2313,7 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->batch_len[0],
shadow, trampoline);
if (err)
- goto err_unpin_batch;
+ return err;
eb->batches[0] = &eb->vma[eb->buffer_count++];
eb->batches[0]->vma = i915_vma_get(shadow);
@@ -2329,17 +2332,6 @@ secure_batch:
eb->batches[0]->vma = i915_vma_get(batch);
}
return 0;
-
-err_unpin_batch:
- if (batch)
- i915_vma_unpin(batch);
-err_trampoline:
- if (trampoline)
- i915_vma_unpin(trampoline);
-err_shadow:
- i915_vma_unpin(shadow);
-err:
- return err;
}
static int eb_request_submit(struct i915_execbuffer *eb,
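[Editor's note] The deleted err_unpin_batch/err_trampoline/err_shadow labels are the visible payoff of PIN_VALIDATE: pins taken under the ww transaction no longer need to be individually released on failure, because everything is dropped together when the ww context is backed off or finished. A toy model of the pattern; txn, acquire() and txn_fini() are illustrative names, not i915 API:

#include <errno.h>

struct txn { int nheld; };			/* models i915_gem_ww_ctx */

static int acquire(struct txn *t, int fail)	/* models *_pin_ww(.., PIN_VALIDATE) */
{
	if (fail)
		return -ENOSPC;
	t->nheld++;
	return 0;
}

static void txn_fini(struct txn *t)		/* models i915_gem_ww_ctx_fini() */
{
	t->nheld = 0;				/* releases every hold at once */
}

static int parse(struct txn *t, int fail_second)
{
	int err;

	err = acquire(t, 0);
	if (err)
		return err;			/* no unwind ladder needed */
	err = acquire(t, fail_second);
	if (err)
		return err;			/* earlier hold freed by txn_fini() */
	return 0;
}

int main(void)
{
	struct txn t = { 0 };
	int err = parse(&t, 1);

	txn_fini(&t);				/* single cleanup point */
	return err == -ENOSPC ? 0 : 1;
}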
@@ -2508,9 +2500,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
timeout) < 0) {
i915_request_put(rq);
- tl = intel_context_timeline_lock(ce);
+ /*
+		 * Error path; we cannot use intel_context_timeline_lock() here,
+		 * as that is interruptible by the user, and this cleanup step
+		 * must always run.
+ */
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- intel_context_timeline_unlock(tl);
+ mutex_unlock(&ce->timeline->mutex);
if (nonblock)
return -EWOULDBLOCK;
@@ -3273,9 +3270,8 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
* _onstack interface.
*/
if (eb->batches[i]->vma)
- i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot,
- eb->batches[i]->vma,
- "batch");
+ eb->requests[i]->batch_res =
+ i915_vma_resource_get(eb->batches[i]->vma->resource);
if (eb->batch_pool) {
GEM_BUG_ON(intel_context_is_parallel(eb->context));
intel_gt_buffer_pool_mark_active(eb->batch_pool,
@@ -3460,8 +3456,6 @@ err_request:
err_vma:
eb_release_vmas(&eb, true);
- if (eb.trampoline)
- i915_vma_unpin(eb.trampoline);
WARN_ON(err == -EDEADLK);
i915_gem_ww_ctx_fini(&eb.ww);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a3b60e1ede90..efe69d6b86f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
+#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -297,7 +298,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
intel_wakeref_t wakeref;
@@ -360,8 +361,21 @@ retry:
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
- /* The entire mappable GGTT is pinned? Unexpected! */
- GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
+ /*
+ * The entire mappable GGTT is pinned? Unexpected!
+	 * Try to evict the object we locked too, as normally we skip it
+	 * due to the lack of short-term pinning inside execbuf.
+ */
+ if (vma == ERR_PTR(-ENOSPC)) {
+ ret = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (!ret) {
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ mutex_unlock(&ggtt->vm.mutex);
+ }
+ if (ret)
+ goto err_reset;
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
+ }
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
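[Editor's note] The fault path above turns a previously fatal condition (the GEM_BUG_ON) into a recoverable one: on -ENOSPC it takes the VM mutex interruptibly, evicts, and retries the pin exactly once. The retry shape in isolation, with try_pin() and evict_vm() as hypothetical stand-ins rather than i915 API:

#include <errno.h>
#include <stddef.h>

static int attempts;
static void *try_pin(void)		/* fails with "no space" on first call */
{
	return attempts++ ? (void *)0x1000 : NULL;
}

static int evict_vm(void)		/* models i915_gem_evict_vm() under the vm mutex */
{
	return 0;
}

static void *pin_with_evict_fallback(int *ret)
{
	void *vma = try_pin();

	if (!vma) {			/* models vma == ERR_PTR(-ENOSPC) */
		*ret = evict_vm();
		if (*ret)
			return NULL;
		vma = try_pin();	/* single retry after making room */
	}
	return vma;
}

int main(void)
{
	int ret = 0;

	return pin_with_evict_fallback(&ret) && !ret ? 0 : 1;
}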
@@ -390,16 +404,16 @@ retry:
assert_rpm_wakelock_held(rpm);
/* Mark as being mmapped into userspace for later revocation */
- mutex_lock(&i915->ggtt.vm.mutex);
+ mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
- list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
- mutex_unlock(&i915->ggtt.vm.mutex);
+ list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
+ mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
/* Track the mmo associated with the fenced vma */
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
+ intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
@@ -514,7 +528,7 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
* wakeref.
*/
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- mutex_lock(&i915->ggtt.vm.mutex);
+ mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
if (!obj->userfault_count)
goto out;
@@ -532,7 +546,7 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
wmb();
out:
- mutex_unlock(&i915->ggtt.vm.mutex);
+ mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
@@ -738,13 +752,14 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
u32 handle,
u64 *offset)
{
+ struct drm_i915_private *i915 = to_i915(dev);
enum i915_mmap_type mmap_type;
if (HAS_LMEM(to_i915(dev)))
mmap_type = I915_MMAP_TYPE_FIXED;
else if (pat_enabled())
mmap_type = I915_MMAP_TYPE_WC;
- else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+ else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return -ENODEV;
else
mmap_type = I915_MMAP_TYPE_GTT;
@@ -792,7 +807,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
switch (args->flags) {
case I915_MMAP_OFFSET_GTT:
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return -ENODEV;
type = I915_MMAP_TYPE_GTT;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 9a478c19c477..2d593d573ef1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -285,6 +285,12 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
+ /* Verify that the vma is unbound under the vm mutex. */
+ mutex_lock(&vma->vm->mutex);
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ __i915_vma_unbind(vma);
+ mutex_unlock(&vma->vm->mutex);
+
__i915_vma_put(vma);
spin_lock(&obj->vma.lock);
@@ -761,6 +767,18 @@ i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
return dma_fence_get(i915_gem_to_ttm(obj)->moving);
}
+void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence)
+{
+ struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
+
+ if (*moving == fence)
+ return;
+
+ dma_fence_put(*moving);
+ *moving = dma_fence_get(fence);
+}
+
/**
* i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
* @obj: The object whose moving fence to wait for.
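[Editor's note] The new i915_gem_object_set_moving_fence() above is the classic replace-a-refcounted-pointer helper: bail out on self-assignment, drop the reference on the old fence, take one on the new. A user-space model with a toy refcount; get(), put() and set_moving() stand in for dma_fence_get(), dma_fence_put() and the helper, and are not kernel API:

#include <assert.h>
#include <stdlib.h>

struct fence { int refs; };

static struct fence *get(struct fence *f) { if (f) f->refs++; return f; }
static void put(struct fence *f) { if (f && --f->refs == 0) free(f); }

/* Guard against replacing a fence with itself, then swap references
 * old-out / new-in, exactly as the helper does. */
static void set_moving(struct fence **slot, struct fence *fence)
{
	if (*slot == fence)
		return;

	put(*slot);
	*slot = get(fence);
}

int main(void)
{
	struct fence *a = calloc(1, sizeof(*a));
	struct fence *slot = NULL;

	a->refs = 1;
	set_moving(&slot, a);		/* slot takes its own reference */
	assert(a->refs == 2);
	set_moving(&slot, a);		/* self-assignment: no refcount churn */
	assert(a->refs == 2);
	set_moving(&slot, NULL);	/* dropping the slot releases one ref */
	assert(a->refs == 1);
	put(a);
	return 0;
}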
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index f66d46882ea7..02c37fe4a535 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -459,7 +459,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
@@ -524,6 +523,9 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
struct dma_fence *
i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence);
+
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0dd107dcecc2..0098a32490f0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -15,6 +15,7 @@
#include "i915_active.h"
#include "i915_selftest.h"
+#include "i915_vma_resource.h"
struct drm_i915_gem_object;
struct intel_fronbuffer;
@@ -57,10 +58,26 @@ struct drm_i915_gem_object_ops {
void (*put_pages)(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int (*truncate)(struct drm_i915_gem_object *obj);
- void (*writeback)(struct drm_i915_gem_object *obj);
- int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
- bool no_gpu_wait,
- bool should_writeback);
+ /**
+	 * shrink - Perform further backend-specific actions to facilitate
+	 * shrinking.
+ * @obj: The gem object
+ * @flags: Extra flags to control shrinking behaviour in the backend
+ *
+ * Possible values for @flags:
+ *
+ * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
+ * backing pages, if supported.
+ *
+ * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
+	 * idle. Active objects can be considered later. The TTM backend, for
+	 * example, might have async migrations going on, which don't use any
+ * i915_vma to track the active GTT binding, and hence having an unbound
+ * object might not be enough.
+ */
+#define I915_GEM_OBJECT_SHRINK_WRITEBACK BIT(0)
+#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT BIT(1)
+ int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);
int (*pread)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *arg);
@@ -551,31 +568,7 @@ struct drm_i915_gem_object {
struct sg_table *pages;
void *mapping;
- struct i915_page_sizes {
- /**
- * The sg mask of the pages sg_table. i.e the mask of
- * of the lengths for each sg entry.
- */
- unsigned int phys;
-
- /**
- * The gtt page sizes we are allowed to use given the
- * sg mask and the supported page sizes. This will
- * express the smallest unit we can use for the whole
- * object, as well as the larger sizes we may be able
- * to use opportunistically.
- */
- unsigned int sg;
-
- /**
- * The actual gtt page size usage. Since we can have
- * multiple vma associated with this object we need to
- * prevent any trampling of state, hence a copy of this
- * struct also lives in each vma, therefore the gtt
- * value here should only be read/write through the vma.
- */
- unsigned int gtt;
- } page_sizes;
+ struct i915_page_sizes page_sizes;
I915_SELFTEST_DECLARE(unsigned int page_mask);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 022582a1ca17..183b861620b8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -171,16 +171,6 @@ int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
return 0;
}
-/* Try to discard unwanted pages */
-void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
-{
- assert_object_held_shared(obj);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
- if (obj->ops->writeback)
- obj->ops->writeback(obj);
-}
-
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index e81e5ac8d376..00359ec9d58b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
- intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
+ intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
flush_workqueue(i915->wq);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 60f962f0c041..4efa821f3cb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -334,6 +334,21 @@ shmem_writeback(struct drm_i915_gem_object *obj)
__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}
+static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
+{
+ switch (obj->mm.madv) {
+ case I915_MADV_DONTNEED:
+ return i915_gem_object_truncate(obj);
+ case __I915_MADV_PURGED:
+ return 0;
+ }
+
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
+ shmem_writeback(obj);
+
+ return 0;
+}
+
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -506,7 +521,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.get_pages = shmem_get_pages,
.put_pages = shmem_put_pages,
.truncate = shmem_truncate,
- .writeback = shmem_writeback,
+ .shrink = shmem_shrink,
.pwrite = shmem_pwrite,
.pread = shmem_pread,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index cc927e49d21f..6a6ff98a8746 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -57,21 +57,17 @@ static int drop_pages(struct drm_i915_gem_object *obj,
static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
- if (obj->ops->shrinker_release_pages)
- return obj->ops->shrinker_release_pages(obj,
- !(flags & I915_SHRINK_ACTIVE),
- flags & I915_SHRINK_WRITEBACK);
-
- switch (obj->mm.madv) {
- case I915_MADV_DONTNEED:
- i915_gem_object_truncate(obj);
- return 0;
- case __I915_MADV_PURGED:
- return 0;
- }
+ if (obj->ops->shrink) {
+ unsigned int shrink_flags = 0;
+
+ if (!(flags & I915_SHRINK_ACTIVE))
+ shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;
- if (flags & I915_SHRINK_WRITEBACK)
- i915_gem_object_writeback(obj);
+ if (flags & I915_SHRINK_WRITEBACK)
+ shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;
+
+ return obj->ops->shrink(obj, shrink_flags);
+ }
return 0;
}
@@ -401,9 +397,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
I915_SHRINK_VMAPS);
/* We also want to clear any cached iomaps as they wrap vmap */
- mutex_lock(&i915->ggtt.vm.mutex);
+ mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
list_for_each_entry_safe(vma, next,
- &i915->ggtt.vm.bound_list, vm_link) {
+ &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
@@ -418,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->ggtt.vm.mutex);
+ mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0272b24c5c57..b9c3196b91ca 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -73,7 +73,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
static int i915_adjust_stolen(struct drm_i915_private *i915,
struct resource *dsm)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct resource *r;
@@ -584,6 +584,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages =
i915_pages_create_for_stolen(obj->base.dev,
obj->stolen->start,
@@ -591,7 +592,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- dbg_poison(&to_i915(obj->base.dev)->ggtt,
+ dbg_poison(to_gt(i915)->ggtt,
sg_dma_address(pages->sgl),
sg_dma_len(pages->sgl),
POISON_INUSE);
@@ -604,9 +605,10 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
/* Should only be called from i915_gem_object_release_stolen() */
- dbg_poison(&to_i915(obj->base.dev)->ggtt,
+ dbg_poison(to_gt(i915)->ggtt,
sg_dma_address(pages->sgl),
sg_dma_len(pages->sgl),
POISON_FREE);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 0e0e4805161a..d6adda5bf96b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -183,7 +183,8 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma, *vn;
LIST_HEAD(unbind);
int ret = 0;
@@ -338,7 +339,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err;
- if (!dev_priv->ggtt.num_fences)
+ if (!to_gt(dev_priv)->ggtt->num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
@@ -364,9 +365,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
else
- args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -421,7 +422,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
- if (!dev_priv->ggtt.num_fences)
+ if (!to_gt(dev_priv)->ggtt->num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
@@ -437,10 +438,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index a5852e818a0e..8419096d4056 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -426,16 +426,14 @@ int i915_ttm_purge(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
- bool no_wait_gpu,
- bool should_writeback)
+static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct i915_ttm_tt *i915_tt =
container_of(bo->ttm, typeof(*i915_tt), ttm);
struct ttm_operation_ctx ctx = {
.interruptible = true,
- .no_wait_gpu = no_wait_gpu,
+ .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
};
struct ttm_placement place = {};
int ret;
@@ -469,7 +467,7 @@ static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
return ret;
}
- if (should_writeback)
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
return 0;
@@ -844,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
- bo->priority = I915_TTM_PRIO_NO_PAGES;
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
@@ -979,7 +975,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_truncate,
- .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+ .shrink = i915_ttm_shrink,
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index ee9612a3ee5e..1ebe6e4086a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -142,7 +142,16 @@ int i915_ttm_move_notify(struct ttm_buffer_object *bo)
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
int ret;
- ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ /*
+ * Note: The async unbinding here will actually transform the
+	 * blocking wait for unbind into a wait before finally submitting the
+	 * evict / migration blit, and thus stall the migration timeline,
+	 * which may not be good for overall throughput. We should make
+	 * sure we await the unbind fences *after* the migration blit
+	 * instead of *before*, as we currently do.
+ */
+ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_ASYNC);
if (ret)
return ret;
@@ -427,11 +436,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
if (!IS_ERR(fence))
goto out;
- } else if (move_deps) {
- int err = i915_deps_sync(move_deps, ctx);
+ } else {
+ int err = PTR_ERR(fence);
- if (err)
- return ERR_PTR(err);
+ if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+ return fence;
+
+ if (move_deps) {
+ err = i915_deps_sync(move_deps, ctx);
+ if (err)
+ return ERR_PTR(err);
+ }
}
/* Error intercept failed or no accelerated migration to start with */
@@ -525,7 +540,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return ret;
}
- migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, bo->ttm,
+ migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm,
dst_rsgt, true, &deps);
i915_deps_fini(&deps);
}
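[Editor's note] The reworked error intercept in __i915_ttm_move() splits fence-creation failures into two classes: restartable errors (-EINTR, -ERESTARTSYS, -EAGAIN) are returned as-is so the whole move can be retried, while anything else degrades to a synchronous i915_deps_sync() wait followed by the unaccelerated copy. The classification in isolation; ERESTARTSYS is kernel-internal, so it is defined by hand here, and do_move()/sync_deps are illustrative names only:

#include <errno.h>

#define ERESTARTSYS 512			/* kernel-internal, not in userspace errno.h */

static int is_restartable(int err)
{
	return err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN;
}

/* Propagate restartable errors; otherwise wait for dependencies
 * synchronously (models i915_deps_sync()) and take the slow path. */
static int do_move(int fence_err, int (*sync_deps)(void))
{
	if (fence_err) {
		if (is_restartable(fence_err))
			return fence_err;	/* caller restarts the move */
		if (sync_deps) {
			int err = sync_deps();

			if (err)
				return err;
		}
	}
	return 0;				/* proceed with the memcpy fallback */
}

int main(void)
{
	return do_move(-EINTR, 0) == -EINTR ? 0 : 1;
}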
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6af237aa1854..8424ee8c5eb8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -371,9 +371,9 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
- if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
+ if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) {
pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
- vma->page_sizes.gtt & ~supported, supported);
+ vma->resource->page_sizes_gtt & ~supported, supported);
err = -EINVAL;
}
@@ -404,15 +404,9 @@ static int igt_check_page_sizes(struct i915_vma *vma)
if (i915_gem_object_is_lmem(obj) &&
IS_ALIGNED(vma->node.start, SZ_2M) &&
vma->page_sizes.sg & SZ_2M &&
- vma->page_sizes.gtt < SZ_2M) {
+ vma->resource->page_sizes_gtt < SZ_2M) {
pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
- vma->page_sizes.sg, vma->page_sizes.gtt);
- err = -EINVAL;
- }
-
- if (obj->mm.page_sizes.gtt) {
- pr_err("obj->page_sizes.gtt(%u) should never be set\n",
- obj->mm.page_sizes.gtt);
+ vma->page_sizes.sg, vma->resource->page_sizes_gtt);
err = -EINVAL;
}
@@ -548,9 +542,9 @@ static int igt_mock_memory_region_huge_pages(void *arg)
goto out_unpin;
}
- if (vma->page_sizes.gtt != page_size) {
+ if (vma->resource->page_sizes_gtt != page_size) {
pr_err("%s page_sizes.gtt=%u, expected=%u\n",
- __func__, vma->page_sizes.gtt,
+ __func__, vma->resource->page_sizes_gtt,
page_size);
err = -EINVAL;
goto out_unpin;
@@ -631,9 +625,9 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
- if (vma->page_sizes.gtt != page_size) {
+ if (vma->resource->page_sizes_gtt != page_size) {
pr_err("page_sizes.gtt=%u, expected %u\n",
- vma->page_sizes.gtt, page_size);
+ vma->resource->page_sizes_gtt, page_size);
err = -EINVAL;
}
@@ -648,7 +642,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
* pages.
*/
for (offset = 4096; offset < page_size; offset += 4096) {
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err)
goto out_unpin;
@@ -658,9 +652,10 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
- if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
+ if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) {
pr_err("page_sizes.gtt=%u, expected %llu\n",
- vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
+ vma->resource->page_sizes_gtt,
+ I915_GTT_PAGE_SIZE_4K);
err = -EINVAL;
}
@@ -806,9 +801,9 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
}
}
- if (vma->page_sizes.gtt != expected_gtt) {
+ if (vma->resource->page_sizes_gtt != expected_gtt) {
pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
- vma->page_sizes.gtt, expected_gtt,
+ vma->resource->page_sizes_gtt, expected_gtt,
obj->base.size, yesno(!!single));
err = -EINVAL;
break;
@@ -962,10 +957,10 @@ static int igt_mock_ppgtt_64K(void *arg)
}
}
- if (vma->page_sizes.gtt != expected_gtt) {
+ if (vma->resource->page_sizes_gtt != expected_gtt) {
pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
- vma->page_sizes.gtt, expected_gtt, i,
- yesno(!!single));
+ vma->resource->page_sizes_gtt,
+ expected_gtt, i, yesno(!!single));
err = -EINVAL;
goto out_vma_unpin;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 0be86ffb7c19..8f28e46e8ee5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -319,7 +319,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
int err;
if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err)
return err;
}
@@ -544,7 +544,7 @@ static bool has_bit17_swizzle(int sw)
static bool bad_swizzling(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
return true;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 85bf9ba727d7..bd60d42238fb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1376,7 +1376,7 @@ static int igt_ctx_readonly(void *arg)
goto out_file;
}
- vm = ctx->vm ?: &i915->ggtt.alias->vm;
+ vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
goto out_file;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index ecb691c81d1e..d534141b2cf7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -4,8 +4,13 @@
*/
#include "gt/intel_migrate.h"
+#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"
+#include "i915_deps.h"
+
+#include "selftests/igt_spinner.h"
+
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
bool fill)
{
@@ -101,7 +106,8 @@ static int igt_same_create_migrate(void *arg)
}
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ struct i915_vma *vma)
{
int err;
@@ -109,6 +115,24 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
if (err)
return err;
+ if (vma) {
+ err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
+ 0UL | PIN_OFFSET_FIXED |
+ PIN_USER);
+ if (err) {
+			if (err != -EINTR && err != -ERESTARTSYS &&
+ err != -EDEADLK)
+ pr_err("Failed to pin vma.\n");
+ return err;
+ }
+
+ i915_vma_unpin(vma);
+ }
+
+ /*
+ * Migration will implicitly unbind (asynchronously) any bound
+ * vmas.
+ */
if (i915_gem_object_is_lmem(obj)) {
err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
if (err) {
@@ -149,11 +173,15 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
return err;
}
-static int igt_lmem_pages_migrate(void *arg)
+static int __igt_lmem_pages_migrate(struct intel_gt *gt,
+ struct i915_address_space *vm,
+ struct i915_deps *deps,
+ struct igt_spinner *spin,
+ struct dma_fence *spin_fence)
{
- struct intel_gt *gt = arg;
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
+ struct i915_vma *vma = NULL;
struct i915_gem_ww_ctx ww;
struct i915_request *rq;
int err;
@@ -165,6 +193,14 @@ static int igt_lmem_pages_migrate(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (vm) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+ }
+
/* Initial GPU fill, sync, CPU initialization. */
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(obj, &ww);
@@ -175,25 +211,23 @@ static int igt_lmem_pages_migrate(void *arg)
if (err)
continue;
- err = intel_migrate_clear(&gt->migrate, &ww, NULL,
+ err = intel_migrate_clear(&gt->migrate, &ww, deps,
obj->mm.pages->sgl, obj->cache_level,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq);
if (rq) {
dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq);
}
if (err)
continue;
- err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
- 5 * HZ);
- if (err)
- continue;
-
- err = igt_fill_check_buffer(obj, true);
- if (err)
- continue;
+ if (!vma) {
+ err = igt_fill_check_buffer(obj, true);
+ if (err)
+ continue;
+ }
}
if (err)
goto out_put;
@@ -204,7 +238,7 @@ static int igt_lmem_pages_migrate(void *arg)
*/
for (i = 1; i <= 5; ++i) {
for_i915_gem_ww(&ww, err, true)
- err = lmem_pages_migrate_one(&ww, obj);
+ err = lmem_pages_migrate_one(&ww, obj, vma);
if (err)
goto out_put;
}
@@ -213,12 +247,27 @@ static int igt_lmem_pages_migrate(void *arg)
if (err)
goto out_put;
+ if (spin) {
+ if (dma_fence_is_signaled(spin_fence)) {
+ pr_err("Spinner was terminated by hangcheck.\n");
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ igt_spinner_end(spin);
+ }
+
/* Finally sync migration and check content. */
err = i915_gem_object_wait_migration(obj, true);
if (err)
goto out_unlock;
- err = igt_fill_check_buffer(obj, false);
+ if (vma) {
+ err = i915_vma_wait_for_bind(vma);
+ if (err)
+ goto out_unlock;
+ } else {
+ err = igt_fill_check_buffer(obj, false);
+ }
out_unlock:
i915_gem_object_unlock(obj);
@@ -231,6 +280,7 @@ out_put:
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
int fail_gpu, fail_alloc, ret;
+ struct intel_gt *gt = arg;
for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
@@ -238,7 +288,118 @@ static int igt_lmem_pages_failsafe_migrate(void *arg)
fail_gpu, fail_alloc);
i915_ttm_migrate_set_failure_modes(fail_gpu,
fail_alloc);
- ret = igt_lmem_pages_migrate(arg);
+ ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
+ if (ret)
+ goto out_err;
+ }
+ }
+
+out_err:
+ i915_ttm_migrate_set_failure_modes(false, false);
+ return ret;
+}
+
+/*
+ * This subtest verifies that unbinding at migration is indeed performed
+ * asynchronously. We launch a spinner and a number of migrations that all
+ * depend on that spinner having terminated. Before each migration we bind
+ * a vma, which the migration operation should then unbind asynchronously.
+ * If we are able to schedule migrations without blocking while the
+ * spinner is still running, those unbinds are indeed async and
+ * non-blocking.
+ *
+ * Note that each async bind operation awaits the previous migration
+ * due to the moving fence resulting from that migration.
+ */
+static int igt_async_migrate(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_ppgtt *ppgtt;
+ struct igt_spinner spin;
+ int err;
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_spin;
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true
+ };
+ struct dma_fence *spin_fence;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_deps deps;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_ce;
+ }
+
+ /*
+ * Use MI_NOOP, making the spinner non-preemptible. If there
+		 * is a code path where we fail an async operation due to the
+ * running spinner, we will block and fail to end the
+ * spinner resulting in a deadlock. But with a non-
+ * preemptible spinner, hangcheck will terminate the spinner
+ * for us, and we will later detect that and fail the test.
+ */
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ i915_deps_init(&deps, GFP_KERNEL);
+ err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
+ spin_fence = dma_fence_get(&rq->fence);
+ i915_request_add(rq);
+ if (err)
+ goto out_ce;
+
+ err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
+ spin_fence);
+ i915_deps_fini(&deps);
+ dma_fence_put(spin_fence);
+ if (err)
+ goto out_ce;
+ }
+
+out_ce:
+ igt_spinner_fini(&spin);
+out_spin:
+ i915_vm_put(&ppgtt->vm);
+
+ return err;
+}
+
+/*
+ * Setting ASYNC_FAIL_ALLOC to 2 would simulate memory allocation failure
+ * while arming the migration error check, blocking async migration. That
+ * would cause us to deadlock; hangcheck would then terminate the spinner,
+ * causing the test to fail.
+ */
+#define ASYNC_FAIL_ALLOC 1
+static int igt_lmem_async_migrate(void *arg)
+{
+ int fail_gpu, fail_alloc, ret;
+ struct intel_gt *gt = arg;
+
+ for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
+ for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
+ pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
+ fail_gpu, fail_alloc);
+ i915_ttm_migrate_set_failure_modes(fail_gpu,
+ fail_alloc);
+ ret = igt_async_migrate(gt);
if (ret)
goto out_err;
}
@@ -256,6 +417,7 @@ int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_lmem_create_migrate),
SUBTEST(igt_same_create_migrate),
SUBTEST(igt_lmem_pages_failsafe_migrate),
+ SUBTEST(igt_lmem_async_migrate),
};
if (!HAS_LMEM(i915))
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 65e37d3cbb97..8ae1a1530bd8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -168,7 +168,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
out:
+ i915_gem_object_lock(obj, NULL);
__i915_vma_put(vma);
+ i915_gem_object_unlock(obj);
return err;
}
@@ -263,7 +265,9 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (err)
return err;
+ i915_gem_object_lock(obj, NULL);
__i915_vma_put(vma);
+ i915_gem_object_unlock(obj);
if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n",
@@ -309,7 +313,7 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return 0;
/* We want to check the page mapping and fencing of a large object
@@ -322,7 +326,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -368,10 +372,10 @@ static int igt_partial_tiling(void *arg)
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
- tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
}
@@ -442,7 +446,7 @@ static int igt_smoke_tiling(void *arg)
IGT_TIMEOUT(end);
int err;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return 0;
/*
@@ -459,7 +463,7 @@ static int igt_smoke_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -488,10 +492,10 @@ static int igt_smoke_tiling(void *arg)
break;
case I915_TILING_X:
- tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
}
@@ -858,6 +862,7 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
bool no_map;
if (obj->ops->mmap_offset)
@@ -866,7 +871,7 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
return false;
if (type == I915_MMAP_TYPE_GTT &&
- !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+ !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return false;
i915_gem_object_lock(obj, NULL);
@@ -1353,7 +1358,9 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
* for other objects. Ergo we have to revoke the previous mmap PTE
* access as it no longer points to the same object.
*/
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ i915_gem_object_unlock(obj);
if (err) {
pr_err("Failed to unbind object!\n");
goto out_unmap;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 740ee8086a27..fe0a890775e2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -43,7 +43,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
- i915->ggtt.vm.total + PAGE_SIZE);
+ to_gt(i915)->ggtt->vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 0bd0d611e0c8..871fe7bda0e0 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -108,17 +108,17 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory * const pd = ppgtt->pd;
- unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
+ unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
unsigned int act_pt = first_entry / GEN6_PTES;
unsigned int act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter = sgt_dma(vma);
+ struct sgt_dma iter = sgt_dma(vma_res);
gen6_pte_t *vaddr;
GEM_BUG_ON(!pd->entry[act_pt]);
@@ -144,7 +144,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
} while (1);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
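[Editor's note] A thread running through the gen6 and gen8 hunks above, and the capture/batch_res changes earlier: the page-table bind/insert/unbind hooks now consume a struct i915_vma_resource instead of the vma itself. The resource is a snapshot of everything a binding needs, so it can outlive the vma and be unbound or captured asynchronously. An abridged model of only the fields these hunks actually touch; the real struct carries considerably more state (fences, refcount, and so on):

#include <stdint.h>

/* Field names mirror vma_res->start, ->node_size, ->bi.page_sizes.sg,
 * ->page_sizes_gtt and ->private as seen in the hunks above. */
struct vma_resource_model {
	uint64_t start;			/* GTT offset (was vma->node.start) */
	uint64_t node_size;		/* extent (was vma->node.size) */
	unsigned int page_sizes_sg;	/* backing-store page sizes */
	unsigned int page_sizes_gtt;	/* written back by the insert hook */
	void *private;			/* backend data, e.g. the gen6 ppgtt */
};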
@@ -275,13 +275,13 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static void pd_vma_bind(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct gen6_ppgtt *ppgtt = vma->private;
- u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
+ struct gen6_ppgtt *ppgtt = vma_res->private;
+ u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE;
ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
@@ -289,9 +289,10 @@ static void pd_vma_bind(struct i915_address_space *vm,
gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
}
-static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
+static void pd_vma_unbind(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- struct gen6_ppgtt *ppgtt = vma->private;
+ struct gen6_ppgtt *ppgtt = vma_res->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
unsigned int pde;
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index b012c50f7ce7..c43e724afa9f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -453,20 +453,21 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
return idx;
}
-static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
struct sgt_dma *iter,
enum i915_cache_level cache_level,
u32 flags)
{
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
unsigned int rem = sg_dma_len(iter->sg);
- u64 start = vma->node.start;
+ u64 start = vma_res->start;
- GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
do {
struct i915_page_directory * const pdp =
- gen8_pdp_for_page_address(vma->vm, start);
+ gen8_pdp_for_page_address(vm, start);
struct i915_page_directory * const pd =
i915_pd_entry(pdp, __gen8_pte_index(start, 2));
gen8_pte_t encode = pte_encode;
@@ -475,7 +476,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
gen8_pte_t *vaddr;
u16 index;
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
rem >= I915_GTT_PAGE_SIZE_2M &&
!__gen8_pte_index(start, 0)) {
@@ -492,7 +493,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
page_size = I915_GTT_PAGE_SIZE;
if (!index &&
- vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
@@ -541,9 +542,9 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
*/
if (maybe_64K != -1 &&
(index == I915_PDES ||
- (i915_vm_has_scratch_64K(vma->vm) &&
- !iter->sg && IS_ALIGNED(vma->node.start +
- vma->node.size,
+ (i915_vm_has_scratch_64K(vm) &&
+ !iter->sg && IS_ALIGNED(vma_res->start +
+ vma_res->node_size,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
@@ -559,10 +560,10 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
* instead - which we detect as missing results during
* selftests.
*/
- if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+ if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
u16 i;
- encode = vma->vm->scratch[0]->encode;
+ encode = vm->scratch[0]->encode;
vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
@@ -572,22 +573,22 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
}
}
- vma->page_sizes.gtt |= page_size;
+ vma_res->page_sizes_gtt |= page_size;
} while (iter->sg && sg_dma_len(iter->sg));
}
static void gen8_ppgtt_insert(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
+ struct sgt_dma iter = sgt_dma(vma_res);
- if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
} else {
- u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
+ u64 idx = vma_res->start >> GEN8_PTE_SHIFT;
do {
struct i915_page_directory * const pdp =
@@ -597,7 +598,7 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
cache_level, flags);
} while (idx);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
}
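
The rewritten huge-page path above keeps the same selection rule it had before the vma_res conversion: take 2M pages when the DMA address, remaining length and GTT offset all line up on a 2M boundary, fall back to 64K under the looser alignment test, and use 4K otherwise. Below is a minimal user-space sketch of that decision; pick_page_size() and the PAGE_* constants are illustrative stand-ins for the driver's I915_GTT_PAGE_SIZE_* logic (the real 64K test also accepts a remainder that fills out the rest of the page table).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_4K  (1ull << 12)
#define PAGE_64K (1ull << 16)
#define PAGE_2M  (1ull << 21)

/* Hypothetical model of the gen8 huge-page selection in the hunk above. */
static uint64_t pick_page_size(uint64_t dma, uint64_t rem, uint64_t gva,
			       uint64_t supported_mask)
{
	/* 2M needs an aligned address, enough data and a PD-aligned GVA. */
	if ((supported_mask & PAGE_2M) && !(dma % PAGE_2M) &&
	    rem >= PAGE_2M && !(gva % PAGE_2M))
		return PAGE_2M;
	/* 64K needs 64K alignment plus enough remaining data. */
	if ((supported_mask & PAGE_64K) && !(dma % PAGE_64K) &&
	    rem >= PAGE_64K && !(gva % PAGE_64K))
		return PAGE_64K;
	return PAGE_4K;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)
	       pick_page_size(0x200000, 0x400000, 0x200000,
			      PAGE_2M | PAGE_64K)); /* prints 2097152 */
	return 0;
}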
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ba083d800a08..5d0ec7c49b6a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -79,7 +79,8 @@ static int intel_context_active_acquire(struct intel_context *ce)
__i915_active_acquire(&ce->active);
- if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
+ if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
+ intel_context_is_parallel(ce))
return 0;
/* Preallocate tracking nodes */
@@ -563,7 +564,6 @@ void intel_context_bind_parent_child(struct intel_context *parent,
* Caller's responsibility to validate that this function is used
* correctly, but we use GEM_BUG_ON here to ensure that they do.
*/
- GEM_BUG_ON(!intel_engine_uses_guc(parent->engine));
GEM_BUG_ON(intel_context_is_pinned(parent));
GEM_BUG_ON(intel_context_is_child(parent));
GEM_BUG_ON(intel_context_is_pinned(child));
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 959e9300ac9e..e53008b4dd05 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1700,18 +1700,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
- struct i915_vma_snapshot *vsnap = &rq->batch_snapshot;
+ struct i915_vma_resource *vma_res = rq->batch_res;
void *ring;
int size;
- if (!i915_vma_snapshot_present(vsnap))
- vsnap = NULL;
-
drm_printf(m,
"[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
rq->head, rq->postfix, rq->tail,
- vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u,
- vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u);
+ vma_res ? upper_32_bits(vma_res->start) : ~0u,
+ vma_res ? lower_32_bits(vma_res->start) : ~0u);
size = rq->tail - rq->head;
if (rq->tail < rq->head)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4a9ef688fac2..961d795220a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2601,6 +2601,43 @@ static void execlists_context_cancel_request(struct intel_context *ce,
current->comm);
}
+static struct intel_context *
+execlists_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width)
+{
+ struct intel_context *parent = NULL, *ce, *err;
+ int i;
+
+ GEM_BUG_ON(num_siblings != 1);
+
+ for (i = 0; i < width; ++i) {
+ ce = intel_context_create(engines[i]);
+ if (IS_ERR(ce)) {
+ err = ce;
+ goto unwind;
+ }
+
+ if (i == 0)
+ parent = ce;
+ else
+ intel_context_bind_parent_child(parent, ce);
+ }
+
+ parent->parallel.fence_context = dma_fence_context_alloc(1);
+
+ intel_context_set_nopreempt(parent);
+ for_each_child(parent, ce)
+ intel_context_set_nopreempt(ce);
+
+ return parent;
+
+unwind:
+ if (parent)
+ intel_context_put(parent);
+ return err;
+}
+
static const struct intel_context_ops execlists_context_ops = {
.flags = COPS_HAS_INFLIGHT,
@@ -2619,6 +2656,7 @@ static const struct intel_context_ops execlists_context_ops = {
.reset = lrc_reset,
.destroy = lrc_destroy,
+ .create_parallel = execlists_create_parallel,
.create_virtual = execlists_create_virtual,
};
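
execlists_create_parallel() builds the width-wide context group front to back: the first context becomes the parent, every later one is bound as a child, and on allocation failure a single intel_context_put(parent) is enough to unwind the whole chain. A small stand-alone model of that create/bind/unwind shape, assuming toy ctx_create()/ctx_put() helpers in place of the intel_context API:

#include <stdlib.h>

struct ctx { struct ctx *child; };

static struct ctx *ctx_create(void) { return calloc(1, sizeof(struct ctx)); }

/* Putting the parent releases the whole chain, mirroring how one
 * intel_context_put(parent) unwinds parent and children above. */
static void ctx_put(struct ctx *parent)
{
	while (parent) {
		struct ctx *next = parent->child;
		free(parent);
		parent = next;
	}
}

static struct ctx *create_parallel(unsigned int width)
{
	struct ctx *parent = NULL, *tail = NULL;
	unsigned int i;

	for (i = 0; i < width; i++) {
		struct ctx *ce = ctx_create();
		if (!ce) {
			ctx_put(parent); /* one put unwinds everything */
			return NULL;
		}
		if (!parent)
			parent = ce;
		else
			tail->child = ce; /* bind_parent_child analogue */
		tail = ce;
	}
	return parent;
}

int main(void) { ctx_put(create_parallel(4)); return 0; }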
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index d2922f64d1c8..8850d4e0f9cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -87,7 +87,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
* beyond the end of the batch buffer, across the page boundary,
* and beyond the end of the GTT if we do not provide a guard.
*/
- ret = ggtt_init_hw(&i915->ggtt);
+ ret = ggtt_init_hw(to_gt(i915)->ggtt);
if (ret)
return ret;
@@ -130,22 +130,51 @@ void i915_ggtt_suspend_vm(struct i915_address_space *vm)
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+retry:
+ i915_gem_drain_freed_objects(vm->i915);
+
mutex_lock(&vm->mutex);
/* Skip rewriting PTE on VMA unbind. */
open = atomic_xchg(&vm->open, 0);
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- i915_vma_wait_for_bind(vma);
- if (i915_vma_is_pinned(vma))
+ if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
+ /* Unlikely to race when the GPU is idle, so no need to worry about the slowpath. */
+ if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
+ /*
+ * No dead objects should appear here; the GPU should be
+ * completely idle and userspace suspended.
+ */
+ i915_gem_object_get(obj);
+
+ atomic_set(&vm->open, open);
+ mutex_unlock(&vm->mutex);
+
+ i915_gem_object_lock(obj, NULL);
+ open = i915_vma_unbind(vma);
+ i915_gem_object_unlock(obj);
+
+ GEM_WARN_ON(open);
+
+ i915_gem_object_put(obj);
+ goto retry;
+ }
+
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- __i915_vma_evict(vma);
+ i915_vma_wait_for_bind(vma);
+
+ __i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node);
}
+
+ i915_gem_object_unlock(obj);
}
vm->clear_range(vm, 0, vm->total);
@@ -236,7 +265,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
@@ -253,10 +282,10 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma->node.start / I915_GTT_PAGE_SIZE;
- end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
gen8_set_pte(gte++, pte_encode | addr);
GEM_BUG_ON(gte > end);
@@ -293,7 +322,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
* through the GMADR mapped BAR (i915->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
@@ -304,10 +333,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma->node.start / I915_GTT_PAGE_SIZE;
- end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
iowrite32(vm->pte_encode(addr, level, flags), gte++);
GEM_BUG_ON(gte > end);
@@ -390,7 +419,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
struct insert_entries {
struct i915_address_space *vm;
- struct i915_vma *vma;
+ struct i915_vma_resource *vma_res;
enum i915_cache_level level;
u32 flags;
};
@@ -399,18 +428,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
}
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
- struct insert_entries arg = { vm, vma, level, flags };
+ struct insert_entries arg = { vm, vma_res, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
@@ -449,14 +478,14 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
flags);
}
@@ -468,30 +497,32 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
static void ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
+ if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
return;
+ vma_res->bound_flags |= flags;
+
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (i915_gem_object_is_readonly(obj))
+ if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
+ if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma, cache_level, pte_flags);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
-static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static void ggtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -505,7 +536,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
size = ggtt->vm.total - GUC_GGTT_TOP;
- ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT);
if (ret)
@@ -624,7 +655,7 @@ err:
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -632,25 +663,27 @@ static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
+ if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND)
ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
- stash, vma, cache_level, flags);
+ stash, vma_res, cache_level, flags);
if (flags & I915_VMA_GLOBAL_BIND)
- vm->insert_entries(vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+
+ vma_res->bound_flags |= flags;
}
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma *vma)
+ struct i915_vma_resource *vma_res)
{
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- vm->clear_range(vm, vma->node.start, vma->size);
+ if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
- if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
- ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
+ if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
+ ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
@@ -723,14 +756,14 @@ int i915_init_ggtt(struct drm_i915_private *i915)
{
int ret;
- ret = init_ggtt(&i915->ggtt);
+ ret = init_ggtt(to_gt(i915)->ggtt);
if (ret)
return ret;
if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(&i915->ggtt);
+ ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
if (ret)
- cleanup_init_ggtt(&i915->ggtt);
+ cleanup_init_ggtt(to_gt(i915)->ggtt);
}
return 0;
@@ -743,11 +776,21 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, 0);
flush_workqueue(ggtt->vm.i915->wq);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool trylock;
+
+ trylock = i915_gem_object_trylock(obj, NULL);
+ WARN_ON(!trylock);
+
WARN_ON(__i915_vma_unbind(vma));
+ if (trylock)
+ i915_gem_object_unlock(obj);
+ }
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
@@ -773,7 +816,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
*/
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
fini_aliasing_ppgtt(ggtt);
@@ -788,7 +831,7 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
*/
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
dma_resv_fini(&ggtt->vm._resv);
@@ -1209,7 +1252,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
int ret;
- ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
+ ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
if (ret)
return ret;
@@ -1281,7 +1324,7 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(vm, NULL, vma,
+ vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->cache_level : 0,
was_bound);
if (obj) { /* only used during resume => exclusive access */
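
The reworked i915_ggtt_suspend_vm() above only trylocks each object while holding vm->mutex; on contention it pins the object, drops the VM lock, performs the unbind under the full object lock, and restarts the whole walk. A hedged pthread sketch of that trylock-or-restart shape (the kernel version additionally holds a reference across the slow path, which this toy omits):

#include <pthread.h>
#include <stdbool.h>

struct item { pthread_mutex_t lock; struct item *next; bool evicted; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *items; /* empty list keeps the demo trivially safe */

static void slow_evict(struct item *it)
{
	pthread_mutex_lock(&it->lock);
	it->evicted = true;
	pthread_mutex_unlock(&it->lock);
}

static void evict_all(void)
{
retry:
	pthread_mutex_lock(&list_lock);
	for (struct item *it = items; it; it = it->next) {
		if (it->evicted)
			continue;
		if (pthread_mutex_trylock(&it->lock)) {
			/* Contended: drop the list lock, go slow, restart. */
			pthread_mutex_unlock(&list_lock);
			slow_evict(it);
			goto retry;
		}
		it->evicted = true;
		pthread_mutex_unlock(&it->lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void) { evict_all(); return 0; }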
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index f294753bc947..76880fb8fc19 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -428,7 +428,6 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
err = mutex_lock_interruptible(&vma->vm->mutex);
@@ -731,8 +730,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- i915->ggtt.bit_6_swizzle_x = swizzle_x;
- i915->ggtt.bit_6_swizzle_y = swizzle_y;
+ to_gt(i915)->ggtt->bit_6_swizzle_x = swizzle_x;
+ to_gt(i915)->ggtt->bit_6_swizzle_y = swizzle_y;
}
/*
@@ -899,7 +898,7 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
if (GRAPHICS_VER(i915) < 5 ||
- i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 545a2b1f1834..e8403fa53909 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>
#include "gem/i915_gem_internal.h"
@@ -90,9 +91,11 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
return 0;
}
-void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+int intel_gt_assign_ggtt(struct intel_gt *gt)
{
- gt->ggtt = ggtt;
+ gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
+
+ return gt->ggtt ? 0 : -ENOMEM;
}
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
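
intel_gt_assign_ggtt() replaces the embedded i915->ggtt with a drmm_kzalloc() allocation, so the GGTT's lifetime is tied to the drm_device and no explicit kfree is needed on teardown. A toy model of that managed-allocation ownership, assuming a hypothetical managed_zalloc()/dev_release() pair standing in for the drmm_* helpers:

#include <errno.h>
#include <stdlib.h>

/* Toy model of drm-managed allocation: every managed_zalloc() is
 * remembered and released in one pass when the device goes away.
 * (Fixed-size table for brevity; the real helpers track a list.) */
struct dev { void *managed[16]; int nr; };

static void *managed_zalloc(struct dev *d, size_t sz)
{
	void *p = calloc(1, sz);
	if (p && d->nr < 16)
		d->managed[d->nr++] = p;
	return p;
}

static void dev_release(struct dev *d)
{
	while (d->nr)
		free(d->managed[--d->nr]);
}

struct ggtt { unsigned long total; };

static int assign_ggtt(struct dev *d, struct ggtt **out)
{
	*out = managed_zalloc(d, sizeof(**out));
	return *out ? 0 : -ENOMEM;
}

int main(void)
{
	struct dev d = {0};
	struct ggtt *ggtt;

	if (!assign_ggtt(&d, &ggtt))
		dev_release(&d); /* ggtt freed here, no explicit kfree */
	return 0;
}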
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index a913fb6ffec3..2dad46c3eff2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -36,7 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index e8143fa4b5a8..18d158d77aba 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -322,6 +322,9 @@
#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
+#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
+#define XEHPSDV_CCS_BASE_SHIFT 8
+
#define GAMTARBMODE _MMIO(0x4a08)
#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
#define ARB_MODE_SWIZZLE_BDW (1 << 1)
@@ -1042,6 +1045,7 @@
#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
+#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 8506db9983da..49a8fb63e6e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -163,6 +163,9 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
+ /* Synchronize async unbinds. */
+ i915_vma_resource_bind_dep_sync_all(vm);
+
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -191,6 +194,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
if (!kref_read(&vm->resv_ref))
kref_init(&vm->resv_ref);
+ vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
atomic_set(&vm->open, 1);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 177b42b935a1..8073438b67c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -27,6 +27,7 @@
#include "gt/intel_reset.h"
#include "i915_selftest.h"
+#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -200,7 +201,7 @@ struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
void (*bind_vma)(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
/*
@@ -208,7 +209,8 @@ struct i915_vma_ops {
* setting the valid PTE entries to a reserved scratch page.
*/
void (*unbind_vma)(struct i915_address_space *vm,
- struct i915_vma *vma);
+ struct i915_vma_resource *vma_res);
+
};
struct i915_address_space {
@@ -263,6 +265,9 @@ struct i915_address_space {
/* Flags used when creating page-table objects for this vm */
unsigned long lmem_pt_obj_flags;
+ /* Interval tree for pending unbind vma resources */
+ struct rb_root_cached pending_unbind;
+
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *
@@ -285,7 +290,7 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
@@ -600,11 +605,11 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma *vma);
+ struct i915_vma_resource *vma_res);
void gtt_write_workarounds(struct intel_gt *gt);
@@ -627,8 +632,8 @@ __vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
- struct scatterlist *sg = vma->pages->sgl;
+} sgt_dma(struct i915_vma_resource *vma_res) {
+ struct scatterlist *sg = vma_res->bi.pages->sgl;
dma_addr_t addr = sg_dma_address(sg);
return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
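
The sgt_dma() helper now seeds its cursor from vma_res->bi.pages instead of vma->pages, but the iteration idea is unchanged: track the current scatterlist entry plus the current and maximum DMA address, and hop to the next entry when the span is exhausted. A self-contained sketch of that cursor walk, assuming a flattened struct span array in place of a real scatterlist:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flattened scatterlist: each entry is one DMA span. */
struct span { uint64_t dma; uint64_t len; };

/* Cursor mirroring the sgt_dma helper above: current entry, current
 * address, and the end of the current span. */
struct cursor { const struct span *sg; uint64_t dma, max; };

static struct cursor cursor_init(const struct span *sg)
{
	return (struct cursor){ sg, sg->dma, sg->dma + sg->len };
}

int main(void)
{
	const struct span table[] = { {0x1000, 0x2000}, {0x8000, 0x1000}, {0} };
	struct cursor it = cursor_init(table);

	/* Emit one 4K "PTE" per page, hopping spans like the insert loops. */
	while (it.sg->len) {
		printf("pte -> %#llx\n", (unsigned long long)it.dma);
		it.dma += 0x1000;
		if (it.dma == it.max) {
			it.sg++;
			it.dma = it.sg->dma;
			it.max = it.dma + it.sg->len;
		}
	}
	return 0;
}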
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 91c87a3a8e0b..004e1216e654 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1069,6 +1069,10 @@ lrc_pin(struct intel_context *ce,
void lrc_unpin(struct intel_context *ce)
{
+ if (unlikely(ce->parallel.last_rq)) {
+ i915_request_put(ce->parallel.last_rq);
+ ce->parallel.last_rq = NULL;
+ }
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
ce->engine);
}
@@ -1164,6 +1168,29 @@ gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
return cs;
}
+/*
+ * On DG2, an RCS restore hang is detected during context restore of a
+ * preempted context in GPGPU mode. The failure is extremely timing
+ * dependent. To work around it, the software workaround batch buffer
+ * (wabb) below is applied on DG2 A steppings.
+ */
+static u32 *
+dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG);
+ *cs++ = 0x21;
+
+ *cs++ = MI_LOAD_REGISTER_REG;
+ *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+ *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT1);
+
+ *cs++ = MI_LOAD_REGISTER_REG;
+ *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+ *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT2);
+
+ return cs;
+}
+
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
@@ -1171,6 +1198,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
cs = gen12_emit_cmd_buf_wa(ce, cs);
cs = gen12_emit_restore_scratch(ce, cs);
+ /* Wa_22011450934:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(ce->engine->i915, G11, STEP_A0, STEP_B0))
+ cs = dg2_emit_rcs_hang_wabb(ce, cs);
+
/* Wa_16013000631:dg2 */
if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_G11(ce->engine->i915))
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 083b3090c69c..48e6e2f87700 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -179,32 +179,34 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
u32 pte_flags;
- if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ if (!vma_res->allocated) {
+ vm->allocate_va_range(vm, stash, vma_res->start,
+ vma_res->vma_size);
+ vma_res->allocated = true;
}
/* Applicable to VLV, and gen8+ */
pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
+ if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(vma->obj))
+ if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
wmb();
}
-void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vm->clear_range(vm, vma->node.start, vma->size);
+ if (vma_res->allocated)
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
static unsigned long pd_count(u64 size, int shift)
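
ppgtt_bind_vma() now keys the one-time allocate_va_range() call off vma_res->allocated rather than a VMA flag bit, and ppgtt_unbind_vma() clears the range only if that allocation actually happened. A tiny model of that allocate-once / tear-down-conditionally bookkeeping, with printf standing in for the vm callbacks:

#include <stdbool.h>
#include <stdio.h>

struct res { unsigned long start, size; bool allocated; };

static void bind(struct res *r)
{
	if (!r->allocated) {
		printf("allocate_va_range(%#lx, %#lx)\n", r->start, r->size);
		r->allocated = true;
	}
	printf("insert_entries(%#lx)\n", r->start);
}

static void unbind(struct res *r)
{
	if (r->allocated)
		printf("clear_range(%#lx, %#lx)\n", r->start, r->size);
}

int main(void)
{
	struct res r = { 0x10000, 0x4000, false };
	bind(&r);   /* allocates, then inserts */
	bind(&r);   /* rebind: inserts only */
	unbind(&r); /* clears, because the allocation happened */
	return 0;
}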
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index cb5a67c98f30..a04e0cf4a94b 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -12,11 +12,12 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
unsigned long n;
int ret;
@@ -132,7 +133,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
if (!i915->params.fake_lmem_start)
return ERR_PTR(-ENODEV);
- GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
+ GEM_BUG_ON(i915_ggtt_has_aperture(to_gt(i915)->ggtt));
/* Your mappable aperture belongs to me now! */
mappable_end = pci_resource_len(pdev, 2);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index ae7542f70afb..82713264b96c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -604,6 +604,15 @@ static int gen8_reset_engines(struct intel_gt *gt,
*/
}
+ /*
+ * Wa_22011100796:dg2 - whenever a full soft reset is required,
+ * reset all individual engines first, and then do a full soft reset.
+ *
+ * This is best effort, so ignore any error from the initial reset.
+ */
+ if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
+ gen11_reset_engines(gt, gt->info.engine_mask, 0);
+
if (GRAPHICS_VER(gt->i915) >= 11)
ret = gen11_reset_engines(gt, engine_mask, retry);
else
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index eeda1692d845..26038066e90b 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1510,6 +1510,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
+
+ /* Wa_18018781329:dg2 */
+ wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
static void
@@ -2040,7 +2046,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ if (IS_DG2(i915)) {
+ /* Wa_14015227452:dg2 */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14013392000:dg2_g11 */
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
@@ -2048,15 +2059,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
wa_masked_en(wal, GEN9_ROW_CHICKEN4,
GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
- IS_DG2_G11(engine->i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
/*
* Wa_22012826095:dg2
* Wa_22013059131:dg2
@@ -2071,14 +2082,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/* Wa_1308578152:dg2_g10 when first gslice is fused off */
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) &&
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
needs_wa_1308578152(engine)) {
wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
GEN12_REPLAY_MODE_GRANULARITY);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
- IS_DG2_G11(engine->i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
/* Wa_22013037850:dg2 */
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
DISABLE_128B_EVICTION_COMMAND_UDW);
@@ -2095,7 +2106,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
/*
* Wa_1608949956:dg2_g10
* Wa_14010198302:dg2_g10
@@ -2114,7 +2125,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, false);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
/* Wa_22010430635:dg2 */
wa_masked_en(wal,
GEN9_ROW_CHICKEN4,
@@ -2124,8 +2135,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
- IS_DG2_G11(engine->i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
/* Wa_22012654132:dg2 */
wa_add(wal, GEN10_CACHE_MODE_SS, 0,
_MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
@@ -2134,8 +2145,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/* Wa_14013202645:dg2 */
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
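
Most of the hunk above is mechanical (s/engine->i915/i915/), but every wa_write_or() entry it touches records the same operation: OR a set of bits into a register via read-modify-write when the workaround list is applied. A toy model of that semantic, assuming a flat regs[] array for MMIO space and a made-up offset/bit pair:

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for MMIO space. */
static uint32_t regs[0x10000 / 4];

/* Model of the wa_write_or() pattern used throughout the hunk above:
 * the workaround records "OR these bits in", applied read-modify-write. */
static void wa_write_or(uint32_t offset, uint32_t set)
{
	regs[offset / 4] |= set;
}

int main(void)
{
	wa_write_or(0xcf58, 1u << 3); /* hypothetical offset and bits */
	wa_write_or(0xcf58, 1u << 7);
	printf("%#x\n", regs[0xcf58 / 4]); /* 0x88 */
	return 0;
}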
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9e9ccb139ba7..83ff4c2e57c5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1384,7 +1384,7 @@ static int evict_vma(void *data)
complete(&arg->completion);
mutex_lock(&vm->mutex);
- err = i915_gem_evict_for_node(vm, &evict, 0);
+ err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
mutex_unlock(&vm->mutex);
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 8a873f6bda7f..37c38bdd5f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -19,7 +19,7 @@ __igt_reset_stolen(struct intel_gt *gt,
intel_engine_mask_t mask,
const char *msg)
{
- struct i915_ggtt *ggtt = &gt->i915->ggtt;
+ struct i915_ggtt *ggtt = gt->ggtt;
const struct resource *dsm = &gt->i915->dsm;
resource_size_t num_pages, page;
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index fe5d7d261797..7afdadc7656f 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -7,9 +7,9 @@
#define _ABI_GUC_ACTIONS_ABI_H
/**
- * DOC: HOST2GUC_REGISTER_CTB
+ * DOC: HOST2GUC_SELF_CFG
*
- * This message is used as part of the `CTB based communication`_ setup.
+ * This message is used by the Host KMD to set up the `GuC Self Config KLVs`_.
*
* This message must be sent as `MMIO HXG Message`_.
*
@@ -22,20 +22,18 @@
* | +-------+--------------------------------------------------------------+
* | | 27:16 | DATA0 = MBZ |
* | +-------+--------------------------------------------------------------+
- * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_REGISTER_CTB` = 0x4505 |
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_SELF_CFG` = 0x0508 |
* +---+-------+--------------------------------------------------------------+
- * | 1 | 31:12 | RESERVED = MBZ |
+ * | 1 | 31:16 | **KLV_KEY** - KLV key, see `GuC Self Config KLVs`_ |
* | +-------+--------------------------------------------------------------+
- * | | 11:8 | **TYPE** - type for the `CT Buffer`_ |
+ * | | 15:0 | **KLV_LEN** - KLV length |
* | | | |
- * | | | - _`GUC_CTB_TYPE_HOST2GUC` = 0 |
- * | | | - _`GUC_CTB_TYPE_GUC2HOST` = 1 |
- * | +-------+--------------------------------------------------------------+
- * | | 7:0 | **SIZE** - size of the `CT Buffer`_ in 4K units minus 1 |
+ * | | | - 32 bit KLV = 1 |
+ * | | | - 64 bit KLV = 2 |
* +---+-------+--------------------------------------------------------------+
- * | 2 | 31:0 | **DESC_ADDR** - GGTT address of the `CTB Descriptor`_ |
+ * | 2 | 31:0 | **VALUE32** - Bits 31-0 of the KLV value |
* +---+-------+--------------------------------------------------------------+
- * | 3 | 31:0 | **BUFF_ADDF** - GGTT address of the `CT Buffer`_ |
+ * | 3 | 31:0 | **VALUE64** - Bits 63-32 of the KLV value (**KLV_LEN** = 2) |
* +---+-------+--------------------------------------------------------------+
*
* +---+-------+--------------------------------------------------------------+
@@ -45,28 +43,25 @@
* | +-------+--------------------------------------------------------------+
* | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
* | +-------+--------------------------------------------------------------+
- * | | 27:0 | DATA0 = MBZ |
+ * | | 27:0 | DATA0 = **NUM** - 1 if KLV was parsed, 0 if not recognized |
* +---+-------+--------------------------------------------------------------+
*/
-#define GUC_ACTION_HOST2GUC_REGISTER_CTB 0x4505
+#define GUC_ACTION_HOST2GUC_SELF_CFG 0x0508
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_MBZ (0xfffff << 12)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE (0xf << 8)
-#define GUC_CTB_TYPE_HOST2GUC 0u
-#define GUC_CTB_TYPE_GUC2HOST 1u
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE (0xff << 0)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR GUC_HXG_REQUEST_MSG_n_DATAn
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
-#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
-#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_0_NUM GUC_HXG_RESPONSE_MSG_0_DATA0
/**
- * DOC: HOST2GUC_DEREGISTER_CTB
+ * DOC: HOST2GUC_CONTROL_CTB
*
- * This message is used as part of the `CTB based communication`_ teardown.
+ * This H2G action allows the VF Host to enable or disable the H2G and G2H `CT Buffer`_.
*
* This message must be sent as `MMIO HXG Message`_.
*
@@ -79,15 +74,12 @@
* | +-------+--------------------------------------------------------------+
* | | 27:16 | DATA0 = MBZ |
* | +-------+--------------------------------------------------------------+
- * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_DEREGISTER_CTB` = 0x4506 |
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_CONTROL_CTB` = 0x4509 |
* +---+-------+--------------------------------------------------------------+
- * | 1 | 31:12 | RESERVED = MBZ |
- * | +-------+--------------------------------------------------------------+
- * | | 11:8 | **TYPE** - type of the `CT Buffer`_ |
+ * | 1 | 31:0 | **CONTROL** - control `CTB based communication`_ |
* | | | |
- * | | | see `GUC_ACTION_HOST2GUC_REGISTER_CTB`_ |
- * | +-------+--------------------------------------------------------------+
- * | | 7:0 | RESERVED = MBZ |
+ * | | | - _`GUC_CTB_CONTROL_DISABLE` = 0 |
+ * | | | - _`GUC_CTB_CONTROL_ENABLE` = 1 |
* +---+-------+--------------------------------------------------------------+
*
* +---+-------+--------------------------------------------------------------+
@@ -100,16 +92,16 @@
* | | 27:0 | DATA0 = MBZ |
* +---+-------+--------------------------------------------------------------+
*/
-#define GUC_ACTION_HOST2GUC_DEREGISTER_CTB 0x4506
+#define GUC_ACTION_HOST2GUC_CONTROL_CTB 0x4509
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ (0xfffff << 12)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE (0xf << 8)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ2 (0xff << 0)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL GUC_HXG_REQUEST_MSG_n_DATAn
+#define GUC_CTB_CONTROL_DISABLE 0u
+#define GUC_CTB_CONTROL_ENABLE 1u
-#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
-#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
/* legacy definitions */
@@ -143,8 +135,12 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
- INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+ INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
+ INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
+ INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
+ INTEL_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index 488b6061ee89..c20658ee85a5 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -11,4 +11,27 @@ enum intel_guc_response_status {
INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};
+enum intel_guc_load_status {
+ INTEL_GUC_LOAD_STATUS_DEFAULT = 0x00,
+ INTEL_GUC_LOAD_STATUS_START = 0x01,
+ INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH = 0x02,
+ INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH = 0x03,
+ INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE = 0x04,
+ INTEL_GUC_LOAD_STATUS_GDT_DONE = 0x10,
+ INTEL_GUC_LOAD_STATUS_IDT_DONE = 0x20,
+ INTEL_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
+ INTEL_GUC_LOAD_STATUS_GUCINT_DONE = 0x40,
+ INTEL_GUC_LOAD_STATUS_DPC_READY = 0x50,
+ INTEL_GUC_LOAD_STATUS_DPC_ERROR = 0x60,
+ INTEL_GUC_LOAD_STATUS_EXCEPTION = 0x70,
+ INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID = 0x71,
+ INTEL_GUC_LOAD_STATUS_PXP_TEARDOWN_CTRL_ENABLED = 0x72,
+ INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
+ INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
+ INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
+
+ INTEL_GUC_LOAD_STATUS_READY = 0xF0,
+};
+
#endif /* _ABI_GUC_ERRORS_ABI_H */
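
The new load-status enum uses the *_RANGE_START/*_RANGE_END entries as implicit-value markers bracketing the invalid-init-data failures, which is why they carry no explicit assignment. A hedged mirror of how such markers are typically consumed (the names here are shortened stand-ins, and the driver's inclusive/exclusive convention may differ):

#include <stdbool.h>
#include <stdio.h>

enum load_status {
	LOAD_PXP_TEARDOWN = 0x72,
	LOAD_RANGE_START,                      /* implicitly 0x73 */
	LOAD_MPU_DATA_INVALID = 0x73,
	LOAD_MMIO_SAVE_RESTORE_INVALID = 0x74,
	LOAD_RANGE_END,                        /* implicitly 0x75 */
	LOAD_READY = 0xf0,
};

static bool in_invalid_init_data_range(int s)
{
	return s >= LOAD_RANGE_START && s < LOAD_RANGE_END;
}

int main(void)
{
	printf("%d %d\n", in_invalid_init_data_range(0x73),
	       in_invalid_init_data_range(0xf0)); /* 1 0 */
	return 0;
}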
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
new file mode 100644
index 000000000000..f0814a57c191
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_KLVS_ABI_H
+#define _ABI_GUC_KLVS_ABI_H
+
+/**
+ * DOC: GuC KLV
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **KEY** - KLV key identifier |
+ * | | | - `GuC Self Config KLVs`_ |
+ * | | | |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **LEN** - length of VALUE (in 32bit dwords) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VALUE** - actual value of the KLV (format depends on KEY) |
+ * +---+-------+ |
+ * |...| | |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_KLV_LEN_MIN 1u
+#define GUC_KLV_0_KEY (0xffff << 16)
+#define GUC_KLV_0_LEN (0xffff << 0)
+#define GUC_KLV_n_VALUE (0xffffffff << 0)
+
+/**
+ * DOC: GuC Self Config KLVs
+ *
+ * `GuC KLV`_ keys available for use with HOST2GUC_SELF_CFG_.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_ADDR` : 0x0902
+ * Refers to 64 bit Global Gfx address of H2G `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR` : 0x0903
+ * Refers to 64 bit Global Gfx address of H2G `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_SIZE` : 0x0904
+ * Refers to size of H2G `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_ADDR` : 0x0905
+ * Refers to 64 bit Global Gfx address of G2H `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR` : 0x0906
+ * Refers to 64 bit Global Gfx address of G2H `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_SIZE` : 0x0907
+ * Refers to size of G2H `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ */
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY 0x0902
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY 0x0903
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY 0x0904
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_LEN 1u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY 0x0905
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY 0x0906
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
+
+#endif /* _ABI_GUC_KLVS_ABI_H */
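
Each KLV in this new file follows the layout from the DOC block: dword 0 packs KEY into bits 31:16 and LEN into bits 15:0, followed by LEN value dwords. A small stand-alone packer illustrating that encoding, assuming klv_pack() as a hypothetical helper (the kernel builds these words with FIELD_PREP and the GUC_KLV_0_* masks instead):

#include <stdint.h>
#include <stdio.h>

/* Pack one KLV per the layout documented above: dword 0 carries
 * KEY[31:16] | LEN[15:0], followed by LEN value dwords. Returns the
 * number of dwords written. */
static unsigned int klv_pack(uint32_t *out, uint16_t key, uint64_t value,
			     unsigned int len /* 1 or 2 value dwords */)
{
	out[0] = ((uint32_t)key << 16) | len;
	out[1] = (uint32_t)(value & 0xffffffff);
	if (len == 2)
		out[2] = (uint32_t)(value >> 32);
	return 1 + len;
}

int main(void)
{
	uint32_t msg[3];
	/* 0x0902 is GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY, a 2-dword KLV. */
	unsigned int n = klv_pack(msg, 0x0902, 0x1234567890ull, 2);

	for (unsigned int i = 0; i < n; i++)
		printf("dword[%u] = %#x\n", i, msg[i]);
	return 0;
}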
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5bab32fef120..447a976c9f25 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -184,6 +184,9 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
}
+
+ intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}
void intel_guc_init_late(struct intel_guc *guc)
@@ -224,32 +227,48 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
u32 flags;
#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
- #define UNIT SZ_1M
- #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+ #define LOG_UNIT SZ_1M
+ #define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
#else
- #define UNIT SZ_4K
- #define FLAG 0
+ #define LOG_UNIT SZ_4K
+ #define LOG_FLAG 0
+ #endif
+
+ #if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
+ #define CAPTURE_UNIT SZ_1M
+ #define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
+ #else
+ #define CAPTURE_UNIT SZ_4K
+ #define CAPTURE_FLAG 0
#endif
BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
+ BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+ BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
+ BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
+ BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
+ (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
- FLAG |
- ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+ CAPTURE_FLAG |
+ LOG_FLAG |
+ ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+ ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+ ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
(offset << GUC_LOG_BUF_ADDR_SHIFT);
- #undef UNIT
- #undef FLAG
+ #undef LOG_UNIT
+ #undef LOG_FLAG
+ #undef CAPTURE_UNIT
+ #undef CAPTURE_FLAG
return flags;
}
@@ -262,6 +281,26 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
return flags;
}
+static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 flags = 0;
+
+ /* Wa_22012773006:gen11,gen12 < XeHP */
+ if (GRAPHICS_VER(gt->i915) >= 11 &&
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ flags |= GUC_WA_POLLCS;
+
+ return flags;
+}
+
+static u32 guc_ctl_devid(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
+}
+
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -278,6 +317,8 @@ static void guc_init_params(struct intel_guc *guc)
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
+ params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
+ params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
@@ -515,9 +556,10 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
/* Make sure to handle only enabled messages */
msg = payload[0] & guc->msg_enabled_mask;
- if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
- INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
- intel_guc_log_handle_flush_event(&guc->log);
+ if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
+ drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
+ if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
+ drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");
return 0;
}
@@ -551,7 +593,7 @@ int intel_guc_suspend(struct intel_guc *guc)
{
int ret;
u32 action[] = {
- INTEL_GUC_ACTION_RESET_CLIENT,
+ INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
};
if (!intel_guc_is_ready(guc))
@@ -715,6 +757,56 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return 0;
}
+static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+ u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
+ };
+ int ret;
+
+ GEM_BUG_ON(len > 2);
+ GEM_BUG_ON(len == 1 && upper_32_bits(value));
+
+ /* Self config must go over MMIO */
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret > 1))
+ return -EPROTO;
+ if (unlikely(!ret))
+ return -ENOKEY;
+
+ return 0;
+}
+
+static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int err = __guc_action_self_cfg(guc, key, len, value);
+
+ if (unlikely(err))
+ i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
+ ERR_PTR(err), key, value);
+ return err;
+}
+
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
+{
+ return __guc_self_cfg(guc, key, 1, value);
+}
+
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
+{
+ return __guc_self_cfg(guc, key, 2, value);
+}
+
/**
* intel_guc_load_status - dump information about GuC load status
* @guc: the GuC
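
__guc_action_self_cfg() above collapses the MMIO reply into three outcomes: a negative send error propagates as-is, a success DATA0 above 1 is a protocol error, and 0 means the firmware did not recognize the KLV key. A stand-alone model of that mapping, assuming the Linux errno values are available:

#include <errno.h>
#include <stdio.h>

/* Model of the response handling in __guc_action_self_cfg above:
 * 'ret' stands for the firmware's DATA0 (or a negative send error). */
static int self_cfg_status(int ret)
{
	if (ret < 0)
		return ret;       /* transport failure */
	if (ret > 1)
		return -EPROTO;   /* firmware replied nonsense */
	if (!ret)
		return -ENOKEY;   /* KLV key not recognized */
	return 0;                 /* NUM == 1: KLV parsed */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       self_cfg_status(1), self_cfg_status(0),
	       self_cfg_status(2), self_cfg_status(-5));
	return 0;
}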
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index f9240d4baa69..9d779de16613 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -119,6 +119,15 @@ struct intel_guc {
* function as it might be in an atomic context (no sleeping)
*/
struct work_struct destroyed_worker;
+ /**
+ * @reset_fail_worker: worker to trigger a GT reset after an
+ * engine reset fails
+ */
+ struct work_struct reset_fail_worker;
+ /**
+ * @reset_fail_mask: mask of engines that failed to reset
+ */
+ intel_engine_mask_t reset_fail_mask;
} submission_state;
/**
@@ -141,6 +150,13 @@ struct intel_guc {
struct __guc_ads_blob *ads_blob;
/** @ads_regset_size: size of the save/restore regsets in the ADS */
u32 ads_regset_size;
+ /**
+ * @ads_regset_count: number of save/restore registers in the ADS for
+ * each engine
+ */
+ u32 ads_regset_count[I915_NUM_ENGINES];
+ /** @ads_regset: save/restore regsets in the ADS */
+ struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
/** @ads_engine_usage_size: size of engine usage in the ADS */
@@ -206,6 +222,11 @@ struct intel_guc {
* context usage for overflows.
*/
struct delayed_work work;
+
+ /**
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
@@ -328,6 +349,8 @@ int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
struct i915_vma **out_vma, void **out_vaddr);
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
@@ -404,6 +427,8 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len);
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
void intel_guc_find_hung_context(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 162b89198567..7e41175618f5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -42,6 +42,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | capture lists |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | private data |
* +---------------------------------------+
* | padding |
@@ -67,6 +71,12 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_capture_size(struct intel_guc *guc)
+{
+ /* FIXME: Allocate a proper capture list */
+ return PAGE_ALIGN(PAGE_SIZE);
+}
+
static u32 guc_ads_private_data_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->fw.private_data_size);
@@ -87,7 +97,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
{
u32 offset;
@@ -97,6 +107,16 @@ static u32 guc_ads_private_data_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_capture_offset(guc) +
+ guc_ads_capture_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_blob_size(struct intel_guc *guc)
{
return guc_ads_private_data_offset(guc) +
@@ -188,14 +208,18 @@ static void guc_mapping_table_init(struct intel_gt *gt,
/*
* The save/restore register list must be pre-calculated to a temporary
- * buffer of driver defined size before it can be generated in place
- * inside the ADS.
+ * buffer before it can be copied inside the ADS.
*/
-#define MAX_MMIO_REGS 128 /* Arbitrary size, increase as needed */
struct temp_regset {
+ /*
+ * ptr to the section of the storage for the engine currently being
+ * worked on
+ */
struct guc_mmio_reg *registers;
- u32 used;
- u32 size;
+ /* ptr to the base of the allocated storage for all engines */
+ struct guc_mmio_reg *storage;
+ u32 storage_used;
+ u32 storage_max;
};
static int guc_mmio_reg_cmp(const void *a, const void *b)
@@ -206,18 +230,44 @@ static int guc_mmio_reg_cmp(const void *a, const void *b)
return (int)ra->offset - (int)rb->offset;
}
-static void guc_mmio_reg_add(struct temp_regset *regset,
- u32 offset, u32 flags)
+static struct guc_mmio_reg * __must_check
+__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
{
- u32 count = regset->used;
+ u32 pos = regset->storage_used;
+ struct guc_mmio_reg *slot;
+
+ if (pos >= regset->storage_max) {
+ size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
+ struct guc_mmio_reg *r = krealloc(regset->storage,
+ size, GFP_KERNEL);
+ if (!r) {
+ WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
+ -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ regset->registers = r + (regset->registers - regset->storage);
+ regset->storage = r;
+ regset->storage_max = size / sizeof(*slot);
+ }
+
+ slot = &regset->storage[pos];
+ regset->storage_used++;
+ *slot = *reg;
+
+ return slot;
+}
+
+static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
+ u32 offset, u32 flags)
+{
+ u32 count = regset->storage_used - (regset->registers - regset->storage);
struct guc_mmio_reg reg = {
.offset = offset,
.flags = flags,
};
struct guc_mmio_reg *slot;
- GEM_BUG_ON(count >= regset->size);
-
/*
* The mmio list is built using separate lists within the driver.
* It's possible that at some point we may attempt to add the same
@@ -226,11 +276,11 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
*/
if (bsearch(&reg, regset->registers, count,
sizeof(reg), guc_mmio_reg_cmp))
- return;
+ return 0;
- slot = &regset->registers[count];
- regset->used++;
- *slot = reg;
+ slot = __mmio_reg_add(regset, &reg);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
while (slot-- > regset->registers) {
GEM_BUG_ON(slot[0].offset == slot[1].offset);
@@ -239,6 +289,8 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
swap(slot[1], slot[0]);
}
+
+ return 0;
}
#define GUC_MMIO_REG_ADD(regset, reg, masked) \
@@ -246,62 +298,71 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
i915_mmio_reg_offset((reg)), \
(masked) ? GUC_REGSET_MASKED : 0)
-static void guc_mmio_regset_init(struct temp_regset *regset,
- struct intel_engine_cs *engine)
+static int guc_mmio_regset_init(struct temp_regset *regset,
+ struct intel_engine_cs *engine)
{
const u32 base = engine->mmio_base;
struct i915_wa_list *wal = &engine->wa_list;
struct i915_wa *wa;
unsigned int i;
+ int ret = 0;
- regset->used = 0;
+ /*
+ * Each engine's register list starts at a fresh offset within the
+ * shared storage
+ */
+ regset->registers = regset->storage + regset->storage_used;
- GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
- GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
- GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+ ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
+ ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
+ ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+ ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
- GUC_MMIO_REG_ADD(regset,
- RING_FORCE_TO_NONPRIV(base, i),
- false);
+ ret |= GUC_MMIO_REG_ADD(regset,
+ RING_FORCE_TO_NONPRIV(base, i),
+ false);
/* add in local MOCS registers */
for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
- GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+ ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+
+ return ret ? -1 : 0;
}
-static int guc_mmio_reg_state_query(struct intel_guc *guc)
+static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- struct temp_regset temp_set;
- u32 total;
+ struct temp_regset temp_set = {};
+ long total = 0;
+ long ret;
- /*
- * Need to actually build the list in order to filter out
- * duplicates and other such data dependent constructions.
- */
- temp_set.size = MAX_MMIO_REGS;
- temp_set.registers = kmalloc_array(temp_set.size,
- sizeof(*temp_set.registers),
- GFP_KERNEL);
- if (!temp_set.registers)
- return -ENOMEM;
-
- total = 0;
for_each_engine(engine, gt, id) {
- guc_mmio_regset_init(&temp_set, engine);
- total += temp_set.used;
+ u32 used = temp_set.storage_used;
+
+ ret = guc_mmio_regset_init(&temp_set, engine);
+ if (ret < 0)
+ goto fail_regset_init;
+
+ guc->ads_regset_count[id] = temp_set.storage_used - used;
+ total += guc->ads_regset_count[id];
}
- kfree(temp_set.registers);
+ guc->ads_regset = temp_set.storage;
+
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
+ (temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
return total * sizeof(struct guc_mmio_reg);
+
+fail_regset_init:
+ kfree(temp_set.storage);
+ return ret;
}
static void guc_mmio_reg_state_init(struct intel_guc *guc,
@@ -309,40 +370,38 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc,
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
+ struct guc_mmio_reg *ads_registers;
enum intel_engine_id id;
- struct temp_regset temp_set;
- struct guc_mmio_reg_set *ads_reg_set;
u32 addr_ggtt, offset;
- u8 guc_class;
offset = guc_ads_regset_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
- temp_set.registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
- temp_set.size = guc->ads_regset_size / sizeof(temp_set.registers[0]);
+ ads_registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
+
+ memcpy(ads_registers, guc->ads_regset, guc->ads_regset_size);
for_each_engine(engine, gt, id) {
+ u32 count = guc->ads_regset_count[id];
+ struct guc_mmio_reg_set *ads_reg_set;
+ u8 guc_class;
+
/* Class index is checked in class converter */
GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);
guc_class = engine_class_to_guc_class(engine->class);
ads_reg_set = &blob->ads.reg_state_list[guc_class][engine->instance];
- guc_mmio_regset_init(&temp_set, engine);
- if (!temp_set.used) {
+ if (!count) {
ads_reg_set->address = 0;
ads_reg_set->count = 0;
continue;
}
ads_reg_set->address = addr_ggtt;
- ads_reg_set->count = temp_set.used;
+ ads_reg_set->count = count;
- temp_set.size -= temp_set.used;
- temp_set.registers += temp_set.used;
- addr_ggtt += temp_set.used * sizeof(struct guc_mmio_reg);
+ addr_ggtt += count * sizeof(struct guc_mmio_reg);
}
-
- GEM_BUG_ON(temp_set.size);
}
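
A minimal sketch (hypothetical sizes and GGTT base, not part of the patch) of the resulting split between create and init: the regset is built once in system memory, bulk-copied into the blob, and carved into per-engine slices from the counts recorded at build time:

	#include <stdio.h>
	#include <string.h>

	#define NUM_ENGINES 3

	struct reg { unsigned int offset, value, flags, mask; };
	struct reg_set { unsigned long address; unsigned int count; };

	int main(void)
	{
		struct reg regset[6] = { 0 };			/* built at create time */
		unsigned int counts[NUM_ENGINES] = { 2, 0, 4 };	/* recorded per engine */
		struct reg blob[6];				/* final ADS location */
		struct reg_set list[NUM_ENGINES];
		unsigned long addr = 0x10000;			/* hypothetical GGTT base */

		memcpy(blob, regset, sizeof(regset));		/* one bulk copy */

		for (int i = 0; i < NUM_ENGINES; i++) {
			if (!counts[i]) {
				list[i].address = 0;
				list[i].count = 0;
				continue;
			}
			list[i].address = addr;
			list[i].count = counts[i];
			addr += counts[i] * sizeof(struct reg);
		}

		for (int i = 0; i < NUM_ENGINES; i++)
			printf("engine %d: addr=%#lx count=%u\n",
			       i, list[i].address, list[i].count);
		return 0;
	}
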
static void fill_engine_enable_masks(struct intel_gt *gt,
@@ -501,6 +560,26 @@ static void guc_init_golden_context(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}
+static void guc_capture_list_init(struct intel_guc *guc, struct __guc_ads_blob *blob)
+{
+ int i, j;
+ u32 addr_ggtt, offset;
+
+ offset = guc_ads_capture_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ /* FIXME: Populate a proper capture list */
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+ for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
+ blob->ads.capture_instance[i][j] = addr_ggtt;
+ blob->ads.capture_class[i][j] = addr_ggtt;
+ }
+
+ blob->ads.capture_global[i] = addr_ggtt;
+ }
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -534,6 +613,9 @@ static void __guc_ads_init(struct intel_guc *guc)
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ /* Capture list for hang debug */
+ guc_capture_list_init(guc, blob);
+
/* ADS */
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
@@ -561,8 +643,11 @@ int intel_guc_ads_create(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_vma);
- /* Need to calculate the reg state size dynamically: */
- ret = guc_mmio_reg_state_query(guc);
+ /*
+ * Create the reg state buffer dynamically in system memory; it is
+ * copied into the final ADS blob on gt init/reset.
+ */
+ ret = guc_mmio_reg_state_create(guc);
if (ret < 0)
return ret;
guc->ads_regset_size = ret;
@@ -602,6 +687,7 @@ void intel_guc_ads_destroy(struct intel_guc *guc)
{
i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
guc->ads_blob = NULL;
+ kfree(guc->ads_regset);
}
static void guc_ads_private_data_reset(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index aa6dd6415202..2f7fc87a78e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -112,18 +112,6 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
init_waitqueue_head(&ct->wq);
}
-static inline const char *guc_ct_buffer_type_to_str(u32 type)
-{
- switch (type) {
- case GUC_CTB_TYPE_HOST2GUC:
- return "SEND";
- case GUC_CTB_TYPE_GUC2HOST:
- return "RECV";
- default:
- return "<invalid>";
- }
-}
-
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
{
memset(desc, 0, sizeof(*desc));
@@ -156,71 +144,65 @@ static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
guc_ct_buffer_reset(ctb);
}
-static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
- u32 desc_addr, u32 buff_addr, u32 size)
+static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
{
- u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
+ u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
+ FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
};
int ret;
- GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
- GEM_BUG_ON(size % SZ_4K);
+ GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
- /* CT registration must go over MMIO */
+ /* CT control must go over MMIO */
ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
return ret > 0 ? -EPROTO : ret;
}
-static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
- u32 desc_addr, u32 buff_addr, u32 size)
+static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
{
int err;
- err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
+ err = guc_action_control_ctb(ct_to_guc(ct), enable ?
+ GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
if (unlikely(err))
- return err;
+ CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
+ enabledisable(enable), ERR_PTR(err));
- err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
- desc_addr, buff_addr, size);
- if (unlikely(err))
- CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
- guc_ct_buffer_type_to_str(type), ERR_PTR(err));
return err;
}
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
+static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
+ u32 desc_addr, u32 buff_addr, u32 size)
{
- u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
- FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
- FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
- };
- int ret;
-
- GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
-
- /* CT deregistration must go over MMIO */
- ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ int err;
- return ret > 0 ? -EPROTO : ret;
-}
+ err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (unlikely(err))
+ goto failed;
-static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
-{
- int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+ err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
+ buff_addr);
+ if (unlikely(err))
+ goto failed;
+ err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
+ size);
if (unlikely(err))
- CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
- guc_ct_buffer_type_to_str(type), ERR_PTR(err));
+failed:
+ CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
+ send ? "SEND" : "RECV", ERR_PTR(err));
+
return err;
}
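
Note the construct above: the failed: label sits between the last if () and its statement, so the CT_PROBE_ERROR() call serves both as that if-body and as the goto target for the earlier steps. A standalone illustration (the steps themselves are made up):

	#include <stdio.h>

	static int step(int n) { return n == 2 ? -1 : 0; }

	static int run_steps(void)
	{
		int err;

		err = step(0);
		if (err)
			goto failed;

		err = step(1);
		if (err)
			goto failed;

		err = step(2);
		if (err)
	failed:		printf("step failed (%d)\n", err);

		return err;
	}

	int main(void)
	{
		return run_steps() ? 1 : 0;
	}
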
@@ -308,7 +290,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- u32 base, desc, cmds;
+ u32 base, desc, cmds, size;
void *blob;
int err;
@@ -333,27 +315,27 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
*/
desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
- err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
- desc, cmds, ct->ctbs.recv.size * 4);
-
+ size = ct->ctbs.recv.size * 4;
+ err = ct_register_buffer(ct, false, desc, cmds, size);
if (unlikely(err))
goto err_out;
desc = base + ptrdiff(ct->ctbs.send.desc, blob);
cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
- err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
- desc, cmds, ct->ctbs.send.size * 4);
+ size = ct->ctbs.send.size * 4;
+ err = ct_register_buffer(ct, true, desc, cmds, size);
+ if (unlikely(err))
+ goto err_out;
+ err = ct_control_enable(ct, true);
if (unlikely(err))
- goto err_deregister;
+ goto err_out;
ct->enabled = true;
ct->stall_time = KTIME_MAX;
return 0;
-err_deregister:
- ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
err_out:
CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
return err;
@@ -372,8 +354,7 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
ct->enabled = false;
if (intel_guc_is_fw_running(guc)) {
- ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
- ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
+ ct_control_enable(ct, false);
}
}
@@ -662,6 +643,7 @@ static int ct_send(struct intel_guc_ct *ct,
struct ct_request request;
unsigned long flags;
unsigned int sleep_period_ms = 1;
+ bool send_again;
u32 fence;
int err;
@@ -671,6 +653,9 @@ static int ct_send(struct intel_guc_ct *ct,
GEM_BUG_ON(!response_buf && response_buf_size);
might_sleep();
+resend:
+ send_again = false;
+
/*
* We use a lazy spin wait loop here as we believe that if the CT
* buffers are sized correctly the flow control condition should be
@@ -725,6 +710,13 @@ retry:
goto unlink;
}
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
+ FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
+ send_again = true;
+ goto unlink;
+ }
+
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
err = -EIO;
goto unlink;
@@ -747,6 +739,9 @@ unlink:
list_del(&request.link);
spin_unlock_irqrestore(&ct->requests.lock, flags);
+ if (unlikely(send_again))
+ goto resend;
+
return err;
}
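
A minimal sketch of the resend flow (status codes and the send function are illustrative stand-ins): the loop restarts whenever the response carries the transient retry status, and otherwise maps the status to success or failure:

	#include <stdio.h>

	enum status { ST_SUCCESS, ST_RETRY, ST_FAILURE };

	static enum status do_send(int *attempts)
	{
		/* Pretend the first attempt is told to retry. */
		return (*attempts)++ ? ST_SUCCESS : ST_RETRY;
	}

	static int send_with_retry(void)
	{
		int attempts = 0;

		for (;;) {
			switch (do_send(&attempts)) {
			case ST_SUCCESS:
				return 0;
			case ST_RETRY:
				printf("retrying request (attempt %d)\n", attempts);
				continue;	/* equivalent of "goto resend" */
			case ST_FAILURE:
				return -1;
			}
		}
	}

	int main(void)
	{
		return send_with_retry();
	}
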
@@ -789,7 +784,7 @@ static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
struct ct_incoming_msg *msg;
- msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
+ msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
if (msg)
msg->size = num_dwords;
return msg;
@@ -918,6 +913,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
@@ -990,9 +986,27 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r
case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
ret = intel_guc_context_reset_process_msg(guc, payload, len);
break;
+ case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = intel_guc_error_capture_process_msg(guc, payload, len);
+ if (unlikely(ret))
+ CT_ERROR(ct, "error capture notification failed %x %*ph\n",
+ action, 4 * len, payload);
+ break;
case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
ret = intel_guc_engine_failure_process_msg(guc, payload, len);
break;
+ case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
+ intel_guc_log_handle_flush_event(&guc->log);
+ ret = 0;
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ CT_ERROR(ct, "Received GuC crash dump notification!\n");
+ ret = 0;
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
+ CT_ERROR(ct, "Received GuC exception notification!\n");
+ ret = 0;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1098,6 +1112,7 @@ static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
break;
case GUC_HXG_TYPE_RESPONSE_SUCCESS:
case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
err = ct_handle_response(ct, msg);
break;
default:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index dcb51b53b495..a0372735cddb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -16,13 +16,15 @@
static void guc_prepare_xfer(struct intel_uncore *uncore)
{
- u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
- GUC_ENABLE_READ_CACHE_LOGIC |
- GUC_ENABLE_MIA_CACHING |
+ u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_MIA_CACHING;
+
/* Must program this register before loading the ucode with DMA */
intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
@@ -91,11 +93,10 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
{
u32 val = intel_uncore_read(uncore, GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
+ u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);
*status = val;
- return (uk_val == GS_UKERNEL_READY) ||
- ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+ return uk_val == INTEL_GUC_LOAD_STATUS_READY;
}
static int guc_wait_ucode(struct intel_uncore *uncore)
@@ -106,17 +107,26 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
/*
* Wait for the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
- * Measurements indicate this should take no more than 20ms, so a
+ * Measurements indicate this should take no more than 20ms
+ * (assuming the GT clock is at maximum frequency). So, a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver may decide to reset the GuC and
* attempt the ucode load again if this happens.)
+ *
+ * FIXME: There is a known (but exceedingly unlikely) race condition
+ * where the asynchronous frequency management code could reduce
+ * the GT clock while a GuC reload is in progress (during a full
+ * GT reset). A fix is in progress but there are complex locking
+ * issues to be resolved. In the meantime, bump the timeout to
+ * 200ms. Even at the slowest clock, this should be sufficient. And
+ * in the working case, a larger timeout makes no difference.
*/
- ret = wait_for(guc_ready(uncore, &status), 100);
+ ret = wait_for(guc_ready(uncore, &status), 200);
if (ret) {
struct drm_device *drm = &uncore->i915->drm;
- drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_dbg(drm, "GuC load failed: status: Reset = %d, "
+ drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_info(drm, "GuC load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -126,13 +136,13 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_dbg(drm, "GuC firmware signature verification failed\n");
+ drm_info(drm, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
- if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
- drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
- intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+ if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
+ drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 7072e30e99f4..6a4612a852e2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -16,6 +16,7 @@
#include "abi/guc_errors_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_communication_ctb_abi.h"
+#include "abi/guc_klvs_abi.h"
#include "abi/guc_messages_abi.h"
/* Payload length only i.e. don't include G2H header length */
@@ -84,19 +85,24 @@
#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
#define GUC_CTL_LOG_PARAMS 0
-#define GUC_LOG_VALID (1 << 0)
-#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
-#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
+#define GUC_LOG_VALID BIT(0)
+#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
+#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
+#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
#define GUC_LOG_CRASH_SHIFT 4
#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DEBUG_SHIFT 6
#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
+#define GUC_LOG_CAPTURE_SHIFT 10
+#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_WA 1
+#define GUC_WA_POLLCS BIT(18)
+
#define GUC_CTL_FEATURE 2
-#define GUC_CTL_DISABLE_SCHEDULER (1 << 14)
#define GUC_CTL_ENABLE_SLPC BIT(2)
+#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
#define GUC_LOG_VERBOSITY_SHIFT 0
@@ -116,6 +122,8 @@
#define GUC_ADS_ADDR_SHIFT 1
#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+#define GUC_CTL_DEVID 5
+
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
/* Generic GT SysInfo data types */
@@ -263,7 +271,10 @@ struct guc_mmio_reg {
u32 offset;
u32 value;
u32 flags;
-#define GUC_REGSET_MASKED (1 << 0)
+ u32 mask;
+#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
+#define GUC_REGSET_RESTORE_ONLY BIT(3)
} __packed;
/* GuC register sets */
@@ -280,6 +291,12 @@ struct guc_gt_system_info {
u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
} __packed;
+enum {
+ GUC_CAPTURE_LIST_INDEX_PF = 0,
+ GUC_CAPTURE_LIST_INDEX_VF = 1,
+ GUC_CAPTURE_LIST_INDEX_MAX = 2,
+};
+
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
@@ -291,7 +308,11 @@ struct guc_ads {
u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
u32 private_data;
- u32 reserved[15];
+ u32 reserved2;
+ u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
+ u32 reserved[14];
} __packed;
/* Engine usage stats */
@@ -312,6 +333,7 @@ struct guc_engine_usage {
enum guc_log_buffer_type {
GUC_DEBUG_LOG_BUFFER,
GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_CAPTURE_LOG_BUFFER,
GUC_MAX_LOG_BUFFER
};
@@ -342,6 +364,7 @@ struct guc_log_buffer_state {
u32 write_ptr;
u32 size;
u32 sampled_write_ptr;
+ u32 wrap_offset;
union {
struct {
u32 flush_to_file:1;
@@ -382,7 +405,7 @@ struct guc_shared_ctx_data {
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum intel_guc_recv_message {
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
+ INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
};
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 7b0b43e87244..b53f61f3101f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -56,20 +56,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static void guc_log_enable_flush_events(struct intel_guc_log *log)
-{
- intel_guc_enable_msg(log_to_guc(log),
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
- INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-}
-
-static void guc_log_disable_flush_events(struct intel_guc_log *log)
-{
- intel_guc_disable_msg(log_to_guc(log),
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
- INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-}
-
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -202,6 +188,8 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
return DEBUG_BUFFER_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
return CRASH_BUFFER_SIZE;
+ case GUC_CAPTURE_LOG_BUFFER:
+ return CAPTURE_BUFFER_SIZE;
default:
MISSING_CASE(type);
}
@@ -464,14 +452,19 @@ int intel_guc_log_create(struct intel_guc_log *log)
* +-------------------------------+ 32B
* | Debug state header |
* +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
* | |
* +===============================+ PAGE_SIZE (4KB)
* | Crash Dump logs |
* +===============================+ + CRASH_SIZE
* | Debug logs |
* +===============================+ + DEBUG_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
*/
- guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE;
+ guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
+ CAPTURE_BUFFER_SIZE;
vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
@@ -593,8 +586,6 @@ int intel_guc_log_relay_start(struct intel_guc_log *log)
if (log->relay.started)
return -EEXIST;
- guc_log_enable_flush_events(log);
-
/*
* When GuC is logging without us relaying to userspace, we're ignoring
* the flush notification. This means that we need to unconditionally
@@ -641,7 +632,6 @@ static void guc_log_relay_stop(struct intel_guc_log *log)
if (!log->relay.started)
return;
- guc_log_disable_flush_events(log);
intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
@@ -662,7 +652,8 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
- queue_work(system_highpri_wq, &log->relay.flush_work);
+ if (log->relay.started)
+ queue_work(system_highpri_wq, &log->relay.flush_work);
}
static const char *
@@ -673,6 +664,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
return "DEBUG";
case GUC_CRASH_DUMP_LOG_BUFFER:
return "CRASH";
+ case GUC_CAPTURE_LOG_BUFFER:
+ return "CAPTURE";
default:
MISSING_CASE(type);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index fe6ab7550a14..d7e1b6471fed 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -18,12 +18,15 @@ struct intel_guc;
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_2M
#define DEBUG_BUFFER_SIZE SZ_16M
+#define CAPTURE_BUFFER_SIZE SZ_4M
#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
#define CRASH_BUFFER_SIZE SZ_1M
#define DEBUG_BUFFER_SIZE SZ_2M
+#define CAPTURE_BUFFER_SIZE SZ_1M
#else
#define CRASH_BUFFER_SIZE SZ_8K
#define DEBUG_BUFFER_SIZE SZ_64K
+#define CAPTURE_BUFFER_SIZE SZ_16K
#endif
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 85846c5570c5..66027a42cda9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -22,10 +22,6 @@
#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
@@ -98,6 +94,9 @@
#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
+#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
+#define GUC_IS_PRIVILEGED (1<<29)
+
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0cb51c5bb765..b3a429a92c0d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1115,6 +1115,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
if (new_start == lower_32_bits(*prev_start))
return;
+ /*
+ * When gt is unparked, we update the gt timestamp and start the ping
+ * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
+ * is unparked, all switched-in contexts will have a start time that is
+ * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
+ *
+ * If neither gt_stamp nor new_start has rolled over, then the
+ * gt_stamp_hi does not need to be adjusted, however if one of them has
+ * rolled over, we need to adjust gt_stamp_hi accordingly.
+ *
+ * The below conditions address the cases of new_start rollover and
+ * gt_stamp_last rollover respectively.
+ */
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
@@ -1126,17 +1139,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
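
As a worked illustration of the rollover handling above, a standalone sketch (WINDOW is an arbitrary stand-in for POLL_TIME_CLKS) that extends a 32-bit sample to 64 bits against a recent 64-bit reference, consistent across wrap in either direction:

	#include <stdint.h>
	#include <stdio.h>

	#define WINDOW 0x10000000u	/* hypothetical stand-in for POLL_TIME_CLKS */

	static uint64_t extend32(uint64_t ref, uint32_t sample)
	{
		uint32_t ref_lo = (uint32_t)ref;
		uint32_t hi = (uint32_t)(ref >> 32);

		if (sample < ref_lo && (uint32_t)(sample - ref_lo) <= WINDOW)
			hi++;		/* sample rolled over past ref */
		else if (sample > ref_lo && (uint32_t)(ref_lo - sample) <= WINDOW)
			hi--;		/* ref rolled over past sample */

		return ((uint64_t)hi << 32) | sample;
	}

	int main(void)
	{
		/* ref just past a rollover, sample taken just before it */
		printf("%#llx\n",
		       (unsigned long long)extend32(0x200000010ull, 0xfffffff0u));
		/* sample just past a rollover, ref just before it */
		printf("%#llx\n",
		       (unsigned long long)extend32(0x1fffffff0ull, 0x10u));
		return 0;
	}
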
-static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+/*
+ * GuC updates shared memory and KMD reads it. Since this is not synchronized,
+ * we run into a race where the value read is inconsistent. Sometimes the
+ * inconsistency is in reading the upper bytes of the last_in value when
+ * this race occurs. Two kinds of corruption are seen - the upper 8 bits are
+ * zero, or the upper 24 bits are zero. Since the truncated values are still
+ * non-zero, it is non-trivial to determine their validity. Instead, we read
+ * the values multiple times until they are consistent. In test runs, 3
+ * attempts result in consistent values. The upper bound is set to 6 attempts
+ * and may need to be tuned for any new occurrences.
+ */
+static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ u32 *last_in, u32 *id, u32 *total)
{
struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ int i = 0;
+
+ do {
+ *last_in = READ_ONCE(rec->last_switch_in_stamp);
+ *id = READ_ONCE(rec->current_context_index);
+ *total = READ_ONCE(rec->total_runtime);
+
+ if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
+ READ_ONCE(rec->current_context_index) == *id &&
+ READ_ONCE(rec->total_runtime) == *total)
+ break;
+ } while (++i < 6);
+}
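
A generic sketch of the same consistent-snapshot idea, using C11 atomics in place of READ_ONCE(); the retry bound of 6 matches the driver's, everything else is illustrative:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct usage {
		_Atomic uint32_t last_in;
		_Atomic uint32_t ctx_id;
		_Atomic uint32_t total;
	};

	static bool read_usage(struct usage *rec,
			       uint32_t *last_in, uint32_t *ctx_id, uint32_t *total)
	{
		for (int i = 0; i < 6; i++) {
			*last_in = atomic_load_explicit(&rec->last_in, memory_order_relaxed);
			*ctx_id = atomic_load_explicit(&rec->ctx_id, memory_order_relaxed);
			*total = atomic_load_explicit(&rec->total, memory_order_relaxed);

			/* Accept the snapshot only if a second pass agrees. */
			if (atomic_load_explicit(&rec->last_in, memory_order_relaxed) == *last_in &&
			    atomic_load_explicit(&rec->ctx_id, memory_order_relaxed) == *ctx_id &&
			    atomic_load_explicit(&rec->total, memory_order_relaxed) == *total)
				return true;
		}
		return false;	/* still torn after the retry budget */
	}

	int main(void)
	{
		struct usage rec = { 100, 2, 5000 };
		uint32_t a, b, c;

		return read_usage(&rec, &a, &b, &c) ? 0 : 1;
	}
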
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 last_switch = rec->last_switch_in_stamp;
- u32 ctx_id = rec->current_context_index;
- u32 total = rec->total_runtime;
+ u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
+ __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
+
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
@@ -1151,23 +1192,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
}
}
-static void guc_update_pm_timestamp(struct intel_guc *guc,
- struct intel_engine_cs *engine,
- ktime_t *now)
+static u32 gpm_timestamp_shift(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+
+ shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
+
+ return 3 - shift;
+}
+
+static u64 gpm_timestamp(struct intel_gt *gt)
{
- u32 gt_stamp_now, gt_stamp_hi;
+ u32 lo, hi, old_hi, loop = 0;
+
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ do {
+ lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
+ old_hi = hi;
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ } while (old_hi != hi && loop++ < 2);
+
+ return ((u64)hi << 32) | lo;
+}
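
The hi/lo/hi read above is a classic split-register pattern; a runnable sketch against a simulated counter (the ticking "registers" are, of course, an assumption for demonstration only):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical 64-bit counter exposed as two 32-bit "registers". */
	static uint64_t hw_counter = 0x1ffffffffull;

	static uint32_t read_reg_lo(void)
	{
		hw_counter += 3;	/* counter keeps ticking between reads */
		return (uint32_t)hw_counter;
	}

	static uint32_t read_reg_hi(void)
	{
		hw_counter += 3;
		return (uint32_t)(hw_counter >> 32);
	}

	static uint64_t read_counter64(void)
	{
		uint32_t lo, hi, old_hi;
		int loop = 0;

		/* Re-read the high word until it is stable across the low read. */
		hi = read_reg_hi();
		do {
			lo = read_reg_lo();
			old_hi = hi;
			hi = read_reg_hi();
		} while (old_hi != hi && loop++ < 2);

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)read_counter64());
		return 0;
	}
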
+
+static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 gt_stamp_lo, gt_stamp_hi;
+ u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
- gt_stamp_now = intel_uncore_read(engine->uncore,
- RING_TIMESTAMP(engine->mmio_base));
+ gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
+ gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
- if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
- guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
@@ -1210,8 +1279,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
+ /*
+ * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
+ * start_gt_clk' calculation below for active engines.
+ */
guc_update_engine_gt_clks(engine);
- guc_update_pm_timestamp(guc, engine, now);
+ guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
@@ -1243,8 +1316,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
@@ -1261,10 +1334,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
ktime_t unused;
spin_lock_irqsave(&guc->timestamp.lock, flags);
- for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
- }
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
@@ -1337,10 +1411,15 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = &gt->uc.guc;
+ unsigned long flags;
+ ktime_t unused;
if (!guc_submission_initialized(guc))
return;
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
guc->timestamp.ping_delay);
}
@@ -1351,7 +1430,8 @@ submission_disabled(struct intel_guc *guc)
struct i915_sched_engine * const sched_engine = guc->sched_engine;
return unlikely(!sched_engine ||
- !__tasklet_is_enabled(&sched_engine->tasklet));
+ !__tasklet_is_enabled(&sched_engine->tasklet) ||
+ intel_gt_is_wedged(guc_to_gt(guc)));
}
static void disable_submission(struct intel_guc *guc)
@@ -1396,8 +1476,6 @@ static void guc_flush_destroyed_contexts(struct intel_guc *guc);
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
{
- int i;
-
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
@@ -1414,21 +1492,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
-
- /*
- * Handle any outstanding G2Hs before reset. Call IRQ handler directly
- * each pass as interrupt have been disabled. We always scrub for
- * outstanding G2H as it is possible for outstanding_submission_g2h to
- * be incremented after the context state update.
- */
- for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
- intel_guc_to_host_event_handler(guc);
-#define wait_for_reset(guc, wait_var) \
- intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
- do {
- wait_for_reset(guc, &guc->outstanding_submission_g2h);
- } while (!list_empty(&guc->ct.requests.incoming));
- }
+ flush_work(&guc->ct.requests.worker);
scrub_guc_desc_for_outstanding_g2h(guc);
}
@@ -1533,7 +1597,6 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
unsigned long flags;
u32 head;
int i, number_children = ce->parallel.number_children;
- bool skip = false;
struct intel_context *parent = ce;
GEM_BUG_ON(intel_context_is_child(ce));
@@ -1544,23 +1607,10 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
* GuC will implicitly mark the context as non-schedulable when it sends
* the reset notification. Make sure our state reflects this change. The
* context will be marked enabled on resubmission.
- *
- * XXX: If the context is reset as a result of the request cancellation
- * this G2H is received after the schedule disable complete G2H which is
- * wrong as this creates a race between the request cancellation code
- * re-submitting the context and this G2H handler. This is a bug in the
- * GuC but can be worked around in the meantime but converting this to a
- * NOP if a pending enable is in flight as this indicates that a request
- * cancellation has occurred.
*/
spin_lock_irqsave(&ce->guc_state.lock, flags);
- if (likely(!context_pending_enable(ce)))
- clr_context_enabled(ce);
- else
- skip = true;
+ clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (unlikely(skip))
- goto out_put;
/*
* For each context in the relationship find the hanging request
@@ -1592,7 +1642,6 @@ next_context:
}
__unwind_incomplete_requests(parent);
-out_put:
intel_context_put(parent);
}
@@ -1727,7 +1776,7 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
- test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) {
+ intel_gt_is_wedged(guc_to_gt(guc)))) {
return;
}
@@ -1746,6 +1795,7 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
}
static void destroyed_worker_func(struct work_struct *w);
+static void reset_fail_worker_func(struct work_struct *w);
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
@@ -1776,6 +1826,8 @@ int intel_guc_submission_init(struct intel_guc *guc)
INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
INIT_WORK(&guc->submission_state.destroyed_worker,
destroyed_worker_func);
+ INIT_WORK(&guc->submission_state.reset_fail_worker,
+ reset_fail_worker_func);
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
@@ -1785,6 +1837,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+ guc->timestamp.shift = gpm_timestamp_shift(gt);
return 0;
}
@@ -2531,12 +2584,6 @@ static void guc_context_cancel_request(struct intel_context *ce,
true);
}
- /*
- * XXX: Racey if context is reset, see comment in
- * __guc_reset_context().
- */
- flush_work(&ce_to_guc(ce)->ct.requests.worker);
-
guc_context_unblock(block_context);
intel_context_put(ce);
}
@@ -3250,8 +3297,6 @@ static void guc_parent_context_unpin(struct intel_context *ce)
GEM_BUG_ON(!intel_context_is_parent(ce));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
- if (ce->parallel.last_rq)
- i915_request_put(ce->parallel.last_rq);
unpin_guc_id(guc, ce);
lrc_unpin(ce);
}
@@ -3973,14 +4018,14 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- /*
- * XXX: Racey if request cancellation has occurred, see comment in
- * __guc_reset_context().
- */
- if (likely(!intel_context_is_banned(ce) &&
- !context_blocked(ce))) {
+ if (likely(!intel_context_is_banned(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
+ } else {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Invalid GuC engine reset notificaion for 0x%04X on %s: banned = %d, blocked = %d",
+ ce->guc_id.id, ce->engine->name, intel_context_is_banned(ce),
+ context_blocked(ce));
}
}
@@ -4019,6 +4064,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
return 0;
}
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len)
+{
+ int status;
+
+ if (unlikely(len != 1)) {
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ status = msg[0];
+ drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status);
+
+ /* FIXME: Do something with the capture */
+
+ return 0;
+}
+
static struct intel_engine_cs *
guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
{
@@ -4031,6 +4094,26 @@ guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
return gt->engine_class[engine_class][instance];
}
+static void reset_fail_worker_func(struct work_struct *w)
+{
+ struct intel_guc *guc = container_of(w, struct intel_guc,
+ submission_state.reset_fail_worker);
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_engine_mask_t reset_fail_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ reset_fail_mask = guc->submission_state.reset_fail_mask;
+ guc->submission_state.reset_fail_mask = 0;
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (likely(reset_fail_mask))
+ intel_gt_handle_error(gt, reset_fail_mask,
+ I915_ERROR_CAPTURE,
+ "GuC failed to reset engine mask=0x%x\n",
+ reset_fail_mask);
+}
+
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
@@ -4038,6 +4121,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
struct intel_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
+ unsigned long flags;
if (unlikely(len != 3)) {
drm_err(&gt->i915->drm, "Invalid length %u", len);
@@ -4062,10 +4146,15 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
guc_class, instance, engine->name, reason);
- intel_gt_handle_error(gt, engine->mask,
- I915_ERROR_CAPTURE,
- "GuC failed to reset %s (reason=0x%08x)\n",
- engine->name, reason);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ guc->submission_state.reset_fail_mask |= engine->mask;
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ /*
+ * A GT reset flushes this worker queue (G2H handler) so we must use
+ * another worker to trigger a GT reset.
+ */
+ queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
return 0;
}
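
A standalone sketch of the deferral pattern described above, with pthreads standing in for the kernel's spinlock and workqueues: the notification handler only records the failed-engine mask, and an independent worker drains it, because the reset path flushes the handler's own queue:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int reset_fail_mask;

	/* Runs in handler context: must not trigger the reset directly. */
	static void note_engine_failure(unsigned int engine_mask)
	{
		pthread_mutex_lock(&lock);
		reset_fail_mask |= engine_mask;
		pthread_mutex_unlock(&lock);
		/* here: queue the worker below on another queue */
	}

	/* Runs from an independent worker: safe to do the heavy reset. */
	static void *reset_fail_worker(void *arg)
	{
		unsigned int mask;

		pthread_mutex_lock(&lock);
		mask = reset_fail_mask;
		reset_fail_mask = 0;
		pthread_mutex_unlock(&lock);

		if (mask)
			printf("handling failed reset, mask=%#x\n", mask);
		return arg;
	}

	int main(void)
	{
		pthread_t t;

		note_engine_failure(0x5);
		pthread_create(&t, NULL, reset_fail_worker, NULL);
		pthread_join(t, NULL);
		return 0;
	}
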
@@ -4434,27 +4523,31 @@ static inline bool skip_handshake(struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}
+#define NON_SKIP_LEN 6
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
+ __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+ __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
GEM_BUG_ON(!intel_context_is_parent(ce));
if (unlikely(skip_handshake(rq))) {
/*
* NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
- * the -6 comes from the length of the emits below.
+ * the NON_SKIP_LEN comes from the length of the emits below.
*/
memset(cs, 0, sizeof(u32) *
- (ce->engine->emit_fini_breadcrumb_dw - 6));
- cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+ cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
} else {
cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
}
/* Emit fini breadcrumb */
+ before_fini_breadcrumb_user_interrupt_cs = cs;
cs = gen8_emit_ggtt_write(cs,
rq->fence.seqno,
i915_request_active_timeline(rq)->hwsp_offset,
@@ -4464,6 +4557,12 @@ emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
+ /* Ensure our math for skip + emit is correct */
+ GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+ cs);
+ GEM_BUG_ON(start_fini_breadcrumb_cs +
+ ce->engine->emit_fini_breadcrumb_dw != cs);
+
rq->tail = intel_ring_offset(rq, cs);
return cs;
@@ -4506,22 +4605,25 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
+ __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+ __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
GEM_BUG_ON(!intel_context_is_child(ce));
if (unlikely(skip_handshake(rq))) {
/*
* NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
- * the -6 comes from the length of the emits below.
+ * the NON_SKIP_LEN comes from the length of the emits below.
*/
memset(cs, 0, sizeof(u32) *
- (ce->engine->emit_fini_breadcrumb_dw - 6));
- cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+ cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
} else {
cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
}
/* Emit fini breadcrumb */
+ before_fini_breadcrumb_user_interrupt_cs = cs;
cs = gen8_emit_ggtt_write(cs,
rq->fence.seqno,
i915_request_active_timeline(rq)->hwsp_offset,
@@ -4531,11 +4633,19 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
+ /* Ensure our math for skip + emit is correct */
+ GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+ cs);
+ GEM_BUG_ON(start_fini_breadcrumb_cs +
+ ce->engine->emit_fini_breadcrumb_dw != cs);
+
rq->tail = intel_ring_offset(rq, cs);
return cs;
}
+#undef NON_SKIP_LEN
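
A small sketch of the NON_SKIP_LEN accounting (SLOT_DW and the command values are made up): the skippable prefix is either NOP-filled or emitted, and the asserts mirror the new GEM_BUG_ON()s so the skip and non-skip paths stay the same length:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define SLOT_DW		10	/* hypothetical emit_fini_breadcrumb_dw */
	#define NON_SKIP_LEN	6	/* dwords that are always emitted */

	static uint32_t *emit_slot(uint32_t *cs, int skip)
	{
		uint32_t *start = cs, *tail_start;

		if (skip) {
			/* NOP the skippable prefix instead of emitting it. */
			memset(cs, 0, sizeof(uint32_t) * (SLOT_DW - NON_SKIP_LEN));
			cs += SLOT_DW - NON_SKIP_LEN;
		} else {
			for (int i = 0; i < SLOT_DW - NON_SKIP_LEN; i++)
				*cs++ = 0x11110000u + i;	/* prefix commands */
		}

		tail_start = cs;
		for (int i = 0; i < NON_SKIP_LEN; i++)
			*cs++ = 0x22220000u + i;		/* unconditional tail */

		/* The same checks the patch adds as GEM_BUG_ON()s: */
		assert(tail_start + NON_SKIP_LEN == cs);
		assert(start + SLOT_DW == cs);
		return cs;
	}

	int main(void)
	{
		uint32_t buf[SLOT_DW];

		emit_slot(buf, 0);
		emit_slot(buf, 1);
		return 0;
	}
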
+
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d10b227ac4aa..556829de9c17 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -124,6 +124,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ drm_info(&gt->i915->drm, "HuC authenticated\n");
return 0;
fail:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 09ed29df67bc..da199aa6989f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -432,6 +432,15 @@ static int __uc_check_hw(struct intel_uc *uc)
return 0;
}
+static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->path,
+ fw->major_ver_found, fw->minor_ver_found);
+}
+
static int __uc_init_hw(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
@@ -442,6 +451,11 @@ static int __uc_init_hw(struct intel_uc *uc)
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!intel_uc_wants_guc(uc));
+ print_fw_ver(uc, &guc->fw);
+
+ if (intel_uc_uses_huc(uc))
+ print_fw_ver(uc, &huc->fw);
+
if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
@@ -507,24 +521,11 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
- intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
- guc->fw.major_ver_found, guc->fw.minor_ver_found,
- "submission",
+ drm_info(&i915->drm, "GuC submission %s\n",
enableddisabled(intel_uc_uses_guc_submission(uc)));
-
- drm_info(&i915->drm, "GuC SLPC: %s\n",
+ drm_info(&i915->drm, "GuC SLPC %s\n",
enableddisabled(intel_uc_uses_guc_slpc(uc)));
- if (intel_uc_uses_huc(uc)) {
- drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
- intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
- huc->fw.path,
- huc->fw.major_ver_found, huc->fw.minor_ver_found,
- "authenticated",
- yesno(intel_huc_is_authenticated(huc)));
- }
-
return 0;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 549d5919dc70..c88113044494 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -52,21 +52,21 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* firmware as TGL.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0)) \
- fw_def(DG1, 0, guc_def(dg1, 62, 0, 0)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0)) \
- fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0)) \
- fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0))
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3)) \
+ fw_def(DG1, 0, guc_def(dg1, 69, 0, 3)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 69, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 69, 0, 3)) \
+ fw_def(JASPERLAKE, 0, guc_def(ehl, 69, 0, 3)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 69, 0, 3)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 69, 0, 3)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 69, 0, 3)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 69, 0, 3)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 69, 0, 3)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 69, 0, 3)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 69, 0, 3)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 69, 0, 3)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 69, 0, 3))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
@@ -451,20 +451,19 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
- struct i915_vma *dummy = &uc_fw->dummy;
+ struct i915_vma_resource *dummy = &uc_fw->dummy;
u32 pte_flags = 0;
- dummy->node.start = uc_fw_ggtt_offset(uc_fw);
- dummy->node.size = obj->base.size;
- dummy->pages = obj->mm.pages;
- dummy->vm = &ggtt->vm;
+ dummy->start = uc_fw_ggtt_offset(uc_fw);
+ dummy->node_size = obj->base.size;
+ dummy->bi.pages = obj->mm.pages;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
+ GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
if (i915_gem_object_has_struct_page(obj))
- drm_clflush_sg(dummy->pages);
+ drm_clflush_sg(dummy->bi.pages);
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index d9d1dc0b4cbb..3229018877d3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -85,7 +85,7 @@ struct intel_uc_fw {
* threaded as it done during driver load (inherently single threaded)
* or during a GT reset (mutex guarantees single threaded).
*/
- struct i915_vma dummy;
+ struct i915_vma_resource dummy;
struct i915_vma *rsa_data;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index d3327b802b76..a115894d5896 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -157,7 +157,7 @@ static int intel_guc_steal_guc_ids(void *arg)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
sv = guc->submission_state.num_guc_ids;
- guc->submission_state.num_guc_ids = 4096;
+ guc->submission_state.num_guc_ids = 512;
/* Create spinner to block requests in below loop */
ce[context_index] = intel_context_create(engine);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 6b3dedd321bb..557f3314291a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -64,7 +64,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
mutex_lock(&gt->ggtt->vm.mutex);
mmio_hw_access_pre(gt);
- ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 12b7c36d8e8f..c95c25d2addb 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -90,7 +90,7 @@ static int vgpu_gem_get_pages(
kfree(st);
return ret;
}
- gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
deleted file mode 100644
index 6e2ad68f8f3f..000000000000
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ /dev/null
@@ -1,466 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#include <linux/kmemleak.h>
-#include <linux/sizes.h>
-
-#include "i915_buddy.h"
-
-#include "i915_gem.h"
-#include "i915_utils.h"
-
-static struct kmem_cache *slab_blocks;
-
-static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
- struct i915_buddy_block *parent,
- unsigned int order,
- u64 offset)
-{
- struct i915_buddy_block *block;
-
- GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
-
- block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
- if (!block)
- return NULL;
-
- block->header = offset;
- block->header |= order;
- block->parent = parent;
-
- GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
- return block;
-}
-
-static void i915_block_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- kmem_cache_free(slab_blocks, block);
-}
-
-static void mark_allocated(struct i915_buddy_block *block)
-{
- block->header &= ~I915_BUDDY_HEADER_STATE;
- block->header |= I915_BUDDY_ALLOCATED;
-
- list_del(&block->link);
-}
-
-static void mark_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- block->header &= ~I915_BUDDY_HEADER_STATE;
- block->header |= I915_BUDDY_FREE;
-
- list_add(&block->link,
- &mm->free_list[i915_buddy_block_order(block)]);
-}
-
-static void mark_split(struct i915_buddy_block *block)
-{
- block->header &= ~I915_BUDDY_HEADER_STATE;
- block->header |= I915_BUDDY_SPLIT;
-
- list_del(&block->link);
-}
-
-int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
-{
- unsigned int i;
- u64 offset;
-
- if (size < chunk_size)
- return -EINVAL;
-
- if (chunk_size < PAGE_SIZE)
- return -EINVAL;
-
- if (!is_power_of_2(chunk_size))
- return -EINVAL;
-
- size = round_down(size, chunk_size);
-
- mm->size = size;
- mm->avail = size;
- mm->chunk_size = chunk_size;
- mm->max_order = ilog2(size) - ilog2(chunk_size);
-
- GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
-
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!mm->free_list)
- return -ENOMEM;
-
- for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
-
- mm->n_roots = hweight64(size);
-
- mm->roots = kmalloc_array(mm->n_roots,
- sizeof(struct i915_buddy_block *),
- GFP_KERNEL);
- if (!mm->roots)
- goto out_free_list;
-
- offset = 0;
- i = 0;
-
- /*
- * Split into power-of-two blocks, in case we are given a size that is
- * not itself a power-of-two.
- */
- do {
- struct i915_buddy_block *root;
- unsigned int order;
- u64 root_size;
-
- root_size = rounddown_pow_of_two(size);
- order = ilog2(root_size) - ilog2(chunk_size);
-
- root = i915_block_alloc(mm, NULL, order, offset);
- if (!root)
- goto out_free_roots;
-
- mark_free(mm, root);
-
- GEM_BUG_ON(i > mm->max_order);
- GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
-
- mm->roots[i] = root;
-
- offset += root_size;
- size -= root_size;
- i++;
- } while (size);
-
- return 0;
-
-out_free_roots:
- while (i--)
- i915_block_free(mm, mm->roots[i]);
- kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
- return -ENOMEM;
-}
-
-void i915_buddy_fini(struct i915_buddy_mm *mm)
-{
- int i;
-
- for (i = 0; i < mm->n_roots; ++i) {
- GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
- i915_block_free(mm, mm->roots[i]);
- }
-
- GEM_WARN_ON(mm->avail != mm->size);
-
- kfree(mm->roots);
- kfree(mm->free_list);
-}
-
-static int split_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- unsigned int block_order = i915_buddy_block_order(block) - 1;
- u64 offset = i915_buddy_block_offset(block);
-
- GEM_BUG_ON(!i915_buddy_block_is_free(block));
- GEM_BUG_ON(!i915_buddy_block_order(block));
-
- block->left = i915_block_alloc(mm, block, block_order, offset);
- if (!block->left)
- return -ENOMEM;
-
- block->right = i915_block_alloc(mm, block, block_order,
- offset + (mm->chunk_size << block_order));
- if (!block->right) {
- i915_block_free(mm, block->left);
- return -ENOMEM;
- }
-
- mark_free(mm, block->left);
- mark_free(mm, block->right);
-
- mark_split(block);
-
- return 0;
-}
-
-static struct i915_buddy_block *
-get_buddy(struct i915_buddy_block *block)
-{
- struct i915_buddy_block *parent;
-
- parent = block->parent;
- if (!parent)
- return NULL;
-
- if (parent->left == block)
- return parent->right;
-
- return parent->left;
-}
-
-static void __i915_buddy_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- struct i915_buddy_block *parent;
-
- while ((parent = block->parent)) {
- struct i915_buddy_block *buddy;
-
- buddy = get_buddy(block);
-
- if (!i915_buddy_block_is_free(buddy))
- break;
-
- list_del(&buddy->link);
-
- i915_block_free(mm, block);
- i915_block_free(mm, buddy);
-
- block = parent;
- }
-
- mark_free(mm, block);
-}
-
-void i915_buddy_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
- mm->avail += i915_buddy_block_size(mm, block);
- __i915_buddy_free(mm, block);
-}
-
-void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
-{
- struct i915_buddy_block *block, *on;
-
- list_for_each_entry_safe(block, on, objects, link) {
- i915_buddy_free(mm, block);
- cond_resched();
- }
- INIT_LIST_HEAD(objects);
-}
-
-/*
- * Allocate power-of-two block. The order value here translates to:
- *
- * 0 = 2^0 * mm->chunk_size
- * 1 = 2^1 * mm->chunk_size
- * 2 = 2^2 * mm->chunk_size
- * ...
- */
-struct i915_buddy_block *
-i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
-{
- struct i915_buddy_block *block = NULL;
- unsigned int i;
- int err;
-
- for (i = order; i <= mm->max_order; ++i) {
- block = list_first_entry_or_null(&mm->free_list[i],
- struct i915_buddy_block,
- link);
- if (block)
- break;
- }
-
- if (!block)
- return ERR_PTR(-ENOSPC);
-
- GEM_BUG_ON(!i915_buddy_block_is_free(block));
-
- while (i != order) {
- err = split_block(mm, block);
- if (unlikely(err))
- goto out_free;
-
- /* Go low */
- block = block->left;
- i--;
- }
-
- mark_allocated(block);
- mm->avail -= i915_buddy_block_size(mm, block);
- kmemleak_update_trace(block);
- return block;
-
-out_free:
- if (i != order)
- __i915_buddy_free(mm, block);
- return ERR_PTR(err);
-}
-
-static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= e2 && e1 >= s2;
-}
-
-static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= s2 && e1 >= e2;
-}
-
-/*
- * Allocate range. Note that it's safe to chain together multiple alloc_ranges
- * with the same blocks list.
- *
- * Intended for pre-allocating portions of the address space, for example to
- * reserve a block for the initial framebuffer or similar, hence the expectation
- * here is that i915_buddy_alloc() is still the main vehicle for
- * allocations, so if that's not the case then the drm_mm range allocator is
- * probably a much better fit, and so you should probably go use that instead.
- */
-int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
- struct list_head *blocks,
- u64 start, u64 size)
-{
- struct i915_buddy_block *block;
- struct i915_buddy_block *buddy;
- LIST_HEAD(allocated);
- LIST_HEAD(dfs);
- u64 end;
- int err;
- int i;
-
- if (size < mm->chunk_size)
- return -EINVAL;
-
- if (!IS_ALIGNED(size | start, mm->chunk_size))
- return -EINVAL;
-
- if (range_overflows(start, size, mm->size))
- return -EINVAL;
-
- for (i = 0; i < mm->n_roots; ++i)
- list_add_tail(&mm->roots[i]->tmp_link, &dfs);
-
- end = start + size - 1;
-
- do {
- u64 block_start;
- u64 block_end;
-
- block = list_first_entry_or_null(&dfs,
- struct i915_buddy_block,
- tmp_link);
- if (!block)
- break;
-
- list_del(&block->tmp_link);
-
- block_start = i915_buddy_block_offset(block);
- block_end = block_start + i915_buddy_block_size(mm, block) - 1;
-
- if (!overlaps(start, end, block_start, block_end))
- continue;
-
- if (i915_buddy_block_is_allocated(block)) {
- err = -ENOSPC;
- goto err_free;
- }
-
- if (contains(start, end, block_start, block_end)) {
- if (!i915_buddy_block_is_free(block)) {
- err = -ENOSPC;
- goto err_free;
- }
-
- mark_allocated(block);
- mm->avail -= i915_buddy_block_size(mm, block);
- list_add_tail(&block->link, &allocated);
- continue;
- }
-
- if (!i915_buddy_block_is_split(block)) {
- err = split_block(mm, block);
- if (unlikely(err))
- goto err_undo;
- }
-
- list_add(&block->right->tmp_link, &dfs);
- list_add(&block->left->tmp_link, &dfs);
- } while (1);
-
- list_splice_tail(&allocated, blocks);
- return 0;
-
-err_undo:
- /*
- * We really don't want to leave around a bunch of split blocks, since
- * bigger is better, so make sure we merge everything back before we
- * free the allocated blocks.
- */
- buddy = get_buddy(block);
- if (buddy &&
- (i915_buddy_block_is_free(block) &&
- i915_buddy_block_is_free(buddy)))
- __i915_buddy_free(mm, block);
-
-err_free:
- i915_buddy_free_list(mm, &allocated);
- return err;
-}
-
-void i915_buddy_block_print(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block,
- struct drm_printer *p)
-{
- u64 start = i915_buddy_block_offset(block);
- u64 size = i915_buddy_block_size(mm, block);
-
- drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
-}
-
-void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p)
-{
- int order;
-
- drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
- mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
-
- for (order = mm->max_order; order >= 0; order--) {
- struct i915_buddy_block *block;
- u64 count = 0, free;
-
- list_for_each_entry(block, &mm->free_list[order], link) {
- GEM_BUG_ON(!i915_buddy_block_is_free(block));
- count++;
- }
-
- drm_printf(p, "order-%d ", order);
-
- free = count * (mm->chunk_size << order);
- if (free < SZ_1M)
- drm_printf(p, "free: %lluKiB", free >> 10);
- else
- drm_printf(p, "free: %lluMiB", free >> 20);
-
- drm_printf(p, ", pages: %llu\n", count);
- }
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/i915_buddy.c"
-#endif
-
-void i915_buddy_module_exit(void)
-{
- kmem_cache_destroy(slab_blocks);
-}
-
-int __init i915_buddy_module_init(void)
-{
- slab_blocks = KMEM_CACHE(i915_buddy_block, 0);
- if (!slab_blocks)
- return -ENOMEM;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
deleted file mode 100644
index 7077742112ac..000000000000
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __I915_BUDDY_H__
-#define __I915_BUDDY_H__
-
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <drm/drm_print.h>
-
-struct i915_buddy_block {
-#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
-#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
-#define I915_BUDDY_ALLOCATED (1 << 10)
-#define I915_BUDDY_FREE (2 << 10)
-#define I915_BUDDY_SPLIT (3 << 10)
-/* Free to be used, if needed in the future */
-#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
-#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
- u64 header;
-
- struct i915_buddy_block *left;
- struct i915_buddy_block *right;
- struct i915_buddy_block *parent;
-
- void *private; /* owned by creator */
-
- /*
- * While the block is allocated by the user through i915_buddy_alloc*,
- * the user has ownership of the link, for example to maintain within
- * a list, if so desired. As soon as the block is freed with
- * i915_buddy_free* ownership is given back to the mm.
- */
- struct list_head link;
- struct list_head tmp_link;
-};
-
-/* Order-zero must be at least PAGE_SIZE */
-#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
-
-/*
- * Binary Buddy System.
- *
- * Locking should be handled by the user, a simple mutex around
- * i915_buddy_alloc* and i915_buddy_free* should suffice.
- */
-struct i915_buddy_mm {
- /* Maintain a free list for each order. */
- struct list_head *free_list;
-
- /*
- * Maintain explicit binary tree(s) to track the allocation of the
- * address space. This gives us a simple way of finding a buddy block
- * and performing the potentially recursive merge step when freeing a
- * block. Nodes are either allocated or free, in which case they will
- * also exist on the respective free list.
- */
- struct i915_buddy_block **roots;
-
- /*
- * Anything from here is public, and remains static for the lifetime of
- * the mm. Everything above is considered do-not-touch.
- */
- unsigned int n_roots;
- unsigned int max_order;
-
- /* Must be at least PAGE_SIZE */
- u64 chunk_size;
- u64 size;
- u64 avail;
-};
-
-static inline u64
-i915_buddy_block_offset(struct i915_buddy_block *block)
-{
- return block->header & I915_BUDDY_HEADER_OFFSET;
-}
-
-static inline unsigned int
-i915_buddy_block_order(struct i915_buddy_block *block)
-{
- return block->header & I915_BUDDY_HEADER_ORDER;
-}
-
-static inline unsigned int
-i915_buddy_block_state(struct i915_buddy_block *block)
-{
- return block->header & I915_BUDDY_HEADER_STATE;
-}
-
-static inline bool
-i915_buddy_block_is_allocated(struct i915_buddy_block *block)
-{
- return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
-}
-
-static inline bool
-i915_buddy_block_is_free(struct i915_buddy_block *block)
-{
- return i915_buddy_block_state(block) == I915_BUDDY_FREE;
-}
-
-static inline bool
-i915_buddy_block_is_split(struct i915_buddy_block *block)
-{
- return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
-}
-
-static inline u64
-i915_buddy_block_size(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- return mm->chunk_size << i915_buddy_block_order(block);
-}
-
-int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
-
-void i915_buddy_fini(struct i915_buddy_mm *mm);
-
-struct i915_buddy_block *
-i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
-
-int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
- struct list_head *blocks,
- u64 start, u64 size);
-
-void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
-
-void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
-
-void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p);
-void i915_buddy_block_print(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block,
- struct drm_printer *p);
-
-void i915_buddy_module_exit(void);
-int i915_buddy_module_init(void);
-
-#endif
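
Taken together, the removed API was driven roughly as follows, a minimal sketch under the header's stated locking rule with a caller-owned mutex (error handling trimmed):

	struct i915_buddy_mm mm;
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int err;

	err = i915_buddy_init(&mm, SZ_8G, SZ_4K);  /* 8 GiB space, 4 KiB chunks */
	if (err)
		return err;

	mutex_lock(&lock);                 /* caller-provided, per the comment above */
	block = i915_buddy_alloc(&mm, 4);  /* 2^4 * 4 KiB == 64 KiB */
	mutex_unlock(&lock);
	if (!IS_ERR(block))
		list_add(&block->link, &blocks);  /* user owns block->link while allocated */

	mutex_lock(&lock);
	i915_buddy_free_list(&mm, &blocks);
	mutex_unlock(&lock);
	i915_buddy_fini(&mm);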
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 54c66d4b6887..946bbe57bfe5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -183,7 +183,8 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
stringify_vma_type(vma),
vma->node.start, vma->node.size,
- stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
+ stringify_page_sizes(vma->resource->page_sizes_gtt,
+ NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
@@ -403,9 +404,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
+ swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
+ swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index a892a1b546a8..1c67ff735f18 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -577,6 +577,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
i915_perf_init(dev_priv);
+ ret = intel_gt_assign_ggtt(to_gt(dev_priv));
+ if (ret)
+ goto err_perf;
+
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
goto err_perf;
@@ -593,8 +597,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- intel_gt_init_hw_early(to_gt(dev_priv), &dev_priv->ggtt);
-
ret = intel_gt_probe_lmem(to_gt(dev_priv));
if (ret)
goto err_mem_regions;
@@ -1152,7 +1154,7 @@ static int i915_drm_suspend(struct drm_device *dev)
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
- i915_ggtt_suspend(&dev_priv->ggtt);
+ i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
i915_save_display(dev_priv);
@@ -1276,7 +1278,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (ret)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
- i915_ggtt_resume(&dev_priv->ggtt);
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ffde71b6b3f1..f600d1cb01b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -641,8 +641,6 @@ struct drm_i915_private {
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
- struct i915_ggtt ggtt; /* VM representing the global address space */
-
struct i915_gem_mm mm;
/* Kernel Modesetting */
@@ -1089,6 +1087,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
+#define IS_DG2_G12(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
#define IS_ADLP_N(dev_priv) \
@@ -1205,16 +1205,17 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
/*
- * DG2 hardware steppings are a bit unusual. The hardware design was forked
- * to create two variants (G10 and G11) which have distinct workaround sets.
- * The G11 fork of the DG2 design resets the GT stepping back to "A0" for its
- * first iteration, even though it's more similar to a G10 B0 stepping in terms
- * of functionality and workarounds. However the display stepping does not
- * reset in the same manner --- a specific stepping like "B0" has a consistent
- * meaning regardless of whether it belongs to a G10 or G11 DG2.
+ * DG2 hardware steppings are a bit unusual. The hardware design was forked to
+ * create three variants (G10, G11, and G12) which each have distinct
+ * workaround sets. The G11 and G12 forks of the DG2 design reset the GT
+ * stepping back to "A0" for their first iterations, even though they're more
+ * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
+ * functionality and workarounds. However the display stepping does not reset
+ * in the same manner --- a specific stepping like "B0" has a consistent
+ * meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
*
* TLDR: All GT workarounds and stepping-specific logic must be applied in
- * relation to a specific subplatform (G10 or G11), whereas display workarounds
+ * relation to a specific subplatform (G10/G11/G12), whereas display workarounds
* and stepping-specific logic will be applied with a general DG2-wide stepping
* number.
*/
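
Concretely, that guidance means GT-side checks are written against a subplatform while display checks use the DG2-wide stepping. A hedged illustration (the workaround helpers are placeholders; STEP_* values are assumed from intel_step.h):

	/* GT workaround: G11-specific, gated on the G11 GT stepping. */
	if (IS_DG2_G11(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
		apply_gt_workaround(i915);        /* placeholder */

	/* Display workaround: any DG2, gated on the DG2-wide stepping. */
	if (IS_DG2(i915) && IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER))
		apply_display_workaround(i915);   /* placeholder */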
@@ -1384,6 +1385,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+#define HAS_GUC_DEPRIVILEGE(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_guc_deprivilege)
+
static inline bool run_as_guest(void)
{
return !hypervisor_is_type(X86_HYPER_NATIVE);
@@ -1480,6 +1484,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
+#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1498,7 +1503,7 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index db897fb11f9b..2e10187cd0a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -92,7 +92,8 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
u64 pinned;
@@ -122,6 +123,8 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ assert_object_held(obj);
+
if (list_empty(&obj->vma.list))
return 0;
@@ -159,10 +162,16 @@ try_again:
spin_unlock(&obj->vma.lock);
if (vma) {
+ bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
ret = -EBUSY;
- if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma)) {
- if (flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) {
+ if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
+ assert_object_held(vma->obj);
+ ret = i915_vma_unbind_async(vma, vm_trylock);
+ }
+
+ if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))) {
+ if (vm_trylock) {
if (mutex_trylock(&vma->vm->mutex)) {
ret = __i915_vma_unbind(vma);
mutex_unlock(&vma->vm->mutex);
@@ -293,7 +302,7 @@ static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
bool write)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
struct i915_gem_ww_ctx ww;
int ret;
@@ -354,7 +363,7 @@ static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
i915_gem_object_unpin_pages(obj);
if (drm_mm_node_allocated(node)) {
@@ -370,7 +379,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
void __user *user_data;
@@ -526,7 +535,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
@@ -827,7 +836,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
*/
list_for_each_entry_safe(obj, on,
- &i915->ggtt.userfault_list, userfault_link)
+ &to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
/*
@@ -835,8 +844,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
/*
* Ideally we want to assert that the fence register is not
@@ -877,7 +886,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
int ret;
@@ -1127,7 +1136,7 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
- i915_ggtt_resume(&dev_priv->ggtt);
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
intel_init_clock_gating(dev_priv);
}
@@ -1150,7 +1159,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
+ intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(to_gt(dev_priv));
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 24eee0c2055f..f025ee4fa526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,6 +38,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
+static bool dying_vma(struct i915_vma *vma)
+{
+ return !kref_read(&vma->obj->base.refcount);
+}
+
static int ggtt_flush(struct intel_gt *gt)
{
/*
@@ -50,8 +55,37 @@ static int ggtt_flush(struct intel_gt *gt)
return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
+static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
+{
+ /*

+ * We add the extra refcount so the object doesn't drop to zero until
+ * after ungrab_vma(); this way trylock is always paired with unlock.
+ */
+ if (i915_gem_object_get_rcu(vma->obj)) {
+ if (!i915_gem_object_trylock(vma->obj, ww)) {
+ i915_gem_object_put(vma->obj);
+ return false;
+ }
+ } else {
+ /* Dead objects don't need pins */
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ }
+
+ return true;
+}
+
+static void ungrab_vma(struct i915_vma *vma)
+{
+ if (dying_vma(vma))
+ return;
+
+ i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
+}
+
static bool
mark_free(struct drm_mm_scan *scan,
+ struct i915_gem_ww_ctx *ww,
struct i915_vma *vma,
unsigned int flags,
struct list_head *unwind)
@@ -59,6 +93,9 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
+ if (!grab_vma(vma, ww))
+ return false;
+
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -77,6 +114,7 @@ static bool defer_evict(struct i915_vma *vma)
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from
+ * @ww: An optional struct i915_gem_ww_ctx.
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @color: color for the desired space
@@ -99,6 +137,7 @@ static bool defer_evict(struct i915_vma *vma)
*/
int
i915_gem_evict_something(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
u64 min_size, u64 alignment,
unsigned long color,
u64 start, u64 end,
@@ -171,7 +210,7 @@ search_again:
continue;
}
- if (mark_free(&scan, vma, flags, &eviction_list))
+ if (mark_free(&scan, ww, vma, flags, &eviction_list))
goto found;
}
@@ -179,6 +218,7 @@ search_again:
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
+ ungrab_vma(vma);
}
/*
@@ -223,10 +263,12 @@ found:
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
- if (drm_mm_scan_remove_block(&scan, &vma->node))
+ if (drm_mm_scan_remove_block(&scan, &vma->node)) {
__i915_vma_pin(vma);
- else
+ } else {
list_del(&vma->evict_link);
+ ungrab_vma(vma);
+ }
}
/* Unbinding will emit any required flushes */
@@ -235,16 +277,20 @@ found:
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
+ ungrab_vma(vma);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
/* If we find any non-objects (!vma), we cannot evict them */
- if (vma->node.color != I915_COLOR_UNEVICTABLE)
+ if (vma->node.color != I915_COLOR_UNEVICTABLE &&
+ grab_vma(vma, ww)) {
ret = __i915_vma_unbind(vma);
- else
- ret = -ENOSPC; /* XXX search failed, try again? */
+ ungrab_vma(vma);
+ } else {
+ ret = -ENOSPC;
+ }
}
return ret;
@@ -253,6 +299,7 @@ found:
/**
* i915_gem_evict_for_node - Evict vmas to make room for binding a new one
* @vm: address space to evict from
+ * @ww: An optional struct i915_gem_ww_ctx.
* @target: range (and color) to evict for
* @flags: additional flags to control the eviction algorithm
*
@@ -262,6 +309,7 @@ found:
* memory in e.g. the shrinker.
*/
int i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *target,
unsigned int flags)
{
@@ -334,6 +382,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
+ if (!grab_vma(vma, ww)) {
+ ret = -ENOSPC;
+ break;
+ }
+
/*
* Never show fear in the face of dragons!
*
@@ -351,6 +404,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
+
+ ungrab_vma(vma);
}
return ret;
@@ -359,6 +414,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
* @vm: Address space to cleanse
+ * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
+ * will be able to evict vmas locked by the ww as well.
*
* This function evicts all vmas from a vm.
*
@@ -368,7 +425,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
-int i915_gem_evict_vm(struct i915_address_space *vm)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
{
int ret = 0;
@@ -389,24 +446,52 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
do {
struct i915_vma *vma, *vn;
LIST_HEAD(eviction_list);
+ LIST_HEAD(locked_eviction_list);
list_for_each_entry(vma, &vm->bound_list, vm_link) {
if (i915_vma_is_pinned(vma))
continue;
+ /*
+ * If we already own the lock, trylock fails. In case
+ * the resv is shared among multiple objects, we still
+ * need the object ref.
+ */
+ if (dying_vma(vma) ||
+ (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
+ __i915_vma_pin(vma);
+ list_add(&vma->evict_link, &locked_eviction_list);
+ continue;
+ }
+
+ if (!i915_gem_object_trylock(vma->obj, ww))
+ continue;
+
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
- if (list_empty(&eviction_list))
+ if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
break;
ret = 0;
+ /* Unbind locked objects first, before unlocking the eviction_list */
+ list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
+ __i915_vma_unpin(vma);
+
+ if (ret == 0)
+ ret = __i915_vma_unbind(vma);
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+
list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
if (ret != -EINTR) /* "Get me out of here!" */
ret = 0;
+
+ i915_gem_object_unlock(vma->obj);
}
} while (ret == 0);
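
With the new parameter, a caller that already holds a ww transaction threads it through so its own locks do not cause spurious failures. A minimal sketch (assuming the for_i915_gem_ww() helper from i915_gem_ww.h and that vm->mutex is taken as the eviction functions require; error handling trimmed):

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = mutex_lock_interruptible(&vm->mutex);
		if (err)
			continue;

		/* Objects locked by @ww (including obj) can now be evicted. */
		err = i915_gem_evict_vm(vm, &ww);
		mutex_unlock(&vm->mutex);
	}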
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h
index d4478b6ad11b..e593c530f9bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.h
+++ b/drivers/gpu/drm/i915/i915_gem_evict.h
@@ -10,15 +10,19 @@
struct drm_mm_node;
struct i915_address_space;
+struct i915_gem_ww_ctx;
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
u64 min_size, u64 alignment,
unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
unsigned int flags);
-int i915_gem_evict_vm(struct i915_address_space *vm);
+int i915_gem_evict_vm(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww);
#endif /* __I915_GEM_EVICT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8a7f0d92b56f..329ff75b80b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
/* XXX This does not prevent more requests being submitted! */
if (unlikely(ggtt->do_idle_maps))
@@ -71,6 +71,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
+ * @ww: An optional struct i915_gem_ww_ctx.
* @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
@@ -94,6 +95,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
* asked to wait for eviction and interrupted.
*/
int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color,
unsigned int flags)
@@ -104,7 +106,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -118,7 +120,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
if (flags & PIN_NOEVICT)
return -ENOSPC;
- err = i915_gem_evict_for_node(vm, node, flags);
+ err = i915_gem_evict_for_node(vm, ww, node, flags);
if (err == 0)
err = drm_mm_reserve_node(&vm->mm, node);
@@ -153,6 +155,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
/**
* i915_gem_gtt_insert - insert a node into an address_space (GTT)
* @vm: the &struct i915_address_space
+ * @ww: An optional struct i915_gem_ww_ctx.
* @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
@@ -185,6 +188,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
* asked to wait for eviction and interrupted.
*/
int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags)
@@ -202,7 +206,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -270,7 +274,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
*/
offset = random_offset(start, end,
size, alignment ?: I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
+ err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
if (err != -ENOSPC)
return err;
@@ -278,7 +282,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
return -ENOSPC;
/* Randomly selected placement is pinned, do a search */
- err = i915_gem_evict_something(vm, size, alignment, color,
+ err = i915_gem_evict_something(vm, ww, size, alignment, color,
start, end, flags);
if (err)
return err;
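
Callers that have no ww transaction simply pass NULL, as the vgpu ballooning path converted later in this patch does. Sketch (flags and range illustrative):

	err = i915_gem_gtt_insert(&ggtt->vm, NULL, &node,
				  size, 0, I915_COLOR_UNEVICTABLE,
				  0, ggtt->vm.total, PIN_HIGH);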
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c9b0ee5e1d23..8c2f57eb5dda 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -16,6 +16,7 @@
struct drm_i915_gem_object;
struct i915_address_space;
+struct i915_gem_ww_ctx;
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -23,11 +24,13 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color,
unsigned int flags);
int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags);
@@ -41,6 +44,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_HIGH BIT_ULL(5)
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
+#define PIN_VALIDATE BIT_ULL(8) /* validate placement only, no need to call unpin() */
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index dbe49fd87283..c12a0adefda5 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -33,7 +33,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = i915->ggtt.num_fences;
+ value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
value = !!i915->overlay;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 53d64c217de1..1d042551619e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -52,7 +52,6 @@
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
-#include "i915_vma_snapshot.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -1017,8 +1016,10 @@ void __i915_gpu_coredump_free(struct kref *error_ref)
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
- const struct i915_vma_snapshot *vsnap,
- struct i915_vma_compress *compress)
+ const struct i915_vma_resource *vma_res,
+ struct i915_vma_compress *compress,
+ const char *name)
+
{
struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
@@ -1028,7 +1029,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
might_sleep();
- if (!vsnap || !vsnap->pages || !compress)
+ if (!vma_res || !vma_res->bi.pages || !compress)
return NULL;
dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
@@ -1041,12 +1042,12 @@ i915_vma_coredump_create(const struct intel_gt *gt,
}
INIT_LIST_HEAD(&dst->page_list);
- strcpy(dst->name, vsnap->name);
+ strcpy(dst->name, name);
dst->next = NULL;
- dst->gtt_offset = vsnap->gtt_offset;
- dst->gtt_size = vsnap->gtt_size;
- dst->gtt_page_sizes = vsnap->page_sizes;
+ dst->gtt_offset = vma_res->start;
+ dst->gtt_size = vma_res->node_size;
+ dst->gtt_page_sizes = vma_res->page_sizes_gtt;
dst->unused = 0;
ret = -EINVAL;
@@ -1054,7 +1055,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
void __iomem *s;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vsnap->pages) {
+ for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
mutex_lock(&ggtt->error_mutex);
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
I915_CACHE_NONE, 0);
@@ -1072,11 +1073,11 @@ i915_vma_coredump_create(const struct intel_gt *gt,
if (ret)
break;
}
- } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) {
- struct intel_memory_region *mem = vsnap->mr;
+ } else if (vma_res->bi.lmem) {
+ struct intel_memory_region *mem = vma_res->mr;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vsnap->pages) {
+ for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
void __iomem *s;
s = io_mapping_map_wc(&mem->iomap,
@@ -1092,7 +1093,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
} else {
struct page *page;
- for_each_sgt_page(page, iter, vsnap->pages) {
+ for_each_sgt_page(page, iter, vma_res->bi.pages) {
void *s;
drm_clflush_pages(&page, 1);
@@ -1328,33 +1329,32 @@ static bool record_context(struct i915_gem_context_coredump *e,
struct intel_engine_capture_vma {
struct intel_engine_capture_vma *next;
- struct i915_vma_snapshot *vsnap;
+ struct i915_vma_resource *vma_res;
char name[16];
bool lockdep_cookie;
};
static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
- struct i915_vma_snapshot *vsnap,
- gfp_t gfp)
+ struct i915_vma_resource *vma_res,
+ gfp_t gfp, const char *name)
{
struct intel_engine_capture_vma *c;
- if (!i915_vma_snapshot_present(vsnap))
+ if (!vma_res)
return next;
c = kmalloc(sizeof(*c), gfp);
if (!c)
return next;
- if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) {
+ if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
kfree(c);
return next;
}
- strcpy(c->name, vsnap->name);
- c->vsnap = vsnap;
- i915_vma_snapshot_get(vsnap);
+ strcpy(c->name, name);
+ c->vma_res = i915_vma_resource_get(vma_res);
c->next = next;
return c;
@@ -1366,8 +1366,6 @@ capture_vma(struct intel_engine_capture_vma *next,
const char *name,
gfp_t gfp)
{
- struct i915_vma_snapshot *vsnap;
-
if (!vma)
return next;
@@ -1376,19 +1374,10 @@ capture_vma(struct intel_engine_capture_vma *next,
* to a struct i915_vma_snapshot at command submission time.
* Not here.
*/
- GEM_WARN_ON(!i915_vma_is_pinned(vma));
- if (!i915_vma_is_pinned(vma))
- return next;
-
- vsnap = i915_vma_snapshot_alloc(gfp);
- if (!vsnap)
+ if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
return next;
- i915_vma_snapshot_init(vsnap, vma, name);
- next = capture_vma_snapshot(next, vsnap, gfp);
-
- /* FIXME: Replace on async unbind. */
- i915_vma_snapshot_put(vsnap);
+ next = capture_vma_snapshot(next, vma->resource, gfp, name);
return next;
}
@@ -1401,7 +1390,8 @@ capture_user(struct intel_engine_capture_vma *capture,
struct i915_capture_list *c;
for (c = rq->capture_list; c; c = c->next)
- capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp);
+ capture = capture_vma_snapshot(capture, c->vma_res, gfp,
+ "user");
return capture;
}
@@ -1419,16 +1409,19 @@ static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
const char *name, struct i915_vma_compress *compress)
{
- struct i915_vma_coredump *ret;
- struct i915_vma_snapshot tmp;
+ struct i915_vma_coredump *ret = NULL;
+ struct i915_vma_resource *vma_res;
+ bool lockdep_cookie;
if (!vma)
return NULL;
- GEM_WARN_ON(!i915_vma_is_pinned(vma));
- i915_vma_snapshot_init_onstack(&tmp, vma, name);
- ret = i915_vma_coredump_create(gt, &tmp, compress);
- i915_vma_snapshot_put_onstack(&tmp);
+ vma_res = vma->resource;
+
+ if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
+ ret = i915_vma_coredump_create(gt, vma_res, compress, name);
+ i915_vma_resource_unhold(vma_res, lockdep_cookie);
+ }
return ret;
}
@@ -1475,7 +1468,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
* as the simplest method to avoid being overwritten
* by userspace.
*/
- vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp);
+ vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
vma = capture_user(vma, rq, gfp);
vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
vma = capture_vma(vma, rq->context->state, "HW context", gfp);
@@ -1496,14 +1489,14 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
while (capture) {
struct intel_engine_capture_vma *this = capture;
- struct i915_vma_snapshot *vsnap = this->vsnap;
+ struct i915_vma_resource *vma_res = this->vma_res;
add_vma(ee,
- i915_vma_coredump_create(engine->gt,
- vsnap, compress));
+ i915_vma_coredump_create(engine->gt, vma_res,
+ compress, this->name));
- i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie);
- i915_vma_snapshot_put(vsnap);
+ i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
+ i915_vma_resource_put(vma_res);
capture = this->next;
kfree(this);
@@ -1526,7 +1519,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
if (!ee)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 5d6fdf37fb5a..65acd7bf75d0 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -9,7 +9,6 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_active.h"
-#include "i915_buddy.h"
#include "i915_driver.h"
#include "i915_params.h"
#include "i915_pci.h"
@@ -18,6 +17,7 @@
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_vma.h"
+#include "i915_vma_resource.h"
static int i915_check_nomodeset(void)
{
@@ -51,8 +51,6 @@ static const struct {
{ .init = i915_check_nomodeset },
{ .init = i915_active_module_init,
.exit = i915_active_module_exit },
- { .init = i915_buddy_module_init,
- .exit = i915_buddy_module_exit },
{ .init = i915_context_module_init,
.exit = i915_context_module_exit },
{ .init = i915_gem_context_module_init,
@@ -65,6 +63,8 @@ static const struct {
.exit = i915_scheduler_module_exit },
{ .init = i915_vma_module_init,
.exit = i915_vma_module_exit },
+ { .init = i915_vma_resource_module_init,
+ .exit = i915_vma_resource_module_exit },
{ .init = i915_mock_selftests },
{ .init = i915_pmu_init,
.exit = i915_pmu_exit },
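
The vma-resource hooks registered here take over the slot freed by the buddy hooks removed above, and their shape mirrors the removed pair. Sketch under the assumption of a slab-backed implementation like the one being deleted (slab_vma_resources is illustrative):

int __init i915_vma_resource_module_init(void)
{
	slab_vma_resources = KMEM_CACHE(i915_vma_resource, 0);
	if (!slab_vma_resources)
		return -ENOMEM;

	return 0;
}

void i915_vma_resource_module_exit(void)
{
	kmem_cache_destroy(slab_vma_resources);
}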
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 76e590fcb903..8246cbe9b01d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -1047,6 +1047,7 @@ static const struct intel_device_info dg2_info = {
.graphics.rel = 55,
.media.rel = 55,
PLATFORM(INTEL_DG2),
+ .has_guc_deprivilege = 1,
.has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3ecb89e96fb6..00fb40029f43 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1636,8 +1636,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
- atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -2120,7 +2120,7 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- i915_reg_t flex_regs[] = {
+ static const i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
EU_PERF_CNTL1,
EU_PERF_CNTL2,
@@ -3548,7 +3548,7 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
2ULL << exponent);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b9783584841b..2b8a3086ed35 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8835,4 +8835,8 @@ enum skl_power_gate {
#define CLKGATE_DIS_MISC _MMIO(0x46534)
#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+#define GEN12_CULLBIT1 _MMIO(0x6100)
+#define GEN12_CULLBIT2 _MMIO(0x7030)
+#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 505276373cc2..582770360ad1 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -118,8 +118,10 @@ static void i915_fence_release(struct dma_fence *fence)
rq->guc_prio != GUC_PRIO_FINI);
i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
- if (i915_vma_snapshot_present(&rq->batch_snapshot))
- i915_vma_snapshot_put_onstack(&rq->batch_snapshot);
+ if (rq->batch_res) {
+ i915_vma_resource_put(rq->batch_res);
+ rq->batch_res = NULL;
+ }
/*
* The request is put onto a RCU freelist (i.e. the address
@@ -310,7 +312,7 @@ void i915_request_free_capture_list(struct i915_capture_list *capture)
while (capture) {
struct i915_capture_list *next = capture->next;
- i915_vma_snapshot_put(capture->vma_snapshot);
+ i915_vma_resource_put(capture->vma_res);
kfree(capture);
capture = next;
}
@@ -856,7 +858,7 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
clear_capture_list(rq);
- rq->batch_snapshot.present = false;
+ rq->batch_res = NULL;
init_llist_head(&rq->execute_cb);
}
@@ -962,7 +964,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
__rq_init_watchdog(rq);
assert_capture_list_is_null(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
- GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot));
+ GEM_BUG_ON(rq->batch_res);
/*
* Reserve space in the ring buffer for all the commands required to
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 170ee78c2858..28b1f9db5487 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -40,7 +40,7 @@
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
-#include "i915_vma_snapshot.h"
+#include "i915_vma_resource.h"
#include <uapi/drm/i915_drm.h>
@@ -52,7 +52,7 @@ struct i915_request;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
- struct i915_vma_snapshot *vma_snapshot;
+ struct i915_vma_resource *vma_res;
struct i915_capture_list *next;
};
@@ -300,7 +300,7 @@ struct i915_request {
/** Batch buffer pointer for selftest internal use. */
I915_SELFTEST_DECLARE(struct i915_vma *batch);
- struct i915_vma_snapshot batch_snapshot;
+ struct i915_vma_resource *batch_res;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/**
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 41f2adb6a583..159571b9bd24 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -5,10 +5,9 @@
*/
#include "i915_scatterlist.h"
-
-#include "i915_buddy.h"
#include "i915_ttm_buddy_manager.h"
+#include <drm/drm_buddy.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
@@ -153,9 +152,9 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
const u64 size = res->num_pages << PAGE_SHIFT;
const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
- struct i915_buddy_mm *mm = bman_res->mm;
+ struct drm_buddy *mm = bman_res->mm;
struct list_head *blocks = &bman_res->blocks;
- struct i915_buddy_block *block;
+ struct drm_buddy_block *block;
struct i915_refct_sgt *rsgt;
struct scatterlist *sg;
struct sg_table *st;
@@ -181,8 +180,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
- block_size = min_t(u64, size, i915_buddy_block_size(mm, block));
- offset = i915_buddy_block_offset(block);
+ block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
+ offset = drm_buddy_block_offset(block);
while (block_size) {
u64 len;
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index d59fbb019032..247714bab044 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -8,14 +8,15 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_buddy.h>
+
#include "i915_ttm_buddy_manager.h"
-#include "i915_buddy.h"
#include "i915_gem.h"
struct i915_ttm_buddy_manager {
struct ttm_resource_manager manager;
- struct i915_buddy_mm mm;
+ struct drm_buddy mm;
struct list_head reserved;
struct mutex lock;
u64 default_page_size;
@@ -34,7 +35,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct i915_ttm_buddy_resource *bman_res;
- struct i915_buddy_mm *mm = &bman->mm;
+ struct drm_buddy *mm = &bman->mm;
unsigned long n_pages;
unsigned int min_order;
u64 min_page_size;
@@ -73,7 +74,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
n_pages = size >> ilog2(mm->chunk_size);
do {
- struct i915_buddy_block *block;
+ struct drm_buddy_block *block;
unsigned int order;
order = fls(n_pages) - 1;
@@ -82,7 +83,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
do {
mutex_lock(&bman->lock);
- block = i915_buddy_alloc(mm, order);
+ block = drm_buddy_alloc_blocks(mm, order);
mutex_unlock(&bman->lock);
if (!IS_ERR(block))
break;
@@ -106,9 +107,10 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
err_free_blocks:
mutex_lock(&bman->lock);
- i915_buddy_free_list(mm, &bman_res->blocks);
+ drm_buddy_free_list(mm, &bman_res->blocks);
mutex_unlock(&bman->lock);
err_free_res:
+ ttm_resource_fini(man, &bman_res->base);
kfree(bman_res);
return err;
}
@@ -120,9 +122,10 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
mutex_lock(&bman->lock);
- i915_buddy_free_list(&bman->mm, &bman_res->blocks);
+ drm_buddy_free_list(&bman->mm, &bman_res->blocks);
mutex_unlock(&bman->lock);
+ ttm_resource_fini(man, res);
kfree(bman_res);
}
@@ -130,17 +133,17 @@ static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
- struct i915_buddy_block *block;
+ struct drm_buddy_block *block;
mutex_lock(&bman->lock);
drm_printf(printer, "default_page_size: %lluKiB\n",
bman->default_page_size >> 10);
- i915_buddy_print(&bman->mm, printer);
+ drm_buddy_print(&bman->mm, printer);
drm_printf(printer, "reserved:\n");
list_for_each_entry(block, &bman->reserved, link)
- i915_buddy_block_print(&bman->mm, block, printer);
+ drm_buddy_block_print(&bman->mm, block, printer);
mutex_unlock(&bman->lock);
}
@@ -190,7 +193,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
if (!bman)
return -ENOMEM;
- err = i915_buddy_init(&bman->mm, size, chunk_size);
+ err = drm_buddy_init(&bman->mm, size, chunk_size);
if (err)
goto err_free_bman;
@@ -202,7 +205,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
man = &bman->manager;
man->use_tt = use_tt;
man->func = &i915_ttm_buddy_manager_func;
- ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);
+ ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);
ttm_resource_manager_set_used(man, true);
ttm_set_driver_manager(bdev, type, man);
@@ -228,7 +231,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
- struct i915_buddy_mm *mm = &bman->mm;
+ struct drm_buddy *mm = &bman->mm;
int ret;
ttm_resource_manager_set_used(man, false);
@@ -240,8 +243,8 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
ttm_set_driver_manager(bdev, type, NULL);
mutex_lock(&bman->lock);
- i915_buddy_free_list(mm, &bman->reserved);
- i915_buddy_fini(mm);
+ drm_buddy_free_list(mm, &bman->reserved);
+ drm_buddy_fini(mm);
mutex_unlock(&bman->lock);
ttm_resource_manager_cleanup(man);
@@ -264,11 +267,11 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
u64 start, u64 size)
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
- struct i915_buddy_mm *mm = &bman->mm;
+ struct drm_buddy *mm = &bman->mm;
int ret;
mutex_lock(&bman->lock);
- ret = i915_buddy_alloc_range(mm, &bman->reserved, start, size);
+ ret = drm_buddy_alloc_range(mm, &bman->reserved, start, size);
mutex_unlock(&bman->lock);
return ret;
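
After the conversion, the manager's calls map one-to-one onto the shared helper. A condensed sketch of the lifecycle as used above, with locking via the manager's own mutex (error handling trimmed):

	struct drm_buddy mm;
	struct drm_buddy_block *block;
	LIST_HEAD(reserved);
	int err;

	err = drm_buddy_init(&mm, size, chunk_size);

	mutex_lock(&bman->lock);
	block = drm_buddy_alloc_blocks(&mm, order);   /* was i915_buddy_alloc() */
	mutex_unlock(&bman->lock);

	err = drm_buddy_alloc_range(&mm, &reserved, start, range_size); /* reservations */

	mutex_lock(&bman->lock);
	drm_buddy_free_list(&mm, &reserved);          /* was i915_buddy_free_list() */
	mutex_unlock(&bman->lock);
	drm_buddy_fini(&mm);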
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
index 0722d33f3e14..312077941411 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
@@ -13,7 +13,7 @@
struct ttm_device;
struct ttm_resource_manager;
-struct i915_buddy_mm;
+struct drm_buddy;
/**
* struct i915_ttm_buddy_resource
@@ -28,7 +28,7 @@ struct i915_buddy_mm;
struct i915_ttm_buddy_resource {
struct ttm_resource base;
struct list_head blocks;
- struct i915_buddy_mm *mm;
+ struct drm_buddy *mm;
};
/**
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 31a105bc1792..c97323973f9b 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
drm_info(&dev_priv->drm,
"balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
- ret = i915_gem_gtt_reserve(&ggtt->vm, node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 68cf1d392250..845cd88f8313 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -38,6 +38,18 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
+#include "i915_vma_resource.h"
+
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+ /*
+ * We may be forced to unbind when the vm is dead, to clean it up.
+ * This is the only exception to the requirement of the object lock
+ * being held.
+ */
+ if (atomic_read(&vma->vm->open))
+ assert_object_held_shared(vma->obj);
+}
static struct kmem_cache *slab_vmas;
@@ -285,7 +297,7 @@ struct i915_vma_work {
struct dma_fence_work base;
struct i915_address_space *vm;
struct i915_vm_pt_stash stash;
- struct i915_vma *vma;
+ struct i915_vma_resource *vma_res;
struct drm_i915_gem_object *pinned;
struct i915_sw_dma_fence_cb cb;
enum i915_cache_level cache_level;
@@ -295,23 +307,24 @@ struct i915_vma_work {
static void __vma_bind(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- struct i915_vma *vma = vw->vma;
+ struct i915_vma_resource *vma_res = vw->vma_res;
+
+ vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
+ vma_res, vw->cache_level, vw->flags);
- vma->ops->bind_vma(vw->vm, &vw->stash,
- vma, vw->cache_level, vw->flags);
}
static void __vma_release(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- if (vw->pinned) {
- __i915_gem_object_unpin_pages(vw->pinned);
+ if (vw->pinned)
i915_gem_object_put(vw->pinned);
- }
i915_vm_free_pt_stash(vw->vm, &vw->stash);
i915_vm_put(vw->vm);
+ if (vw->vma_res)
+ i915_vma_resource_put(vw->vma_res);
}
static const struct dma_fence_work_ops bind_ops = {
@@ -375,12 +388,27 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
#define i915_vma_verify_bind_complete(_vma) 0
#endif
+I915_SELFTEST_EXPORT void
+i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
+ struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
+ obj->mm.rsgt, i915_gem_object_is_readonly(obj),
+ i915_gem_object_is_lmem(obj), obj->mm.region,
+ vma->ops, vma->private, vma->node.start,
+ vma->node.size, vma->size);
+}
+
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
* @work: preallocated worker for allocating and binding the PTE
+ * @vma_res: pointer to a preallocated vma resource. The resource is either
+ * consumed or freed.
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
@@ -389,10 +417,12 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags,
- struct i915_vma_work *work)
+ struct i915_vma_work *work,
+ struct i915_vma_resource *vma_res)
{
u32 bind_flags;
u32 vma_flags;
+ int ret;
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -400,11 +430,15 @@ int i915_vma_bind(struct i915_vma *vma,
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
- vma->vm->total)))
+ vma->vm->total))) {
+ i915_vma_resource_free(vma_res);
return -ENODEV;
+ }
- if (GEM_DEBUG_WARN_ON(!flags))
+ if (GEM_DEBUG_WARN_ON(!flags)) {
+ i915_vma_resource_free(vma_res);
return -EINVAL;
+ }
bind_flags = flags;
bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
@@ -413,16 +447,44 @@ int i915_vma_bind(struct i915_vma *vma,
vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
bind_flags &= ~vma_flags;
- if (bind_flags == 0)
+ if (bind_flags == 0) {
+ i915_vma_resource_free(vma_res);
return 0;
+ }
GEM_BUG_ON(!atomic_read(&vma->pages_count));
+ /* Wait for or await async unbinds touching our range */
+ if (work && bind_flags & vma->vm->bind_async_flags)
+ ret = i915_vma_resource_bind_dep_await(vma->vm,
+ &work->base.chain,
+ vma->node.start,
+ vma->node.size,
+ true,
+ GFP_NOWAIT |
+ __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+ else
+ ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
+ vma->node.size, true);
+ if (ret) {
+ i915_vma_resource_free(vma_res);
+ return ret;
+ }
+
+ if (vma->resource || !vma_res) {
+ /* Rebinding with an additional I915_VMA_*_BIND */
+ GEM_WARN_ON(!vma_flags);
+ i915_vma_resource_free(vma_res);
+ } else {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ }
trace_i915_vma_bind(vma, bind_flags);
if (work && bind_flags & vma->vm->bind_async_flags) {
struct dma_fence *prev;
- work->vma = vma;
+ work->vma_res = i915_vma_resource_get(vma->resource);
work->cache_level = cache_level;
work->flags = bind_flags;
@@ -445,17 +507,25 @@ int i915_vma_bind(struct i915_vma *vma,
work->base.dma.error = 0; /* enable the queue_work() */
- __i915_gem_object_pin_pages(vma->obj);
- work->pinned = i915_gem_object_get(vma->obj);
+ /*
+ * If we don't have the refcounted pages list, keep a reference
+ * on the object to avoid waiting for the async bind to
+ * complete in the object destruction path.
+ */
+ if (!work->vma_res->bi.pages_rsgt)
+ work->pinned = i915_gem_object_get(vma->obj);
} else {
if (vma->obj) {
- int ret;
-
ret = i915_gem_object_wait_moving_fence(vma->obj, true);
- if (ret)
+ if (ret) {
+ i915_vma_resource_free(vma->resource);
+ vma->resource = NULL;
+
return ret;
+ }
}
- vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
+ vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
+ bind_flags);
}
if (vma->obj)
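
The ownership rule documented above (the preallocated resource is consumed on success and freed on every error path) keeps callers simple. Illustrative sketch; i915_vma_resource_alloc() is the allocator introduced elsewhere in this series, so treat the exact name as an assumption:

	/* Assumption: i915_vma_resource_alloc() from i915_vma_resource.h. */
	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res))
		return PTR_ERR(vma_res);

	/* Consumed or freed by the callee; never free vma_res here. */
	err = i915_vma_bind(vma, cache_level, flags, work, vma_res);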
@@ -655,7 +725,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
* 0 on success, negative error code otherwise.
*/
static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u64 size, u64 alignment, u64 flags)
{
unsigned long color;
u64 start, end;
@@ -707,7 +778,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
range_overflows(offset, size, end))
return -EINVAL;
- ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+ ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
size, offset, color,
flags);
if (ret)
@@ -746,7 +817,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
}
- ret = i915_gem_gtt_insert(vma->vm, &vma->node,
+ ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
size, alignment, color,
start, end, flags);
if (ret)
@@ -780,9 +851,17 @@ i915_vma_detach(struct i915_vma *vma)
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
unsigned int bound;
- bool pinned = true;
bound = atomic_read(&vma->flags);
+
+ if (flags & PIN_VALIDATE) {
+ flags &= I915_VMA_BIND_MASK;
+
+ return (flags & bound) == flags;
+ }
+
+ /* with the lock mandatory for unbind, we don't race here */
+ flags &= I915_VMA_BIND_MASK;
do {
if (unlikely(flags & ~bound))
return false;
@@ -790,34 +869,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
return false;
- if (!(bound & I915_VMA_PIN_MASK))
- goto unpinned;
-
GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
return true;
-
-unpinned:
- /*
- * If pin_count==0, but we are bound, check under the lock to avoid
- * racing with a concurrent i915_vma_unbind().
- */
- mutex_lock(&vma->vm->mutex);
- do {
- if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
- pinned = false;
- break;
- }
-
- if (unlikely(flags & ~bound)) {
- pinned = false;
- break;
- }
- } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
- mutex_unlock(&vma->vm->mutex);
-
- return pinned;
}
static struct scatterlist *
@@ -913,30 +968,39 @@ err_st_alloc:
}
static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
+add_padding_pages(unsigned int count,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+
+ return sg;
+}
+
+static struct scatterlist *
+remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
+ unsigned int offset, unsigned int alignment_pad,
+ unsigned int width, unsigned int height,
+ unsigned int src_stride, unsigned int dst_stride,
+ struct sg_table *st, struct scatterlist *sg,
+ unsigned int *gtt_offset)
{
unsigned int row;
if (!width || !height)
return sg;
- if (alignment_pad) {
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, alignment_pad * 4096, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = alignment_pad * 4096;
- sg = sg_next(sg);
- }
+ if (alignment_pad)
+ sg = add_padding_pages(alignment_pad, st, sg);
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
@@ -973,18 +1037,98 @@ remap_pages(struct drm_i915_gem_object *obj,
if (!left)
continue;
+ sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
+ }
+
+ *gtt_offset += alignment_pad + dst_stride * height;
+
+ return sg;
+}
+
+static struct scatterlist *
+remap_contiguous_pages(struct drm_i915_gem_object *obj,
+ unsigned int obj_offset,
+ unsigned int count,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ struct scatterlist *iter;
+ unsigned int offset;
+
+ iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ do {
+ unsigned int len;
+
+ len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0)
+ return sg;
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a conenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+}
+
+static struct scatterlist *
+remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
+ unsigned int obj_offset, unsigned int alignment_pad,
+ unsigned int size,
+ struct sg_table *st, struct scatterlist *sg,
+ unsigned int *gtt_offset)
+{
+ if (!size)
+ return sg;
+
+ if (alignment_pad)
+ sg = add_padding_pages(alignment_pad, st, sg);
+
+ sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
+ sg = sg_next(sg);
+
+ *gtt_offset += alignment_pad + size;
+
+ return sg;
+}
+
+static struct scatterlist *
+remap_color_plane_pages(const struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj,
+ int color_plane,
+ struct sg_table *st, struct scatterlist *sg,
+ unsigned int *gtt_offset)
+{
+ unsigned int alignment_pad = 0;
+
+ if (rem_info->plane_alignment)
+ alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
+
+ if (rem_info->plane[color_plane].linear)
+ sg = remap_linear_color_plane_pages(obj,
+ rem_info->plane[color_plane].offset,
+ alignment_pad,
+ rem_info->plane[color_plane].size,
+ st, sg,
+ gtt_offset);
+
+ else
+ sg = remap_tiled_color_plane_pages(obj,
+ rem_info->plane[color_plane].offset,
+ alignment_pad,
+ rem_info->plane[color_plane].width,
+ rem_info->plane[color_plane].height,
+ rem_info->plane[color_plane].src_stride,
+ rem_info->plane[color_plane].dst_stride,
+ st, sg,
+ gtt_offset);
return sg;
}
@@ -1013,21 +1157,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
st->nents = 0;
sg = st->sgl;
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- unsigned int alignment_pad = 0;
-
- if (rem_info->plane_alignment)
- alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
-
- sg = remap_pages(obj,
- rem_info->plane[i].offset, alignment_pad,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
- st, sg);
-
- gtt_offset += alignment_pad +
- rem_info->plane[i].dst_stride * rem_info->plane[i].height;
- }
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
i915_sg_trim(st);
@@ -1049,9 +1180,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
- struct scatterlist *sg, *iter;
+ struct scatterlist *sg;
unsigned int count = view->partial.size;
- unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -1062,34 +1192,14 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
st->nents = 0;
- do {
- unsigned int len;
-
- len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
+ sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
- return st;
- }
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
+ return st;
err_sg_alloc:
kfree(st);
@@ -1101,7 +1211,6 @@ static int
__i915_vma_get_pages(struct i915_vma *vma)
{
struct sg_table *pages;
- int ret;
/*
* The vma->pages are only valid within the lifespan of the borrowed
@@ -1134,18 +1243,16 @@ __i915_vma_get_pages(struct i915_vma *vma)
break;
}
- ret = 0;
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- pages = NULL;
drm_err(&vma->vm->i915->drm,
- "Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
+ "Failed to get pages for VMA view type %u (%ld)!\n",
+ vma->ggtt_view.type, PTR_ERR(pages));
+ return PTR_ERR(pages);
}
vma->pages = pages;
- return ret;
+ return 0;
}
I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
@@ -1177,25 +1284,14 @@ err_unpin:
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
- struct sg_table *pages = READ_ONCE(vma->pages);
-
GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
if (atomic_sub_return(count, &vma->pages_count) == 0) {
- /*
- * The atomic_sub_return is a read barrier for the READ_ONCE of
- * vma->pages above.
- *
- * READ_ONCE is safe because this is either called from the same
- * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
- *
- * TODO: We're leaving vma->pages dangling, until vma->obj->resv
- * lock is required.
- */
- if (pages != vma->obj->mm.pages) {
- sg_free_table(pages);
- kfree(pages);
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
}
+ vma->pages = NULL;
i915_gem_object_unpin_pages(vma->obj);
}
@@ -1228,6 +1324,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
{
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
+ struct i915_vma_resource *vma_res = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
@@ -1241,7 +1338,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
/* First try and grab the pin without rebinding the vma */
- if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ if (try_qad_pin(vma, flags))
return 0;
err = i915_vma_get_pages(vma);
@@ -1282,6 +1379,12 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
}
}
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res)) {
+ err = PTR_ERR(vma_res);
+ goto err_fence;
+ }
+
/*
* Differentiate between user/kernel vma inside the aliasing-ppgtt.
*
@@ -1302,7 +1405,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
err = mutex_lock_interruptible_nested(&vma->vm->mutex,
!(flags & PIN_GLOBAL));
if (err)
- goto err_fence;
+ goto err_vma_res;
/* No more allocations allowed now we hold vm->mutex */
@@ -1323,7 +1426,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
}
if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
- __i915_vma_pin(vma);
+ if (!(flags & PIN_VALIDATE))
+ __i915_vma_pin(vma);
goto err_unlock;
}
@@ -1332,7 +1436,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
goto err_unlock;
if (!(bound & I915_VMA_BIND_MASK)) {
- err = i915_vma_insert(vma, size, alignment, flags);
+ err = i915_vma_insert(vma, ww, size, alignment, flags);
if (err)
goto err_active;
@@ -1343,7 +1447,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!vma->pages);
err = i915_vma_bind(vma,
vma->obj->cache_level,
- flags, work);
+ flags, work, vma_res);
+ vma_res = NULL;
if (err)
goto err_remove;
@@ -1352,8 +1457,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- __i915_vma_pin(vma);
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ if (!(flags & PIN_VALIDATE)) {
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ }
GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
@@ -1366,6 +1473,8 @@ err_active:
i915_active_release(&vma->active);
err_unlock:
mutex_unlock(&vma->vm->mutex);
+err_vma_res:
+ i915_vma_resource_free(vma_res);
err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
@@ -1412,7 +1521,12 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
/* Unlike i915_vma_pin, we don't take no for an answer! */
flush_idle_contexts(vm->gt);
if (mutex_lock_interruptible(&vm->mutex) == 0) {
- i915_gem_evict_vm(vm);
+ /*
+ * We pass NULL ww here, as we don't want to unbind
+ * locked objects when called from execbuf, once pinning
+ * is removed. This would probably regress badly.
+ */
+ i915_gem_evict_vm(vm, NULL);
mutex_unlock(&vm->mutex);
}
} while (1);
@@ -1516,6 +1630,7 @@ void i915_vma_release(struct kref *ref)
i915_vm_put(vma->vm);
i915_active_fini(&vma->active);
+ GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
}
@@ -1548,8 +1663,16 @@ void i915_vma_parked(struct intel_gt *gt)
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
- INIT_LIST_HEAD(&vma->closed_link);
- __i915_vma_put(vma);
+ if (i915_gem_object_trylock(obj, NULL)) {
+ INIT_LIST_HEAD(&vma->closed_link);
+ __i915_vma_put(vma);
+ i915_gem_object_unlock(obj);
+ } else {
+ /* back you go.. */
+ spin_lock_irq(&gt->closed_lock);
+ list_add(&vma->closed_link, &gt->closed_vma);
+ spin_unlock_irq(&gt->closed_lock);
+ }
i915_gem_object_put(obj);
i915_vm_close(vm);
@@ -1600,8 +1723,6 @@ static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *
{
int err;
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
-
/* Wait for the vma to be bound before we start! */
err = __i915_request_await_bind(rq, vma);
if (err)
@@ -1620,6 +1741,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
assert_object_held(obj);
+ GEM_BUG_ON(!vma->pages);
+
err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;
@@ -1662,9 +1785,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-void __i915_vma_evict(struct i915_vma *vma)
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
+ struct i915_vma_resource *vma_res = vma->resource;
+ struct dma_fence *unbind_fence;
+
GEM_BUG_ON(i915_vma_is_pinned(vma));
+ assert_vma_held_evict(vma);
if (i915_vma_is_map_and_fenceable(vma)) {
/* Force a pagefault for domain tracking on next user access */
@@ -1694,15 +1821,36 @@ void __i915_vma_evict(struct i915_vma *vma)
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(atomic_read(&vma->vm->open))) {
- trace_i915_vma_unbind(vma);
- vma->ops->unbind_vma(vma->vm, vma);
- }
+ /* Object backend must be async capable. */
+ GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
+
+ /* If vm is not open, unbind is a nop. */
+ vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
+ atomic_read(&vma->vm->open);
+ trace_i915_vma_unbind(vma);
+
+ unbind_fence = i915_vma_resource_unbind(vma_res);
+ vma->resource = NULL;
+
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
i915_vma_detach(vma);
+
+ if (!async && unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+
+ /*
+ * Binding itself may not have completed until the unbind fence signals,
+ * so don't drop the pages until that happens, unless the resource is
+ * async_capable.
+ */
+
vma_unbind_pages(vma);
+ return unbind_fence;
}
int __i915_vma_unbind(struct i915_vma *vma)
@@ -1710,6 +1858,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
int ret;
lockdep_assert_held(&vma->vm->mutex);
+ assert_vma_held_evict(vma);
if (!drm_mm_node_allocated(&vma->node))
return 0;
@@ -1729,18 +1878,55 @@ int __i915_vma_unbind(struct i915_vma *vma)
return ret;
GEM_BUG_ON(i915_vma_is_active(vma));
- __i915_vma_evict(vma);
+ __i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
+static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
+{
+ struct dma_fence *fence;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return NULL;
+
+ if (i915_vma_is_pinned(vma) ||
+ &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
+ return ERR_PTR(-EAGAIN);
+
+ /*
+ * We probably need to replace this with awaiting the fences of the
+ * object's dma_resv when the vma active goes away. When doing that
+ * we need to be careful to not add the vma_resource unbind fence
+ * immediately to the object's dma_resv, because then unbinding
+ * the next vma from the object, in case there are many, will
+ * actually await the unbinding of the previous vmas, which is
+ * undesirable.
+ */
+ if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL |
+ I915_ACTIVE_AWAIT_ACTIVE) < 0) {
+ return ERR_PTR(-EBUSY);
+ }
+
+ fence = __i915_vma_evict(vma, true);
+
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
+
+ return fence;
+}
+
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref = 0;
int err;
+ assert_object_held_shared(vma->obj);
+
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
@@ -1771,6 +1957,79 @@ out_rpm:
return err;
}
+int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
+ struct dma_fence *fence;
+ int err;
+
+ /*
+ * We need the dma-resv lock since we add the
+ * unbind fence to the dma-resv object.
+ */
+ assert_object_held(obj);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ if (!obj->mm.rsgt)
+ return -EBUSY;
+
+ err = dma_resv_reserve_shared(obj->base.resv, 1);
+ if (err)
+ return -EBUSY;
+
+ /*
+ * It would be great if we could grab this wakeref from the
+ * async unbind work if needed, but we can't because it uses
+ * kmalloc and it's in the dma-fence signalling critical path.
+ */
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ if (trylock_vm && !mutex_trylock(&vm->mutex)) {
+ err = -EBUSY;
+ goto out_rpm;
+ } else if (!trylock_vm) {
+ err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
+ if (err)
+ goto out_rpm;
+ }
+
+ fence = __i915_vma_unbind_async(vma);
+ mutex_unlock(&vm->mutex);
+ if (IS_ERR_OR_NULL(fence)) {
+ err = PTR_ERR_OR_ZERO(fence);
+ goto out_rpm;
+ }
+
+ dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_fence_put(fence);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return err;
+}
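+
+/*
+ * A minimal calling sketch: the caller must already hold the object
+ * dma-resv lock, since the unbind fence is installed in the object's
+ * dma-resv:
+ *
+ *	i915_gem_object_lock(vma->obj, NULL);
+ *	err = i915_vma_unbind_async(vma, false);
+ *	i915_gem_object_unlock(vma->obj);
+ */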
+
+int i915_vma_unbind_unlocked(struct i915_vma *vma)
+{
+ int err;
+
+ i915_gem_object_lock(vma->obj, NULL);
+ err = i915_vma_unbind(vma);
+ i915_gem_object_unlock(vma->obj);
+
+ return err;
+}
+
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 32719431b3df..011af044ad4f 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -37,6 +37,7 @@
#include "i915_active.h"
#include "i915_request.h"
+#include "i915_vma_resource.h"
#include "i915_vma_types.h"
struct i915_vma *
@@ -204,16 +205,19 @@ struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags,
- struct i915_vma_work *work);
+ struct i915_vma_work *work,
+ struct i915_vma_resource *vma_res);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
-void __i915_vma_evict(struct i915_vma *vma);
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
+int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
@@ -337,12 +341,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*/
void i915_vma_unpin_iomap(struct i915_vma *vma);
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
/**
* i915_vma_pin_fence - pin fencing state
* @vma: vma to pin fencing for
@@ -428,6 +426,26 @@ static inline int i915_vma_sync(struct i915_vma *vma)
return i915_active_wait(&vma->active);
}
+/**
+ * i915_vma_get_current_resource - Get the current resource of the vma
+ * @vma: The vma to get the current resource from.
+ *
+ * It's illegal to call this function if the vma is not bound.
+ *
+ * Return: A refcounted pointer to the current vma resource
+ * of the vma, assuming the vma is bound.
+ */
+static inline struct i915_vma_resource *
+i915_vma_get_current_resource(struct i915_vma *vma)
+{
+ return i915_vma_resource_get(vma->resource);
+}
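+
+/*
+ * A minimal usage sketch, assuming the vma is known to be bound; the
+ * record_gtt_range() consumer is hypothetical:
+ *
+ *	struct i915_vma_resource *vma_res =
+ *		i915_vma_get_current_resource(vma);
+ *
+ *	record_gtt_range(vma_res->start, vma_res->node_size);
+ *	i915_vma_resource_put(vma_res);
+ */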
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
+ struct i915_vma *vma);
+#endif
+
void i915_vma_module_exit(void);
int i915_vma_module_init(void);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
new file mode 100644
index 000000000000..57ae92ba8af1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
+
+#include "i915_sw_fence.h"
+#include "i915_vma_resource.h"
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+
+#include "gt/intel_gtt.h"
+
+static struct kmem_cache *slab_vma_resources;
+
+/**
+ * DOC: vma resources
+ * We use a per-vm interval tree to keep track of vma_resources
+ * scheduled for unbind but not yet unbound. The tree is protected by
+ * the vm mutex, and nodes are removed just after the unbind fence signals.
+ * The removal takes the vm mutex from a kernel thread, which we need to
+ * keep in mind so that we don't grab the mutex and try to wait for all
+ * pending unbinds to complete, because that would temporarily block many
+ * of the workqueue threads, and people will get angry.
+ *
+ * We should consider using a single ordered fence per VM instead but that
+ * requires ordering the unbinds and might introduce unnecessary waiting
+ * for unrelated unbinds. The amount of code will probably be roughly the
+ * same due to the simplicity of the interval tree interface.
+ *
+ * Another drawback of the interval tree is that the complexity of
+ * insertion and removal of fences increases as O(log(pending_unbinds))
+ * instead of the O(1) of a single fence without an interval tree.
+ */
+#define VMA_RES_START(_node) ((_node)->start)
+#define VMA_RES_LAST(_node) ((_node)->start + (_node)->node_size - 1)
+INTERVAL_TREE_DEFINE(struct i915_vma_resource, rb,
+ u64, __subtree_last,
+ VMA_RES_START, VMA_RES_LAST, static, vma_res_itree);
+
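+/*
+ * INTERVAL_TREE_DEFINE() generates the static helpers used below:
+ * vma_res_itree_insert(), vma_res_itree_remove(),
+ * vma_res_itree_iter_first() and vma_res_itree_iter_next(), each keyed
+ * on the [start, start + node_size - 1] interval of a vma resource.
+ */
+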
+/**
+ * i915_vma_resource_alloc - Allocate a vma resource
+ *
+ * Return: A pointer to a cleared struct i915_vma_resource or
+ * a -ENOMEM error pointer if allocation fails.
+ */
+struct i915_vma_resource *i915_vma_resource_alloc(void)
+{
+ struct i915_vma_resource *vma_res =
+ kmem_cache_zalloc(slab_vma_resources, GFP_KERNEL);
+
+ return vma_res ? vma_res : ERR_PTR(-ENOMEM);
+}
+
+/**
+ * i915_vma_resource_free - Free a vma resource
+ * @vma_res: The vma resource to free.
+ */
+void i915_vma_resource_free(struct i915_vma_resource *vma_res)
+{
+ if (vma_res)
+ kmem_cache_free(slab_vma_resources, vma_res);
+}
+
+/* Callbacks for the unbind dma-fence. */
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "vma unbind fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static void unbind_fence_free_rcu(struct rcu_head *head)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(head, typeof(*vma_res), unbind_fence.rcu);
+
+ i915_vma_resource_free(vma_res);
+}
+
+static void unbind_fence_release(struct dma_fence *fence)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(fence, typeof(*vma_res), unbind_fence);
+
+ i915_sw_fence_fini(&vma_res->chain);
+
+ call_rcu(&fence->rcu, unbind_fence_free_rcu);
+}
+
+static struct dma_fence_ops unbind_fence_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+ .release = unbind_fence_release,
+};
+
+static void __i915_vma_resource_unhold(struct i915_vma_resource *vma_res)
+{
+ struct i915_address_space *vm;
+
+ if (!refcount_dec_and_test(&vma_res->hold_count))
+ return;
+
+ dma_fence_signal(&vma_res->unbind_fence);
+
+ vm = vma_res->vm;
+ if (vma_res->wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, vma_res->wakeref);
+
+ vma_res->vm = NULL;
+ if (!RB_EMPTY_NODE(&vma_res->rb)) {
+ mutex_lock(&vm->mutex);
+ vma_res_itree_remove(vma_res, &vm->pending_unbind);
+ mutex_unlock(&vm->mutex);
+ }
+
+ if (vma_res->bi.pages_rsgt)
+ i915_refct_sgt_put(vma_res->bi.pages_rsgt);
+}
+
+/**
+ * i915_vma_resource_unhold - Unhold the signaling of the vma resource unbind
+ * fence.
+ * @vma_res: The vma resource.
+ * @lockdep_cookie: The lockdep cookie returned from i915_vma_resource_hold.
+ *
+ * The function may leave a dma_fence signalling critical section.
+ */
+void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
+ bool lockdep_cookie)
+{
+ dma_fence_end_signalling(lockdep_cookie);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ unsigned long irq_flags;
+
+ /* Inefficient open-coded might_lock_irqsave() */
+ spin_lock_irqsave(&vma_res->lock, irq_flags);
+ spin_unlock_irqrestore(&vma_res->lock, irq_flags);
+ }
+
+ __i915_vma_resource_unhold(vma_res);
+}
+
+/**
+ * i915_vma_resource_hold - Hold the signaling of the vma resource unbind fence.
+ * @vma_res: The vma resource.
+ * @lockdep_cookie: Pointer to a bool serving as a lockdep cookie that should
+ * be given as an argument to the pairing i915_vma_resource_unhold.
+ *
+ * If returning true, the function enters a dma_fence signalling critical
+ * section if not in one already.
+ *
+ * Return: true if holding successful, false if not.
+ */
+bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
+ bool *lockdep_cookie)
+{
+ bool held = refcount_inc_not_zero(&vma_res->hold_count);
+
+ if (held)
+ *lockdep_cookie = dma_fence_begin_signalling();
+
+ return held;
+}
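+
+/*
+ * A minimal sketch of the intended hold/unhold pairing around access to
+ * memory that the unbind fence keeps alive. The capture_ptes() consumer
+ * is hypothetical:
+ *
+ *	bool lockdep_cookie;
+ *
+ *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
+ *		capture_ptes(vma_res->bi.pages);
+ *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
+ *	}
+ */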
+
+static void i915_vma_resource_unbind_work(struct work_struct *work)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(work, typeof(*vma_res), work);
+ struct i915_address_space *vm = vma_res->vm;
+ bool lockdep_cookie;
+
+ lockdep_cookie = dma_fence_begin_signalling();
+ if (likely(atomic_read(&vm->open)))
+ vma_res->ops->unbind_vma(vm, vma_res);
+
+ dma_fence_end_signalling(lockdep_cookie);
+ __i915_vma_resource_unhold(vma_res);
+ i915_vma_resource_put(vma_res);
+}
+
+static int
+i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(fence, typeof(*vma_res), chain);
+ struct dma_fence *unbind_fence =
+ &vma_res->unbind_fence;
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ dma_fence_get(unbind_fence);
+ if (vma_res->immediate_unbind) {
+ i915_vma_resource_unbind_work(&vma_res->work);
+ } else {
+ INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
+ queue_work(system_unbound_wq, &vma_res->work);
+ }
+ break;
+ case FENCE_FREE:
+ i915_vma_resource_put(vma_res);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_vma_resource_unbind - Unbind a vma resource
+ * @vma_res: The vma resource to unbind.
+ *
+ * At this point this function does little more than publish a fence that
+ * signals immediately unless signaling is held back.
+ *
+ * Return: A refcounted pointer to a dma-fence that signals when unbinding is
+ * complete.
+ */
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+{
+ struct i915_address_space *vm = vma_res->vm;
+
+ /* Reference for the sw fence */
+ i915_vma_resource_get(vma_res);
+
+ /* Caller must already have a wakeref in this case. */
+ if (vma_res->needs_wakeref)
+ vma_res->wakeref = intel_runtime_pm_get_if_in_use(&vm->i915->runtime_pm);
+
+ if (atomic_read(&vma_res->chain.pending) <= 1) {
+ RB_CLEAR_NODE(&vma_res->rb);
+ vma_res->immediate_unbind = 1;
+ } else {
+ vma_res_itree_insert(vma_res, &vma_res->vm->pending_unbind);
+ }
+
+ i915_sw_fence_commit(&vma_res->chain);
+
+ return &vma_res->unbind_fence;
+}
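+
+/*
+ * A minimal sketch of synchronous consumption of the returned fence,
+ * mirroring what __i915_vma_evict() does in the non-async case:
+ *
+ *	struct dma_fence *fence = i915_vma_resource_unbind(vma_res);
+ *
+ *	dma_fence_wait(fence, false);
+ *	dma_fence_put(fence);
+ */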
+
+/**
+ * __i915_vma_resource_init - Initialize a vma resource.
+ * @vma_res: The vma resource to initialize
+ *
+ * Initializes the private members of a vma resource.
+ */
+void __i915_vma_resource_init(struct i915_vma_resource *vma_res)
+{
+ spin_lock_init(&vma_res->lock);
+ dma_fence_init(&vma_res->unbind_fence, &unbind_fence_ops,
+ &vma_res->lock, 0, 0);
+ refcount_set(&vma_res->hold_count, 1);
+ i915_sw_fence_init(&vma_res->chain, i915_vma_resource_fence_notify);
+}
+
+static void
+i915_vma_resource_color_adjust_range(struct i915_address_space *vm,
+ u64 *start,
+ u64 *end)
+{
+ if (i915_vm_has_cache_coloring(vm)) {
+ if (*start)
+ *start -= I915_GTT_PAGE_SIZE;
+ *end += I915_GTT_PAGE_SIZE;
+ }
+}
+
+/**
+ * i915_vma_resource_bind_dep_sync - Wait for / sync all unbinds touching a
+ * certain vm range.
+ * @vm: The vm to look at.
+ * @offset: The range start.
+ * @size: The range size.
+ * @intr: Whether to wait interruptibly.
+ *
+ * The function needs to be called with the vm lock held.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
+ */
+int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
+ u64 offset,
+ u64 size,
+ bool intr)
+{
+ struct i915_vma_resource *node;
+ u64 last = offset + size - 1;
+
+ lockdep_assert_held(&vm->mutex);
+ might_sleep();
+
+ i915_vma_resource_color_adjust_range(vm, &offset, &last);
+ node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
+ while (node) {
+ int ret = dma_fence_wait(&node->unbind_fence, intr);
+
+ if (ret)
+ return ret;
+
+ node = vma_res_itree_iter_next(node, offset, last);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_vma_resource_bind_dep_sync_all - Wait for / sync all unbinds of a vm,
+ * releasing the vm lock while waiting.
+ * @vm: The vm to look at.
+ *
+ * The function may not be called with the vm lock held.
+ * Typically this is called at vm destruction to finish any pending
+ * unbind operations. The vm mutex is released while waiting to avoid
+ * stalling kernel workqueues trying to grab the mutex.
+ */
+void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm)
+{
+ struct i915_vma_resource *node;
+ struct dma_fence *fence;
+
+ do {
+ fence = NULL;
+ mutex_lock(&vm->mutex);
+ node = vma_res_itree_iter_first(&vm->pending_unbind, 0,
+ U64_MAX);
+ if (node)
+ fence = dma_fence_get_rcu(&node->unbind_fence);
+ mutex_unlock(&vm->mutex);
+
+ if (fence) {
+ /*
+ * The wait makes sure the node eventually removes
+ * itself from the tree.
+ */
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ } while (node);
+}
+
+/**
+ * i915_vma_resource_bind_dep_await - Have a struct i915_sw_fence await all
+ * pending unbinds in a certain range of a vm.
+ * @vm: The vm to look at.
+ * @sw_fence: The struct i915_sw_fence that will be awaiting the unbinds.
+ * @offset: The range start.
+ * @size: The range size.
+ * @intr: Whether to wait interruptibly.
+ * @gfp: Allocation mode for memory allocations.
+ *
+ * The function makes @sw_fence await all pending unbinds in a certain
+ * vm range before calling the complete notifier. To be able to await
+ * each individual unbind, the function needs to allocate memory using
+ * the @gfp allocation mode. If that fails, the function will instead
+ * wait for the unbind fence to signal, using @intr to judge whether to
+ * wait interruptibly or not. Note that @gfp should ideally be selected so
+ * as to avoid any expensive memory allocation stalls and rather fail and
+ * synchronize itself. For now the vm mutex is required when calling this
+ * function, which means that @gfp can't call into direct reclaim. In
+ * reality this means that during heavy memory pressure, we will sync in
+ * this function.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
+ */
+int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
+ struct i915_sw_fence *sw_fence,
+ u64 offset,
+ u64 size,
+ bool intr,
+ gfp_t gfp)
+{
+ struct i915_vma_resource *node;
+ u64 last = offset + size - 1;
+
+ lockdep_assert_held(&vm->mutex);
+ might_alloc(gfp);
+ might_sleep();
+
+ i915_vma_resource_color_adjust_range(vm, &offset, &last);
+ node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
+ while (node) {
+ int ret;
+
+ ret = i915_sw_fence_await_dma_fence(sw_fence,
+ &node->unbind_fence,
+ 0, gfp);
+ if (ret < 0) {
+ ret = dma_fence_wait(&node->unbind_fence, intr);
+ if (ret)
+ return ret;
+ }
+
+ node = vma_res_itree_iter_next(node, offset, last);
+ }
+
+ return 0;
+}
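+
+/*
+ * A minimal calling sketch, mirroring i915_vma_bind(): pick a gfp mode
+ * that fails rather than stalls, so that under memory pressure the
+ * synchronous fallback wait above is taken instead:
+ *
+ *	ret = i915_vma_resource_bind_dep_await(vm, &work->base.chain,
+ *					       vma->node.start,
+ *					       vma->node.size, true,
+ *					       GFP_NOWAIT |
+ *					       __GFP_RETRY_MAYFAIL |
+ *					       __GFP_NOWARN);
+ */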
+
+void i915_vma_resource_module_exit(void)
+{
+ kmem_cache_destroy(slab_vma_resources);
+}
+
+int __init i915_vma_resource_module_init(void)
+{
+ slab_vma_resources = KMEM_CACHE(i915_vma_resource, SLAB_HWCACHE_ALIGN);
+ if (!slab_vma_resources)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
new file mode 100644
index 000000000000..25913913baa6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_VMA_RESOURCE_H__
+#define __I915_VMA_RESOURCE_H__
+
+#include <linux/dma-fence.h>
+#include <linux/refcount.h>
+
+#include "i915_gem.h"
+#include "i915_scatterlist.h"
+#include "i915_sw_fence.h"
+#include "intel_runtime_pm.h"
+
+struct intel_memory_region;
+
+struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table, i.e. the mask of
+ * the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+};
+
+/**
+ * struct i915_vma_resource - Snapshotted unbind information.
+ * @unbind_fence: Fence to mark unbinding complete. Note that this fence
+ * is not considered published until the unbind is scheduled, and as such
+ * it is illegal to access this fence before the unbind has been
+ * scheduled, other than for refcounting.
+ * @lock: The @unbind_fence lock.
+ * @hold_count: Number of holders blocking the fence from finishing.
+ * The vma itself is keeping a hold, which is released when unbind
+ * is scheduled.
+ * @work: Work struct for deferred unbind work.
+ * @chain: Pointer to struct i915_sw_fence used to await dependencies.
+ * @rb: Rb node for the vm's pending unbind interval tree.
+ * @__subtree_last: Interval tree private member.
+ * @vm: non-refcounted pointer to the vm. This is for internal use only and
+ * this member is cleared after the vma_resource unbind.
+ * @mr: The memory region of the object pointed to by the vma.
+ * @ops: Pointer to the backend i915_vma_ops.
+ * @private: Bind backend private info.
+ * @start: Offset into the address space of bind range start.
+ * @node_size: Size of the allocated range manager node.
+ * @vma_size: Bind size.
+ * @page_sizes_gtt: Resulting page sizes from the bind operation.
+ * @bound_flags: Flags indicating binding status.
+ * @allocated: Backend private data. TODO: Should move into @private.
+ * @immediate_unbind: Unbind can be done immediately and doesn't need to be
+ * deferred to a work item awaiting unsignaled fences. This is a hack
+ * (dma_fence_work uses a fence flag for this, but this seems slightly
+ * cleaner).
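+ * @needs_wakeref: Whether the unbind, once scheduled, needs a runtime pm
+ * wakeref. Set for GGTT binds while the vm is still open.
+ * @wakeref: The runtime pm wakeref held while the unbind is pending. Put
+ * when the unbind fence signals.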
+ *
+ * The lifetime of a struct i915_vma_resource is from a binding request to
+ * the actual possible asynchronous unbind has completed.
+ */
+struct i915_vma_resource {
+ struct dma_fence unbind_fence;
+ /* See above for description of the lock. */
+ spinlock_t lock;
+ refcount_t hold_count;
+ struct work_struct work;
+ struct i915_sw_fence chain;
+ struct rb_node rb;
+ u64 __subtree_last;
+ struct i915_address_space *vm;
+ intel_wakeref_t wakeref;
+
+ /**
+ * struct i915_vma_bindinfo - Information needed for async bind
+ * only, but that can be dropped after the bind has taken place.
+ * Consider making this a separate argument to the bind_vma
+ * op, coalescing with other arguments like vm, stash, cache_level
+ * and flags.
+ * @pages: The pages sg-table.
+ * @page_sizes: Page sizes of the pages.
+ * @pages_rsgt: Refcounted sg-table when delayed object destruction
+ * is supported. May be NULL.
+ * @readonly: Whether the vma should be bound read-only.
+ * @lmem: Whether the vma points to lmem.
+ */
+ struct i915_vma_bindinfo {
+ struct sg_table *pages;
+ struct i915_page_sizes page_sizes;
+ struct i915_refct_sgt *pages_rsgt;
+ bool readonly:1;
+ bool lmem:1;
+ } bi;
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+ struct intel_memory_region *mr;
+#endif
+ const struct i915_vma_ops *ops;
+ void *private;
+ u64 start;
+ u64 node_size;
+ u64 vma_size;
+ u32 page_sizes_gtt;
+
+ u32 bound_flags;
+ bool allocated:1;
+ bool immediate_unbind:1;
+ bool needs_wakeref:1;
+};
+
+bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
+ bool *lockdep_cookie);
+
+void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
+ bool lockdep_cookie);
+
+struct i915_vma_resource *i915_vma_resource_alloc(void);
+
+void i915_vma_resource_free(struct i915_vma_resource *vma_res);
+
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+
+void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
+
+/**
+ * i915_vma_resource_get - Take a reference on a vma resource
+ * @vma_res: The vma resource on which to take a reference.
+ *
+ * Return: The @vma_res pointer
+ */
+static inline struct i915_vma_resource
+*i915_vma_resource_get(struct i915_vma_resource *vma_res)
+{
+ dma_fence_get(&vma_res->unbind_fence);
+ return vma_res;
+}
+
+/**
+ * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
+ * @vma_res: The resource
+ */
+static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
+{
+ dma_fence_put(&vma_res->unbind_fence);
+}
+
+/**
+ * i915_vma_resource_init - Initialize a vma resource.
+ * @vma_res: The vma resource to initialize
+ * @vm: Pointer to the vm.
+ * @pages: The pages sg-table.
+ * @page_sizes: Page sizes of the pages.
+ * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
+ * delayed destruction.
+ * @readonly: Whether the vma should be bound read-only.
+ * @lmem: Whether the vma points to lmem.
+ * @mr: The memory region of the object the vma points to.
+ * @ops: The backend ops.
+ * @private: Bind backend private info.
+ * @start: Offset into the address space of bind range start.
+ * @node_size: Size of the allocated range manager node.
+ * @size: Bind size.
+ *
+ * Initializes a vma resource allocated using i915_vma_resource_alloc().
+ * The reason for having separate allocate and initialize functions is that
+ * initialization may need to be performed from under a lock where
+ * allocation is not allowed.
+ */
+static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
+ struct i915_address_space *vm,
+ struct sg_table *pages,
+ const struct i915_page_sizes *page_sizes,
+ struct i915_refct_sgt *pages_rsgt,
+ bool readonly,
+ bool lmem,
+ struct intel_memory_region *mr,
+ const struct i915_vma_ops *ops,
+ void *private,
+ u64 start,
+ u64 node_size,
+ u64 size)
+{
+ __i915_vma_resource_init(vma_res);
+ vma_res->vm = vm;
+ vma_res->bi.pages = pages;
+ vma_res->bi.page_sizes = *page_sizes;
+ if (pages_rsgt)
+ vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
+ vma_res->bi.readonly = readonly;
+ vma_res->bi.lmem = lmem;
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+ vma_res->mr = mr;
+#endif
+ vma_res->ops = ops;
+ vma_res->private = private;
+ vma_res->start = start;
+ vma_res->node_size = node_size;
+ vma_res->vma_size = size;
+}
+
+static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
+{
+ GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
+ if (vma_res->bi.pages_rsgt)
+ i915_refct_sgt_put(vma_res->bi.pages_rsgt);
+ i915_sw_fence_fini(&vma_res->chain);
+}
+
+int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
+ u64 offset,
+ u64 size,
+ bool intr);
+
+int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
+ struct i915_sw_fence *sw_fence,
+ u64 offset,
+ u64 size,
+ bool intr,
+ gfp_t gfp);
+
+void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);
+
+void i915_vma_resource_module_exit(void);
+
+int i915_vma_resource_module_init(void);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c
deleted file mode 100644
index 2949ceea9884..000000000000
--- a/drivers/gpu/drm/i915/i915_vma_snapshot.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#include "i915_vma_snapshot.h"
-#include "i915_vma_types.h"
-#include "i915_vma.h"
-
-/**
- * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from
- * a struct i915_vma.
- * @vsnap: The i915_vma_snapshot to init.
- * @vma: A struct i915_vma used to initialize @vsnap.
- * @name: Name associated with the snapshot. The character pointer needs to
- * stay alive over the lifitime of the shapsot
- */
-void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name)
-{
- if (!i915_vma_is_pinned(vma))
- assert_object_held(vma->obj);
-
- vsnap->name = name;
- vsnap->size = vma->size;
- vsnap->obj_size = vma->obj->base.size;
- vsnap->gtt_offset = vma->node.start;
- vsnap->gtt_size = vma->node.size;
- vsnap->page_sizes = vma->page_sizes.gtt;
- vsnap->pages = vma->pages;
- vsnap->pages_rsgt = NULL;
- vsnap->mr = NULL;
- if (vma->obj->mm.rsgt)
- vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
- vsnap->mr = vma->obj->mm.region;
- kref_init(&vsnap->kref);
- vsnap->vma_resource = &vma->active;
- vsnap->onstack = false;
- vsnap->present = true;
-}
-
-/**
- * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from
- * a struct i915_vma, but avoid kfreeing it on last put.
- * @vsnap: The i915_vma_snapshot to init.
- * @vma: A struct i915_vma used to initialize @vsnap.
- * @name: Name associated with the snapshot. The character pointer needs to
- * stay alive over the lifitime of the shapsot
- */
-void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name)
-{
- i915_vma_snapshot_init(vsnap, vma, name);
- vsnap->onstack = true;
-}
-
-static void vma_snapshot_release(struct kref *ref)
-{
- struct i915_vma_snapshot *vsnap =
- container_of(ref, typeof(*vsnap), kref);
-
- vsnap->present = false;
- if (vsnap->pages_rsgt)
- i915_refct_sgt_put(vsnap->pages_rsgt);
- if (!vsnap->onstack)
- kfree(vsnap);
-}
-
-/**
- * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference
- * @vsnap: The pointer reference
- */
-void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap)
-{
- kref_put(&vsnap->kref, vma_snapshot_release);
-}
-
-/**
- * i915_vma_snapshot_put_onstack - Put an onstcak i915_vma_snapshot pointer
- * reference and varify that the structure is released
- * @vsnap: The pointer reference
- *
- * This function is intended to be paired with a i915_vma_init_onstack()
- * and should be called before exiting the scope that declared or
- * freeing the structure that embedded @vsnap to verify that all references
- * have been released.
- */
-void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap)
-{
- if (!kref_put(&vsnap->kref, vma_snapshot_release))
- GEM_BUG_ON(1);
-}
-
-/**
- * i915_vma_snapshot_resource_pin - Temporarily block the memory the
- * vma snapshot is pointing to from being released.
- * @vsnap: The vma snapshot.
- * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs
- * to be passed to the paired i915_vma_snapshot_resource_unpin.
- *
- * This function will temporarily try to hold up a fence or similar structure
- * and will therefore enter a fence signaling critical section.
- *
- * Return: true if we succeeded in blocking the memory from being released,
- * false otherwise.
- */
-bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
- bool *lockdep_cookie)
-{
- bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource);
-
- if (pinned)
- *lockdep_cookie = dma_fence_begin_signalling();
-
- return pinned;
-}
-
-/**
- * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from
- * being released.
- * @vsnap: The vma snapshot.
- * @lockdep_cookie: Cookie returned from matching i915_vma_resource_pin().
- *
- * Might leave a fence signalling critical section and signal a fence.
- */
-void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
- bool lockdep_cookie)
-{
- dma_fence_end_signalling(lockdep_cookie);
-
- return i915_active_release(vsnap->vma_resource);
-}
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h
deleted file mode 100644
index 940581df4622..000000000000
--- a/drivers/gpu/drm/i915/i915_vma_snapshot.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-#ifndef _I915_VMA_SNAPSHOT_H_
-#define _I915_VMA_SNAPSHOT_H_
-
-#include <linux/kref.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-struct i915_active;
-struct i915_refct_sgt;
-struct i915_vma;
-struct intel_memory_region;
-struct sg_table;
-
-/**
- * DOC: Simple utilities for snapshotting GPU vma metadata, later used for
- * error capture. Vi use a separate header for this to avoid issues due to
- * recursive header includes.
- */
-
-/**
- * struct i915_vma_snapshot - Snapshot of vma metadata.
- * @size: The vma size in bytes.
- * @obj_size: The size of the underlying object in bytes.
- * @gtt_offset: The gtt offset the vma is bound to.
- * @gtt_size: The size in bytes allocated for the vma in the GTT.
- * @pages: The struct sg_table pointing to the pages bound.
- * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any.
- * @mr: The memory region pointed for the pages bound.
- * @kref: Reference for this structure.
- * @vma_resource: FIXME: A means to keep the unbind fence from signaling.
- * Temporarily while we have only sync unbinds, and still use the vma
- * active, we use that. With async unbinding we need a signaling refcount
- * for the unbind fence.
- * @page_sizes: The vma GTT page sizes information.
- * @onstack: Whether the structure shouldn't be freed on final put.
- * @present: Whether the structure is present and initialized.
- */
-struct i915_vma_snapshot {
- const char *name;
- size_t size;
- size_t obj_size;
- size_t gtt_offset;
- size_t gtt_size;
- struct sg_table *pages;
- struct i915_refct_sgt *pages_rsgt;
- struct intel_memory_region *mr;
- struct kref kref;
- struct i915_active *vma_resource;
- u32 page_sizes;
- bool onstack:1;
- bool present:1;
-};
-
-void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name);
-
-void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name);
-
-void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap);
-
-void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap);
-
-bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
- bool *lockdep_cookie);
-
-void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
- bool lockdep_cookie);
-
-/**
- * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot
- * @gfp: Allocation mode.
- *
- * Return: A pointer to a struct i915_vma_snapshot if successful.
- * NULL otherwise.
- */
-static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp)
-{
- return kmalloc(sizeof(struct i915_vma_snapshot), gfp);
-}
-
-/**
- * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot
- *
- * Return: A pointer to a struct i915_vma_snapshot.
- */
-static inline struct i915_vma_snapshot *
-i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap)
-{
- kref_get(&vsnap->kref);
- return vsnap;
-}
-
-/**
- * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is
- * present and initialized.
- *
- * Return: true if present and initialized; false otherwise.
- */
-static inline bool
-i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap)
-{
- return vsnap && vsnap->present;
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index ca575e129ced..88370dadca82 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -95,6 +95,8 @@ enum i915_cache_level;
*
*/
+struct i915_vma_resource;
+
struct intel_remapped_plane_info {
/* in gtt pages */
u32 offset:31;
@@ -247,22 +249,20 @@ struct i915_vma {
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
-#define I915_VMA_ALLOC_BIT 12
-
-#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR_BIT 12
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
-#define I915_VMA_GGTT_BIT 14
-#define I915_VMA_CAN_FENCE_BIT 15
-#define I915_VMA_USERFAULT_BIT 16
-#define I915_VMA_GGTT_WRITE_BIT 17
+#define I915_VMA_GGTT_BIT 13
+#define I915_VMA_CAN_FENCE_BIT 14
+#define I915_VMA_USERFAULT_BIT 15
+#define I915_VMA_GGTT_WRITE_BIT 16
#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
-#define I915_VMA_SCANOUT_BIT 18
+#define I915_VMA_SCANOUT_BIT 17
#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
struct i915_active active;
@@ -291,6 +291,9 @@ struct i915_vma {
struct list_head evict_link;
struct list_head closed_link;
+
+ /** The async vma resource. Protected by the vm_mutex */
+ struct i915_vma_resource *resource;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 3699b1c539ea..27dcfe6f2429 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -96,7 +96,7 @@ enum intel_platform {
* it is fine for the same bit to be used on multiple parent platforms.
*/
-#define INTEL_SUBPLATFORM_BITS (2)
+#define INTEL_SUBPLATFORM_BITS (3)
#define INTEL_SUBPLATFORM_MASK (BIT(INTEL_SUBPLATFORM_BITS) - 1)
/* HSW/BDW/SKL/KBL/CFL */
@@ -109,6 +109,7 @@ enum intel_platform {
/* DG2 */
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
+#define INTEL_SUBPLATFORM_G12 2
/* ADL-S */
#define INTEL_SUBPLATFORM_RPL_S 0
@@ -134,6 +135,7 @@ enum intel_ppgtt_type {
func(has_reset_engine); \
func(has_global_mocs); \
func(has_gt_uc); \
+ func(has_guc_deprivilege); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index a4b16b9e2e55..ac1a796b2808 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -122,6 +122,15 @@ static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
[0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
};
+static const struct intel_step_info dg2_g12_revid_step_tbl[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_C0 },
+};
+
+static const struct intel_step_info adls_rpls_revids[] = {
+ [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_D0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
+};
+
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -135,12 +144,18 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_DG2_G11(i915)) {
revids = dg2_g11_revid_step_tbl;
size = ARRAY_SIZE(dg2_g11_revid_step_tbl);
+ } else if (IS_DG2_G12(i915)) {
+ revids = dg2_g12_revid_step_tbl;
+ size = ARRAY_SIZE(dg2_g12_revid_step_tbl);
} else if (IS_XEHPSDV(i915)) {
revids = xehpsdv_revids;
size = ARRAY_SIZE(xehpsdv_revids);
} else if (IS_ALDERLAKE_P(i915)) {
revids = adlp_revids;
size = ARRAY_SIZE(adlp_revids);
+ } else if (IS_ADLS_RPLS(i915)) {
+ revids = adls_rpls_revids;
+ size = ARRAY_SIZE(adls_rpls_revids);
} else if (IS_ALDERLAKE_S(i915)) {
revids = adls_revids;
size = ARRAY_SIZE(adls_revids);
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index f06d21005106..322fb9eeb880 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -43,6 +43,7 @@
/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */
#define GEN11_WOPCM_SIZE SZ_2M
#define GEN9_WOPCM_SIZE SZ_1M
+#define MAX_WOPCM_SIZE SZ_8M
/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
#define WOPCM_RESERVED_SIZE SZ_16K
@@ -207,6 +208,14 @@ static bool __wopcm_regs_locked(struct intel_uncore *uncore,
return true;
}
+static bool __wopcm_regs_writable(struct intel_uncore *uncore)
+{
+ if (!HAS_GUC_DEPRIVILEGE(uncore->i915))
+ return true;
+
+ return intel_uncore_read(uncore, GUC_SHIM_CONTROL2) & GUC_IS_PRIVILEGED;
+}
+
/**
* intel_wopcm_init() - Initialize the WOPCM structure.
* @wopcm: pointer to intel_wopcm.
@@ -224,18 +233,19 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
+ u32 wopcm_size = wopcm->size;
u32 guc_wopcm_base;
u32 guc_wopcm_size;
if (!guc_fw_size)
return;
- GEM_BUG_ON(!wopcm->size);
+ GEM_BUG_ON(!wopcm_size);
GEM_BUG_ON(wopcm->guc.base);
GEM_BUG_ON(wopcm->guc.size);
- GEM_BUG_ON(guc_fw_size >= wopcm->size);
- GEM_BUG_ON(huc_fw_size >= wopcm->size);
- GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
+ GEM_BUG_ON(guc_fw_size >= wopcm_size);
+ GEM_BUG_ON(huc_fw_size >= wopcm_size);
+ GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm_size);
if (i915_inject_probe_failure(i915))
return;
@@ -243,6 +253,24 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+ /*
+ * Note that to keep things simple (i.e. avoid different
+ * defines per platform) our WOPCM math doesn't always use the
+ * actual WOPCM size, but a value that is less than or equal to it.
+ * This is perfectly fine when i915 programs the registers, but
+ * on platforms with GuC deprivilege the registers are not
+ * writable from i915 and are instead pre-programmed by the
+ * bios/IFWI, so there might be a mismatch of sizes.
+ * Instead of handling the size difference, we trust that the
+ * programmed values make sense and disable the relevant check
+ * by using the maximum possible WOPCM size in the verification
+ * math. In the extremely unlikely case that the registers
+ * were pre-programmed with an invalid value, we will still
+ * gracefully fail later during the GuC/HuC dma.
+ */
+ if (!__wopcm_regs_writable(gt->uncore))
+ wopcm_size = MAX_WOPCM_SIZE;
+
goto check;
}
@@ -257,17 +285,17 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
* Need to clamp guc_wopcm_base now to make sure the following math is
* correct. Formal check of whole WOPCM layout will be done below.
*/
- guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
+ guc_wopcm_base = min(guc_wopcm_base, wopcm_size - ctx_rsvd);
/* Aligned remainder of usable WOPCM space can be assigned to GuC. */
- guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
+ guc_wopcm_size = wopcm_size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
check:
- if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
+ if (__check_layout(i915, wopcm_size, guc_wopcm_base, guc_wopcm_size,
guc_fw_size, huc_fw_size)) {
wopcm->guc.base = guc_wopcm_base;
wopcm->guc.size = guc_wopcm_size;
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
deleted file mode 100644
index d61ec9c951bf..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ /dev/null
@@ -1,787 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/prime_numbers.h>
-
-#include "../i915_selftest.h"
-#include "i915_random.h"
-
-static void __igt_dump_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block,
- bool buddy)
-{
- pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
- block->header,
- i915_buddy_block_state(block),
- i915_buddy_block_order(block),
- i915_buddy_block_offset(block),
- i915_buddy_block_size(mm, block),
- yesno(!block->parent),
- yesno(buddy));
-}
-
-static void igt_dump_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- struct i915_buddy_block *buddy;
-
- __igt_dump_block(mm, block, false);
-
- buddy = get_buddy(block);
- if (buddy)
- __igt_dump_block(mm, buddy, true);
-}
-
-static int igt_check_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- struct i915_buddy_block *buddy;
- unsigned int block_state;
- u64 block_size;
- u64 offset;
- int err = 0;
-
- block_state = i915_buddy_block_state(block);
-
- if (block_state != I915_BUDDY_ALLOCATED &&
- block_state != I915_BUDDY_FREE &&
- block_state != I915_BUDDY_SPLIT) {
- pr_err("block state mismatch\n");
- err = -EINVAL;
- }
-
- block_size = i915_buddy_block_size(mm, block);
- offset = i915_buddy_block_offset(block);
-
- if (block_size < mm->chunk_size) {
- pr_err("block size smaller than min size\n");
- err = -EINVAL;
- }
-
- if (!is_power_of_2(block_size)) {
- pr_err("block size not power of two\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(block_size, mm->chunk_size)) {
- pr_err("block size not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, mm->chunk_size)) {
- pr_err("block offset not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, block_size)) {
- pr_err("block offset not aligned to block size\n");
- err = -EINVAL;
- }
-
- buddy = get_buddy(block);
-
- if (!buddy && block->parent) {
- pr_err("buddy has gone fishing\n");
- err = -EINVAL;
- }
-
- if (buddy) {
- if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
- pr_err("buddy has wrong offset\n");
- err = -EINVAL;
- }
-
- if (i915_buddy_block_size(mm, buddy) != block_size) {
- pr_err("buddy size mismatch\n");
- err = -EINVAL;
- }
-
- if (i915_buddy_block_state(buddy) == block_state &&
- block_state == I915_BUDDY_FREE) {
- pr_err("block and its buddy are free\n");
- err = -EINVAL;
- }
- }
-
- return err;
-}
-
-static int igt_check_blocks(struct i915_buddy_mm *mm,
- struct list_head *blocks,
- u64 expected_size,
- bool is_contiguous)
-{
- struct i915_buddy_block *block;
- struct i915_buddy_block *prev;
- u64 total;
- int err = 0;
-
- block = NULL;
- prev = NULL;
- total = 0;
-
- list_for_each_entry(block, blocks, link) {
- err = igt_check_block(mm, block);
-
- if (!i915_buddy_block_is_allocated(block)) {
- pr_err("block not allocated\n"),
- err = -EINVAL;
- }
-
- if (is_contiguous && prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = i915_buddy_block_offset(prev);
- prev_block_size = i915_buddy_block_size(mm, prev);
- offset = i915_buddy_block_offset(block);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("block offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- if (err)
- break;
-
- total += i915_buddy_block_size(mm, block);
- prev = block;
- }
-
- if (!err) {
- if (total != expected_size) {
- pr_err("size mismatch, expected=%llx, found=%llx\n",
- expected_size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev block, dump:\n");
- igt_dump_block(mm, prev);
- }
-
- pr_err("bad block, dump:\n");
- igt_dump_block(mm, block);
-
- return err;
-}
-
-static int igt_check_mm(struct i915_buddy_mm *mm)
-{
- struct i915_buddy_block *root;
- struct i915_buddy_block *prev;
- unsigned int i;
- u64 total;
- int err = 0;
-
- if (!mm->n_roots) {
- pr_err("n_roots is zero\n");
- return -EINVAL;
- }
-
- if (mm->n_roots != hweight64(mm->size)) {
- pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
- mm->n_roots, hweight64(mm->size));
- return -EINVAL;
- }
-
- root = NULL;
- prev = NULL;
- total = 0;
-
- for (i = 0; i < mm->n_roots; ++i) {
- struct i915_buddy_block *block;
- unsigned int order;
-
- root = mm->roots[i];
- if (!root) {
- pr_err("root(%u) is NULL\n", i);
- err = -EINVAL;
- break;
- }
-
- err = igt_check_block(mm, root);
-
- if (!i915_buddy_block_is_free(root)) {
- pr_err("root not free\n");
- err = -EINVAL;
- }
-
- order = i915_buddy_block_order(root);
-
- if (!i) {
- if (order != mm->max_order) {
- pr_err("max order root missing\n");
- err = -EINVAL;
- }
- }
-
- if (prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = i915_buddy_block_offset(prev);
- prev_block_size = i915_buddy_block_size(mm, prev);
- offset = i915_buddy_block_offset(root);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("root offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- block = list_first_entry_or_null(&mm->free_list[order],
- struct i915_buddy_block,
- link);
- if (block != root) {
- pr_err("root mismatch at order=%u\n", order);
- err = -EINVAL;
- }
-
- if (err)
- break;
-
- prev = root;
- total += i915_buddy_block_size(mm, root);
- }
-
- if (!err) {
- if (total != mm->size) {
- pr_err("expected mm size=%llx, found=%llx\n", mm->size,
- total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev root(%u), dump:\n", i - 1);
- igt_dump_block(mm, prev);
- }
-
- if (root) {
- pr_err("bad root(%u), dump:\n", i);
- igt_dump_block(mm, root);
- }
-
- return err;
-}
-
-static void igt_mm_config(u64 *size, u64 *chunk_size)
-{
- I915_RND_STATE(prng);
- u32 s, ms;
-
- /* Nothing fancy, just try to get an interesting bit pattern */
-
- prandom_seed_state(&prng, i915_selftest.random_seed);
-
- /* Let size be a random number of pages up to 8 GB (2M pages) */
- s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
- /* Let the chunk size be a random power of 2 less than size */
- ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
- /* Round size down to the chunk size */
- s &= -ms;
-
- /* Convert from pages to bytes */
- *chunk_size = (u64)ms << 12;
- *size = (u64)s << 12;
-}
-
-static int igt_buddy_alloc_smoke(void *arg)
-{
- struct i915_buddy_mm mm;
- IGT_TIMEOUT(end_time);
- I915_RND_STATE(prng);
- u64 chunk_size;
- u64 mm_size;
- int *order;
- int err, i;
-
- igt_mm_config(&mm_size, &chunk_size);
-
- pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
-
- err = i915_buddy_init(&mm, mm_size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- order = i915_random_order(mm.max_order + 1, &prng);
- if (!order)
- goto out_fini;
-
- for (i = 0; i <= mm.max_order; ++i) {
- struct i915_buddy_block *block;
- int max_order = order[i];
- bool timeout = false;
- LIST_HEAD(blocks);
- int order;
- u64 total;
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort\n");
- break;
- }
-
- pr_info("filling from max_order=%u\n", max_order);
-
- order = max_order;
- total = 0;
-
- do {
-retry:
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- err = PTR_ERR(block);
- if (err == -ENOMEM) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- } else {
- if (order--) {
- err = 0;
- goto retry;
- }
-
- pr_err("buddy_alloc with order=%d failed(%d)\n",
- order, err);
- }
-
- break;
- }
-
- list_add_tail(&block->link, &blocks);
-
- if (i915_buddy_block_order(block) != order) {
- pr_err("buddy_alloc order mismatch\n");
- err = -EINVAL;
- break;
- }
-
- total += i915_buddy_block_size(&mm, block);
-
- if (__igt_timeout(end_time, NULL)) {
- timeout = true;
- break;
- }
- } while (total < mm.size);
-
- if (!err)
- err = igt_check_blocks(&mm, &blocks, total, false);
-
- i915_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
- if (err || timeout)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- kfree(order);
-out_fini:
- i915_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_pessimistic(void *arg)
-{
- const unsigned int max_order = 16;
- struct i915_buddy_block *block, *bn;
- struct i915_buddy_mm mm;
- unsigned int order;
- LIST_HEAD(blocks);
- int err;
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left.
- */
-
- err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- GEM_BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order < max_order; order++) {
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- err = PTR_ERR(block);
- goto err;
- }
-
- list_add_tail(&block->link, &blocks);
- }
-
- /* And now the last remaining block available */
- block = i915_buddy_alloc(&mm, 0);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
- err = PTR_ERR(block);
- goto err;
- }
- list_add_tail(&block->link, &blocks);
-
- /* Should be completely full! */
- for (order = max_order; order--; ) {
- block = i915_buddy_alloc(&mm, order);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- block = list_last_entry(&blocks, typeof(*block), link);
- list_del(&block->link);
- i915_buddy_free(&mm, block);
-
- /* As we free in increasing size, we make available larger blocks */
- order = 1;
- list_for_each_entry_safe(block, bn, &blocks, link) {
- list_del(&block->link);
- i915_buddy_free(&mm, block);
-
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- order);
- err = PTR_ERR(block);
- goto err;
- }
- i915_buddy_free(&mm, block);
- order++;
- }
-
- /* To confirm, now the whole mm should be available */
- block = i915_buddy_alloc(&mm, max_order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- max_order);
- err = PTR_ERR(block);
- goto err;
- }
- i915_buddy_free(&mm, block);
-
-err:
- i915_buddy_free_list(&mm, &blocks);
- i915_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_optimistic(void *arg)
-{
- const int max_order = 16;
- struct i915_buddy_block *block;
- struct i915_buddy_mm mm;
- LIST_HEAD(blocks);
- int order;
- int err;
-
- /*
- * Create a mm with one block of each order available, and
- * try to allocate them all.
- */
-
- err = i915_buddy_init(&mm,
- PAGE_SIZE * ((1 << (max_order + 1)) - 1),
- PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- GEM_BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order <= max_order; order++) {
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- err = PTR_ERR(block);
- goto err;
- }
-
- list_add_tail(&block->link, &blocks);
- }
-
- /* Should be completely full! */
- block = i915_buddy_alloc(&mm, 0);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
-
-err:
- i915_buddy_free_list(&mm, &blocks);
- i915_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_pathological(void *arg)
-{
- const int max_order = 16;
- struct i915_buddy_block *block;
- struct i915_buddy_mm mm;
- LIST_HEAD(blocks);
- LIST_HEAD(holes);
- int order, top;
- int err;
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left. Free the largest block, then whittle down again.
- * Eventually we will have a fully 50% fragmented mm.
- */
-
- err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- GEM_BUG_ON(mm.max_order != max_order);
-
- for (top = max_order; top; top--) {
- /* Make room by freeing the largest allocated block */
- block = list_first_entry_or_null(&blocks, typeof(*block), link);
- if (block) {
- list_del(&block->link);
- i915_buddy_free(&mm, block);
- }
-
- for (order = top; order--; ) {
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
- order, top);
- err = PTR_ERR(block);
- goto err;
- }
- list_add_tail(&block->link, &blocks);
- }
-
- /* There should be one final page for this sub-allocation */
- block = i915_buddy_alloc(&mm, 0);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM for hole\n");
- err = PTR_ERR(block);
- goto err;
- }
- list_add_tail(&block->link, &holes);
-
- block = i915_buddy_alloc(&mm, top);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
- top, max_order);
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- i915_buddy_free_list(&mm, &holes);
-
- /* Nothing larger than blocks of chunk_size now available */
- for (order = 1; order <= max_order; order++) {
- block = i915_buddy_alloc(&mm, order);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
-err:
- list_splice_tail(&holes, &blocks);
- i915_buddy_free_list(&mm, &blocks);
- i915_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_range(void *arg)
-{
- struct i915_buddy_mm mm;
- unsigned long page_num;
- LIST_HEAD(blocks);
- u64 chunk_size;
- u64 offset;
- u64 size;
- u64 rem;
- int err;
-
- igt_mm_config(&size, &chunk_size);
-
- pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
-
- err = i915_buddy_init(&mm, size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort, abort, abort!\n");
- goto err_fini;
- }
-
- rem = mm.size;
- offset = 0;
-
- for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
- struct i915_buddy_block *block;
- LIST_HEAD(tmp);
-
- size = min(page_num * mm.chunk_size, rem);
-
- err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
- if (err) {
- if (err == -ENOMEM) {
- pr_info("alloc_range hit -ENOMEM with size=%llx\n",
- size);
- } else {
- pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
- offset, size, err);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct i915_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_range has no blocks\n");
- err = -EINVAL;
- break;
- }
-
- if (i915_buddy_block_offset(block) != offset) {
- pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
- i915_buddy_block_offset(block), offset);
- err = -EINVAL;
- }
-
- if (!err)
- err = igt_check_blocks(&mm, &tmp, size, true);
-
- list_splice_tail(&tmp, &blocks);
-
- if (err)
- break;
-
- offset += size;
-
- rem -= size;
- if (!rem)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- i915_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
-err_fini:
- i915_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_limit(void *arg)
-{
- struct i915_buddy_block *block;
- struct i915_buddy_mm mm;
- const u64 size = U64_MAX;
- int err;
-
- err = i915_buddy_init(&mm, size, PAGE_SIZE);
- if (err)
- return err;
-
- if (mm.max_order != I915_BUDDY_MAX_ORDER) {
- pr_err("mm.max_order(%d) != %d\n",
- mm.max_order, I915_BUDDY_MAX_ORDER);
- err = -EINVAL;
- goto out_fini;
- }
-
- block = i915_buddy_alloc(&mm, mm.max_order);
- if (IS_ERR(block)) {
- err = PTR_ERR(block);
- goto out_fini;
- }
-
- if (i915_buddy_block_order(block) != mm.max_order) {
- pr_err("block order(%d) != %d\n",
- i915_buddy_block_order(block), mm.max_order);
- err = -EINVAL;
- goto out_free;
- }
-
- if (i915_buddy_block_size(&mm, block) !=
- BIT_ULL(mm.max_order) * PAGE_SIZE) {
- pr_err("block size(%llu) != %llu\n",
- i915_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
- err = -EINVAL;
- goto out_free;
- }
-
-out_free:
- i915_buddy_free(&mm, block);
-out_fini:
- i915_buddy_fini(&mm);
- return err;
-}
-
-int i915_buddy_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_buddy_alloc_pessimistic),
- SUBTEST(igt_buddy_alloc_optimistic),
- SUBTEST(igt_buddy_alloc_pathological),
- SUBTEST(igt_buddy_alloc_smoke),
- SUBTEST(igt_buddy_alloc_range),
- SUBTEST(igt_buddy_alloc_limit),
- };
-
- return i915_subtests(tests, NULL);
-}
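These selftests are removed because the buddy allocator moves to the common DRM layer (see the drm_buddy conversions in intel_memory_region.c later in this series). The core invariant the deleted igt_check_block() verified — a power-of-two block's buddy sits at the offset with the size bit flipped — can be sketched stand-alone:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t buddy_offset(uint64_t offset, uint64_t block_size)
	{
		/* offset must be block_size-aligned, block_size a power of two */
		return offset ^ block_size;
	}

	int main(void)
	{
		assert(buddy_offset(0x1000, 0x1000) == 0x0000);
		assert(buddy_offset(0x0000, 0x1000) == 0x1000);
		/* buddies pair up symmetrically */
		assert(buddy_offset(buddy_offset(0x6000, 0x2000), 0x2000) == 0x6000);
		return 0;
	}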
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 6a7328d9c361..e5dd82e7e480 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -42,7 +42,7 @@ static int switch_to_context(struct i915_gem_context *ctx)
static void trash_stolen(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
const u64 slot = ggtt->error_capture.start;
const resource_size_t size = resource_size(&i915->dsm);
unsigned long page;
@@ -100,7 +100,7 @@ static void igt_pm_suspend(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_ggtt_suspend(&i915->ggtt);
+ i915_ggtt_suspend(to_gt(i915)->ggtt);
i915_gem_suspend_late(i915);
}
}
@@ -110,7 +110,7 @@ static void igt_pm_hibernate(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_ggtt_suspend(&i915->ggtt);
+ i915_ggtt_suspend(to_gt(i915)->ggtt);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
@@ -126,7 +126,7 @@ static void igt_pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_ggtt_resume(&i915->ggtt);
+ i915_ggtt_resume(to_gt(i915)->ggtt);
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 74a1b2ecf48f..8c6517d29b8e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -118,7 +118,7 @@ static int igt_evict_something(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_something(&ggtt->vm,
+ err = i915_gem_evict_something(&ggtt->vm, NULL,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -133,7 +133,7 @@ static int igt_evict_something(void *arg)
/* Everything is unpinned, we should be able to evict something */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_something(&ggtt->vm,
+ err = i915_gem_evict_something(&ggtt->vm, NULL,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -205,7 +205,7 @@ static int igt_evict_for_vma(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
@@ -217,7 +217,7 @@ static int igt_evict_for_vma(void *arg)
/* Everything is unpinned, we should be able to evict the node */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
@@ -298,7 +298,7 @@ static int igt_evict_for_cache_color(void *arg)
/* Remove just the second vma */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
@@ -311,7 +311,7 @@ static int igt_evict_for_cache_color(void *arg)
target.color = I915_CACHE_L3_LLC;
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
@@ -332,6 +332,7 @@ static int igt_evict_vm(void *arg)
{
struct intel_gt *gt = arg;
struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_gem_ww_ctx ww;
LIST_HEAD(objects);
int err;
@@ -343,7 +344,7 @@ static int igt_evict_vm(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm);
+ err = i915_gem_evict_vm(&ggtt->vm, NULL);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
@@ -353,9 +354,12 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(ggtt);
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm);
- mutex_unlock(&ggtt->vm.mutex);
+ for_i915_gem_ww(&ww, err, false) {
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_vm(&ggtt->vm, &ww);
+ mutex_unlock(&ggtt->vm.mutex);
+ }
+
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -403,7 +407,7 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &hole,
+ err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->vm.total,
PIN_NOEVICT);
@@ -423,7 +427,7 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
- if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
+ if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->vm.total,
PIN_NOEVICT)) {
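Note how igt_evict_vm() above now runs the eviction inside a ww transaction. A simplified sketch of the retry shape that for_i915_gem_ww() provides (not the real macro body):

	#include <errno.h>

	struct ww_ctx { int unused; };	/* stand-in for struct i915_gem_ww_ctx */

	static int run_under_ww(int (*body)(struct ww_ctx *ww))
	{
		struct ww_ctx ww = { 0 };
		int err;

		/* i915_gem_ww_ctx_init(&ww, false); */
		do {
			err = body(&ww);
			/*
			 * On -EDEADLK the real loop calls i915_gem_ww_ctx_backoff(),
			 * which drops all held object locks before retrying.
			 */
		} while (err == -EDEADLK);
		/* i915_gem_ww_ctx_fini(&ww); */
		return err;
	}

Any other error, including success, exits the loop and is returned to the caller, which matches the err handling after the for_i915_gem_ww() block above.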
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 1b508c89468c..e7e6c4b2c81d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -33,6 +33,7 @@
#include "i915_random.h"
#include "i915_selftest.h"
+#include "i915_vma_resource.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -239,11 +240,11 @@ static int lowlevel_hole(struct i915_address_space *vm,
unsigned long end_time)
{
I915_RND_STATE(seed_prng);
- struct i915_vma *mock_vma;
+ struct i915_vma_resource *mock_vma_res;
unsigned int size;
- mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
- if (!mock_vma)
+ mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
+ if (!mock_vma_res)
return -ENOMEM;
/* Keep creating larger objects until one cannot fit into the hole */
@@ -269,7 +270,7 @@ static int lowlevel_hole(struct i915_address_space *vm,
break;
} while (count >>= 1);
if (!count) {
- kfree(mock_vma);
+ kfree(mock_vma_res);
return -ENOMEM;
}
GEM_BUG_ON(!order);
@@ -343,12 +344,12 @@ alloc_vm_end:
break;
}
- mock_vma->pages = obj->mm.pages;
- mock_vma->node.size = BIT_ULL(size);
- mock_vma->node.start = addr;
+ mock_vma_res->bi.pages = obj->mm.pages;
+ mock_vma_res->node_size = BIT_ULL(size);
+ mock_vma_res->start = addr;
with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
- vm->insert_entries(vm, mock_vma,
+ vm->insert_entries(vm, mock_vma_res,
I915_CACHE_NONE, 0);
}
count = n;
@@ -371,7 +372,7 @@ alloc_vm_end:
cleanup_freed_objects(vm->i915);
}
- kfree(mock_vma);
+ kfree(mock_vma_res);
return 0;
}
@@ -386,7 +387,7 @@ static void close_object_list(struct list_head *objects,
vma = i915_vma_instance(obj, vm, NULL);
if (!IS_ERR(vma))
- ignored = i915_vma_unbind(vma);
+ ignored = i915_vma_unbind_unlocked(vma);
list_del(&obj->st_link);
i915_gem_object_put(obj);
@@ -497,7 +498,7 @@ static int fill_hole(struct i915_address_space *vm,
goto err;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
__func__, p->name, vma->node.start, vma->node.size,
@@ -570,7 +571,7 @@ static int fill_hole(struct i915_address_space *vm,
goto err;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
__func__, p->name, vma->node.start, vma->node.size,
@@ -656,7 +657,7 @@ static int walk_hole(struct i915_address_space *vm,
goto err_put;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("%s unbind failed at %llx + %llx with err=%d\n",
__func__, addr, vma->size, err);
@@ -733,13 +734,13 @@ static int pot_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, vma->size);
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
err = -EINVAL;
goto err_obj;
}
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
GEM_BUG_ON(err);
}
@@ -833,13 +834,13 @@ static int drunk_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, BIT_ULL(size));
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
err = -EINVAL;
goto err_obj;
}
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
GEM_BUG_ON(err);
if (igt_timeout(end_time,
@@ -907,7 +908,7 @@ static int __shrink_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, size);
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
err = -EINVAL;
break;
}
@@ -1123,7 +1124,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
u64 hole_start, hole_end, last = 0;
struct drm_mm_node *node;
IGT_TIMEOUT(end_time);
@@ -1183,7 +1184,7 @@ static int igt_ggtt_page(void *arg)
const unsigned int count = PAGE_SIZE/sizeof(u32);
I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
struct drm_mm_node tmp;
@@ -1280,6 +1281,7 @@ static void track_vma_bind(struct i915_vma *vma)
atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
+ vma->resource->bi.pages = vma->pages;
mutex_lock(&vma->vm->mutex);
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
@@ -1337,6 +1339,33 @@ static int igt_mock_drunk(void *arg)
return exercise_mock(ggtt->vm.i915, drunk_hole);
}
+static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
+{
+ struct i915_address_space *vm = vma->vm;
+ struct i915_vma_resource *vma_res;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res))
+ return PTR_ERR(vma_res);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
+ offset,
+ obj->cache_level,
+ 0);
+ if (!err) {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ } else {
+ kfree(vma_res);
+ }
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
static int igt_gtt_reserve(void *arg)
{
struct i915_ggtt *ggtt = arg;
@@ -1371,20 +1400,13 @@ static int igt_gtt_reserve(void *arg)
}
list_add(&obj->st_link, &objects);
-
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- obj->base.size,
- total,
- obj->cache_level,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = reserve_gtt_with_resource(vma, total);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1430,13 +1452,7 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- obj->base.size,
- total,
- obj->cache_level,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = reserve_gtt_with_resource(vma, total);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1466,7 +1482,7 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("i915_vma_unbind failed with err=%d!\n", err);
goto out;
@@ -1477,13 +1493,7 @@ static int igt_gtt_reserve(void *arg)
2 * I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- obj->base.size,
- offset,
- obj->cache_level,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = reserve_gtt_with_resource(vma, offset);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1510,6 +1520,31 @@ out:
return err;
}
+static int insert_gtt_with_resource(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ struct i915_vma_resource *vma_res;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res))
+ return PTR_ERR(vma_res);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
+ obj->cache_level, 0, vm->total, 0);
+ if (!err) {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ } else {
+ kfree(vma_res);
+ }
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
static int igt_gtt_insert(void *arg)
{
struct i915_ggtt *ggtt = arg;
@@ -1553,7 +1588,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
+ err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1594,12 +1629,7 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
- obj->base.size, 0, obj->cache_level,
- 0, ggtt->vm.total,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = insert_gtt_with_resource(vma);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
i915_gem_object_put(obj);
@@ -1648,18 +1678,13 @@ static int igt_gtt_insert(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
offset = vma->node.start;
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("i915_vma_unbind failed with err=%d!\n", err);
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
- obj->base.size, 0, obj->cache_level,
- 0, ggtt->vm.total,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = insert_gtt_with_resource(vma);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1703,12 +1728,7 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
- obj->base.size, 0, obj->cache_level,
- 0, ggtt->vm.total,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = insert_gtt_with_resource(vma);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1738,26 +1758,28 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
- struct i915_ggtt *ggtt;
+ struct intel_gt *gt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
- if (!ggtt) {
- err = -ENOMEM;
+ /* allocate the ggtt */
+ err = intel_gt_assign_ggtt(to_gt(i915));
+ if (err)
goto out_put;
- }
- mock_init_ggtt(i915, ggtt);
- err = i915_subtests(tests, ggtt);
+ gt = to_gt(i915);
+
+ mock_init_ggtt(gt);
+
+ err = i915_subtests(tests, gt->ggtt);
mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(ggtt);
- kfree(ggtt);
+ mock_fini_ggtt(gt->ggtt);
+
out_put:
mock_destroy_device(i915);
return err;
@@ -1940,6 +1962,7 @@ static int igt_cs_tlb(void *arg)
struct i915_vm_pt_stash stash = {};
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
+ struct i915_vma_resource *vma_res;
u64 offset;
offset = igt_random_offset(&prng,
@@ -1960,6 +1983,13 @@ static int igt_cs_tlb(void *arg)
if (err)
goto end;
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res)) {
+ i915_vma_put_pages(vma);
+ err = PTR_ERR(vma_res);
+ goto end;
+ }
+
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_vm_lock_objects(vm, &ww);
@@ -1981,33 +2011,41 @@ end_ww:
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
- if (err)
+ if (err) {
+ kfree(vma_res);
goto end;
+ }
+ i915_vma_resource_init_from_vma(vma_res, vma);
/* Prime the TLB with the dummy pages */
for (i = 0; i < count; i++) {
- vma->node.start = offset + i * PAGE_SIZE;
- vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+ vma_res->start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
+ 0);
- rq = submit_batch(ce, vma->node.start);
+ rq = submit_batch(ce, vma_res->start);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
goto end;
}
i915_request_put(rq);
}
-
+ i915_vma_resource_fini(vma_res);
i915_vma_put_pages(vma);
err = context_sync(ce);
if (err) {
pr_err("%s: dummy setup timed out\n",
ce->engine->name);
+ kfree(vma_res);
goto end;
}
vma = i915_vma_instance(act, vm, NULL);
if (IS_ERR(vma)) {
+ kfree(vma_res);
err = PTR_ERR(vma);
goto end;
}
@@ -2015,19 +2053,22 @@ end_ww:
i915_gem_object_lock(act, NULL);
err = i915_vma_get_pages(vma);
i915_gem_object_unlock(act);
- if (err)
+ if (err) {
+ kfree(vma_res);
goto end;
+ }
+ i915_vma_resource_init_from_vma(vma_res, vma);
/* Replace the TLB with target batches */
for (i = 0; i < count; i++) {
struct i915_request *rq;
u32 *cs = batch + i * 64 / sizeof(*cs);
u64 addr;
- vma->node.start = offset + i * PAGE_SIZE;
- vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+ vma_res->start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
- addr = vma->node.start + i * 64;
+ addr = vma_res->start + i * 64;
cs[4] = MI_NOOP;
cs[6] = lower_32_bits(addr);
cs[7] = upper_32_bits(addr);
@@ -2036,6 +2077,8 @@ end_ww:
rq = submit_batch(ce, addr);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
goto end;
}
@@ -2052,6 +2095,8 @@ end_ww:
}
end_spin(batch, count - 1);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
i915_vma_put_pages(vma);
err = context_sync(ce);
@@ -2115,7 +2160,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_cs_tlb),
};
- GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
+ GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
return i915_subtests(tests, i915);
}
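The new reserve_gtt_with_resource() and insert_gtt_with_resource() helpers share one shape: allocate the vma resource up front, attempt the node placement under the vm mutex, then either initialise the resource from the vma or free it. A generic sketch of that allocate-then-commit-or-free pattern (hypothetical names):

	#include <errno.h>
	#include <stdlib.h>

	struct resource_t { int initialised; };	/* stand-in for i915_vma_resource */

	static int place_with_resource(int (*place)(void), struct resource_t **out)
	{
		struct resource_t *res = calloc(1, sizeof(*res));
		int err;

		if (!res)
			return -ENOMEM;

		err = place();		/* i915_gem_gtt_reserve() or _insert() */
		if (err) {
			free(res);	/* never leak on placement failure */
			return err;
		}

		res->initialised = 1;	/* i915_vma_resource_init_from_vma() */
		*out = res;		/* ownership moves to the vma on success */
		return 0;
	}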
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 793fb28a770d..0c22e0fc9059 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -33,4 +33,3 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(memory_region, intel_memory_region_mock_selftests)
-selftest(buddy, i915_buddy_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 3e673fd1ea4f..c56a0c2cd2f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -783,6 +783,115 @@ out_spin:
return err;
}
+/*
+ * Test to prove a non-preemptible request can be cancelled and a subsequent
+ * request on the same context can successfully complete after cancellation.
+ *
+ * Testing methodology is to create a non-preemptible request and submit it,
+ * wait for the spinner to start, create a NOP request and submit it, cancel
+ * the spinner, wait for the spinner to complete and verify it failed with an
+ * error, and finally wait for the NOP request to complete and verify it
+ * succeeded without an error. The preemption timeout is also reduced and
+ * restored so the test runs in a timely manner.
+ */
+static int __cancel_reset(struct drm_i915_private *i915,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq, *nop;
+ unsigned long preempt_timeout_ms;
+ int err = 0;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT ||
+ !intel_has_reset_engine(engine->gt))
+ return 0;
+
+ preempt_timeout_ms = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 100;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ goto out_restore;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling active non-preemptable request\n",
+ engine->name);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ nop = intel_context_create_request(ce);
+ if (IS_ERR(nop))
+ goto out_rq;
+ i915_request_get(nop);
+ i915_request_add(nop);
+
+ i915_request_cancel(rq, -EINTR);
+
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel hung request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_nop;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ goto out_nop;
+ }
+
+ if (i915_request_wait(nop, 0, HZ) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to complete nop request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_nop;
+ }
+
+ if (nop->fence.error != 0) {
+ pr_err("%s: Nop request errored (%u)\n",
+ engine->name, nop->fence.error);
+ err = -EINVAL;
+ }
+
+out_nop:
+ i915_request_put(nop);
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+out_restore:
+ engine->props.preempt_timeout_ms = preempt_timeout_ms;
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
static int live_cancel_request(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -815,6 +924,14 @@ static int live_cancel_request(void *arg)
return err;
if (err2)
return err2;
+
+ /* Expects reset so call outside of igt_live_test_* */
+ err = __cancel_reset(i915, engine);
+ if (err)
+ return err;
+
+ if (igt_flush_test(i915))
+ return -EIO;
}
return 0;
@@ -844,7 +961,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
intel_gt_chipset_flush(to_gt(i915));
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
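The essence of __cancel_reset() above is an error-propagation contract: the cancelled request's fence must carry exactly the errno passed to i915_request_cancel(), while the follow-up request on the same context completes cleanly. Reduced to a sketch with stand-in types (not the dma_fence API):

	#include <errno.h>

	struct fake_fence { int error; };	/* stand-in for the fence error field */

	static void cancel_request(struct fake_fence *f, int error)
	{
		f->error = error;	/* what i915_request_cancel(rq, error) must achieve */
	}

	static int check_after_cancel(const struct fake_fence *spin,
				      const struct fake_fence *nop)
	{
		if (spin->error != -EINTR)
			return -EINVAL;	/* cancellation errno not propagated */
		if (nop->error != 0)
			return -EINVAL;	/* the follow-up request must not be poisoned */
		return 0;
	}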
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index cc6f9af679fb..6921ba128015 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -341,7 +341,7 @@ static int igt_vma_pin1(void *arg)
if (!err) {
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
goto out;
@@ -692,7 +692,7 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Unbinding returned %i\n", err);
goto out_object;
@@ -853,7 +853,7 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Unbinding returned %i\n", err);
goto out_object;
@@ -892,7 +892,7 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Unbinding returned %i\n", err);
goto out_object;
@@ -923,26 +923,28 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
- struct i915_ggtt *ggtt;
+ struct intel_gt *gt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
- if (!ggtt) {
- err = -ENOMEM;
+ /* allocate the ggtt */
+ err = intel_gt_assign_ggtt(to_gt(i915));
+ if (err)
goto out_put;
- }
- mock_init_ggtt(i915, ggtt);
- err = i915_subtests(tests, ggtt);
+ gt = to_gt(i915);
+
+ mock_init_ggtt(gt);
+
+ err = i915_subtests(tests, gt->ggtt);
mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(ggtt);
- kfree(ggtt);
+ mock_fini_ggtt(gt->ggtt);
+
out_put:
mock_destroy_device(i915);
return err;
@@ -983,7 +985,7 @@ static int igt_vma_remapped_gtt(void *arg)
intel_wakeref_t wakeref;
int err = 0;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return 0;
obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index b84594601d30..b484e12df417 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -19,7 +19,7 @@ int igt_flush_test(struct drm_i915_private *i915)
cond_resched();
- if (intel_gt_wait_for_idle(gt, HZ) == -ETIME) {
+ if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 8255561ff853..7acba1d2135e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -6,6 +6,8 @@
#include <linux/prime_numbers.h>
#include <linux/sort.h>
+#include <drm/drm_buddy.h>
+
#include "../i915_selftest.h"
#include "mock_drm.h"
@@ -20,7 +22,6 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
-#include "i915_buddy.h"
#include "gt/intel_migrate.h"
#include "i915_memcpy.h"
#include "i915_ttm_buddy_manager.h"
@@ -369,7 +370,7 @@ static int igt_mock_splintered_region(void *arg)
struct drm_i915_private *i915 = mem->i915;
struct i915_ttm_buddy_resource *res;
struct drm_i915_gem_object *obj;
- struct i915_buddy_mm *mm;
+ struct drm_buddy *mm;
unsigned int expected_order;
LIST_HEAD(objects);
u64 size;
@@ -454,8 +455,8 @@ static int igt_mock_max_segment(void *arg)
struct drm_i915_private *i915 = mem->i915;
struct i915_ttm_buddy_resource *res;
struct drm_i915_gem_object *obj;
- struct i915_buddy_block *block;
- struct i915_buddy_mm *mm;
+ struct drm_buddy_block *block;
+ struct drm_buddy *mm;
struct list_head *blocks;
struct scatterlist *sg;
LIST_HEAD(objects);
@@ -485,8 +486,8 @@ static int igt_mock_max_segment(void *arg)
mm = res->mm;
size = 0;
list_for_each_entry(block, blocks, link) {
- if (i915_buddy_block_size(mm, block) > size)
- size = i915_buddy_block_size(mm, block);
+ if (drm_buddy_block_size(mm, block) > size)
+ size = drm_buddy_block_size(mm, block);
}
if (size < max_segment) {
pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 79e819334b4e..573d9b2e1a4a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -69,7 +69,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&i915->ggtt);
+ mock_fini_ggtt(to_gt(i915)->ggtt);
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
@@ -196,8 +196,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- mock_init_ggtt(i915, &i915->ggtt);
- to_gt(i915)->vm = i915_vm_get(&i915->ggtt.vm);
+ /* allocate the ggtt */
+ ret = intel_gt_assign_ggtt(to_gt(i915));
+ if (ret)
+ goto err_unlock;
+
+ mock_init_ggtt(to_gt(i915));
+ to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 1802baf80a17..568840e7ca66 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -33,23 +33,23 @@ static void mock_insert_page(struct i915_address_space *vm,
}
static void mock_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level, u32 flags)
{
}
static void mock_bind_ppgtt(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
+ vma_res->bound_flags |= flags;
}
static void mock_unbind_ppgtt(struct i915_address_space *vm,
- struct i915_vma *vma)
+ struct i915_vma_resource *vma_res)
{
}
@@ -93,23 +93,23 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
static void mock_bind_ggtt(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
}
static void mock_unbind_ggtt(struct i915_address_space *vm,
- struct i915_vma *vma)
+ struct i915_vma_resource *vma_res)
{
}
-void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
+void mock_init_ggtt(struct intel_gt *gt)
{
- memset(ggtt, 0, sizeof(*ggtt));
+ struct i915_ggtt *ggtt = gt->ggtt;
- ggtt->vm.gt = to_gt(i915);
- ggtt->vm.i915 = i915;
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = gt->i915;
ggtt->vm.is_ggtt = true;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
@@ -128,7 +128,6 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
- to_gt(i915)->ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index e3f224f43beb..d6eb90bd7f3f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -27,8 +27,9 @@
struct drm_i915_private;
struct i915_ggtt;
+struct intel_gt;
-void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
+void mock_init_ggtt(struct intel_gt *gt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index 8dc2f85c514b..24147ee7080e 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "dcss-dev.h"
@@ -131,7 +132,7 @@ static struct platform_driver dcss_platform_driver = {
},
};
-module_platform_driver(dcss_platform_driver);
+drm_module_platform_driver(dcss_platform_driver);
MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>");
MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
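The switch to drm_module_platform_driver() changes module init behaviour: unlike plain module_platform_driver(), registration is skipped when DRM drivers are globally disabled (e.g. via nomodeset). Roughly the idea, as a simplified kernel-style sketch rather than the real macro from <drm/drm_module.h>:

	static int __init dcss_init(void)
	{
		if (drm_firmware_drivers_only())	/* honour nomodeset */
			return -ENODEV;
		return platform_driver_register(&dcss_platform_driver);
	}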
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index b4943a56be09..542c4af70661 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -6,6 +6,7 @@
#include "ingenic-drm.h"
+#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
@@ -49,6 +50,11 @@ struct ingenic_dma_hwdesc {
u32 addr;
u32 id;
u32 cmd;
+ /* extended hw descriptor for jz4780 */
+ u32 offsize;
+ u32 pagewidth;
+ u32 cpos;
+ u32 dessize;
} __aligned(16);
struct ingenic_dma_hwdescs {
@@ -60,6 +66,7 @@ struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
bool map_noncoherent;
+ bool use_extended_hwdesc;
unsigned int max_width, max_height;
const u32 *formats_f0, *formats_f1;
unsigned int num_formats_f0, num_formats_f1;
@@ -173,7 +180,6 @@ static const struct regmap_config ingenic_drm_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
- .max_register = JZ_REG_LCD_SIZE1,
.writeable_reg = ingenic_drm_writeable_reg,
};
@@ -447,6 +453,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
+ if (plane == &priv->f0)
+ return -EINVAL;
+
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
@@ -663,6 +672,33 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
hwdesc->next = dma_hwdesc_addr(priv, next_id);
+ if (priv->soc_info->use_extended_hwdesc) {
+ hwdesc->cmd |= JZ_LCD_CMD_FRM_ENABLE;
+
+ /* Extended 8-word descriptor */
+ hwdesc->cpos = 0;
+ hwdesc->offsize = 0;
+ hwdesc->pagewidth = 0;
+
+ switch (newstate->fb->format->format) {
+ case DRM_FORMAT_XRGB1555:
+ hwdesc->cpos |= JZ_LCD_CPOS_RGB555;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ hwdesc->cpos |= JZ_LCD_CPOS_BPP_15_16;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ hwdesc->cpos |= JZ_LCD_CPOS_BPP_18_24;
+ break;
+ }
+ hwdesc->cpos |= (JZ_LCD_CPOS_COEFFICIENT_1 <<
+ JZ_LCD_CPOS_COEFFICIENT_OFFSET);
+ hwdesc->dessize =
+ (0xff << JZ_LCD_DESSIZE_ALPHA_OFFSET) |
+ FIELD_PREP(JZ_LCD_DESSIZE_HEIGHT_MASK, height - 1) |
+ FIELD_PREP(JZ_LCD_DESSIZE_WIDTH_MASK, width - 1);
+ }
+
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
fourcc = newstate->fb->format->format;
@@ -694,6 +730,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
| JZ_LCD_CFG_SPL_DISABLE | JZ_LCD_CFG_REV_DISABLE;
}
+ if (priv->soc_info->use_extended_hwdesc)
+ cfg |= JZ_LCD_CFG_DESCRIPTOR_8;
+
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -1011,6 +1050,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
struct ingenic_drm_bridge *ib;
struct drm_device *drm;
void __iomem *base;
+ struct resource *res;
+ struct regmap_config regmap_config;
long parent_rate;
unsigned int i, clone_mask = 0;
int ret, irq;
@@ -1056,14 +1097,16 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
drm->mode_config.helper_private = &ingenic_drm_mode_config_helpers;
- base = devm_platform_ioremap_resource(pdev, 0);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource\n");
return PTR_ERR(base);
}
+ regmap_config = ingenic_drm_regmap_config;
+ regmap_config.max_register = res->end - res->start;
priv->map = devm_regmap_init_mmio(dev, base,
- &ingenic_drm_regmap_config);
+ &regmap_config);
if (IS_ERR(priv->map)) {
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(priv->map);
@@ -1465,10 +1508,23 @@ static const struct jz_soc_info jz4770_soc_info = {
.num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
+static const struct jz_soc_info jz4780_soc_info = {
+ .needs_dev_clk = true,
+ .has_osd = true,
+ .use_extended_hwdesc = true,
+ .max_width = 4096,
+ .max_height = 2048,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
+};
+
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
+ { .compatible = "ingenic,jz4780-lcd", .data = &jz4780_soc_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
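The jz4780 path above packs the descriptor's dessize word with FIELD_PREP() against GENMASK()-defined fields: alpha in the top byte, height-1 and width-1 below it. A self-contained userspace sketch of the same packing, with minimal stand-ins for the kernel macros:

	#include <assert.h>
	#include <stdint.h>

	#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

	#define DESSIZE_ALPHA_OFFSET	24
	#define DESSIZE_HEIGHT_MASK	GENMASK32(23, 12)
	#define DESSIZE_WIDTH_MASK	GENMASK32(11, 0)

	int main(void)
	{
		uint32_t w = 1920, h = 1080;
		uint32_t dessize = (0xffu << DESSIZE_ALPHA_OFFSET) |
				   FIELD_PREP32(DESSIZE_HEIGHT_MASK, h - 1) |
				   FIELD_PREP32(DESSIZE_WIDTH_MASK, w - 1);

		/* fields read back as the programmed width-1 / height-1 */
		assert((dessize & DESSIZE_WIDTH_MASK) == w - 1);
		assert(((dessize & DESSIZE_HEIGHT_MASK) >> 12) == h - 1);
		return 0;
	}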
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 22654ac1dde1..cb1d09b62588 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -44,8 +44,11 @@
#define JZ_REG_LCD_XYP1 0x124
#define JZ_REG_LCD_SIZE0 0x128
#define JZ_REG_LCD_SIZE1 0x12c
+#define JZ_REG_LCD_PCFG 0x2c0
#define JZ_LCD_CFG_SLCD BIT(31)
+#define JZ_LCD_CFG_DESCRIPTOR_8 BIT(28)
+#define JZ_LCD_CFG_RECOVER_FIFO_UNDERRUN BIT(25)
#define JZ_LCD_CFG_PS_DISABLE BIT(23)
#define JZ_LCD_CFG_CLS_DISABLE BIT(22)
#define JZ_LCD_CFG_SPL_DISABLE BIT(21)
@@ -63,6 +66,7 @@
#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9)
#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8)
#define JZ_LCD_CFG_18_BIT BIT(7)
+#define JZ_LCD_CFG_24_BIT BIT(6)
#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4))
#define JZ_LCD_CFG_MODE_GENERIC_16BIT 0
@@ -132,6 +136,7 @@
#define JZ_LCD_CMD_SOF_IRQ BIT(31)
#define JZ_LCD_CMD_EOF_IRQ BIT(30)
#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
+#define JZ_LCD_CMD_FRM_ENABLE BIT(26)
#define JZ_LCD_SYNC_MASK 0x3ff
@@ -153,6 +158,7 @@
#define JZ_LCD_RGBC_EVEN_BGR (0x5 << 0)
#define JZ_LCD_OSDC_OSDEN BIT(0)
+#define JZ_LCD_OSDC_ALPHAEN BIT(2)
#define JZ_LCD_OSDC_F0EN BIT(3)
#define JZ_LCD_OSDC_F1EN BIT(4)
@@ -176,6 +182,38 @@
#define JZ_LCD_SIZE01_WIDTH_LSB 0
#define JZ_LCD_SIZE01_HEIGHT_LSB 16
+#define JZ_LCD_DESSIZE_ALPHA_OFFSET 24
+#define JZ_LCD_DESSIZE_HEIGHT_MASK GENMASK(23, 12)
+#define JZ_LCD_DESSIZE_WIDTH_MASK GENMASK(11, 0)
+
+#define JZ_LCD_CPOS_BPP_15_16 (4 << 27)
+#define JZ_LCD_CPOS_BPP_18_24 (5 << 27)
+#define JZ_LCD_CPOS_BPP_30 (7 << 27)
+#define JZ_LCD_CPOS_RGB555 BIT(30)
+#define JZ_LCD_CPOS_PREMULTIPLY_LCD BIT(26)
+#define JZ_LCD_CPOS_COEFFICIENT_OFFSET 24
+#define JZ_LCD_CPOS_COEFFICIENT_0 0
+#define JZ_LCD_CPOS_COEFFICIENT_1 1
+#define JZ_LCD_CPOS_COEFFICIENT_ALPHA1 2
+#define JZ_LCD_CPOS_COEFFICIENT_1_ALPHA1 3
+
+#define JZ_LCD_RGBC_RGB_PADDING BIT(15)
+#define JZ_LCD_RGBC_RGB_PADDING_FIRST BIT(14)
+#define JZ_LCD_RGBC_422 BIT(8)
+#define JZ_LCD_RGBC_RGB_FORMAT_ENABLE BIT(7)
+
+#define JZ_LCD_PCFG_PRI_MODE BIT(31)
+#define JZ_LCD_PCFG_HP_BST_4 (0 << 28)
+#define JZ_LCD_PCFG_HP_BST_8 (1 << 28)
+#define JZ_LCD_PCFG_HP_BST_16 (2 << 28)
+#define JZ_LCD_PCFG_HP_BST_32 (3 << 28)
+#define JZ_LCD_PCFG_HP_BST_64 (4 << 28)
+#define JZ_LCD_PCFG_HP_BST_16_CONT (5 << 28)
+#define JZ_LCD_PCFG_HP_BST_DISABLE (7 << 28)
+#define JZ_LCD_PCFG_THRESHOLD2_OFFSET 18
+#define JZ_LCD_PCFG_THRESHOLD1_OFFSET 9
+#define JZ_LCD_PCFG_THRESHOLD0_OFFSET 0
+
struct device;
struct drm_plane;
struct drm_plane_state;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 80f1d439841a..26aeaf0ab86e 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -302,42 +302,42 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (priv->afbcd.ops) {
ret = priv->afbcd.ops->init(priv);
if (ret)
- return ret;
+ goto free_drm;
}
/* Encoder Initialization */
ret = meson_encoder_cvbs_init(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
if (has_components) {
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all components\n");
- goto free_drm;
+ goto exit_afbcd;
}
}
ret = meson_encoder_hdmi_init(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = meson_plane_create(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = meson_overlay_create(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = meson_crtc_create(priv);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
- goto free_drm;
+ goto exit_afbcd;
drm_mode_config_reset(drm);
@@ -355,6 +355,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
uninstall_irq:
free_irq(priv->vsync_irq, drm);
+exit_afbcd:
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->exit(priv);
free_drm:
drm_dev_put(drm);
@@ -385,10 +388,8 @@ static void meson_drv_unbind(struct device *dev)
free_irq(priv->vsync_irq, drm);
drm_dev_put(drm);
- if (priv->afbcd.ops) {
- priv->afbcd.ops->reset(priv);
- meson_rdma_free(priv);
- }
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->exit(priv);
}
static const struct component_master_ops meson_drv_master_ops = {
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
index ffc6b584dbf8..0cdbe899402f 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.c
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
}
-static int meson_gxm_afbcd_init(struct meson_drm *priv)
-{
- return 0;
-}
-
static int meson_gxm_afbcd_reset(struct meson_drm *priv)
{
writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
@@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv)
return 0;
}
+static int meson_gxm_afbcd_init(struct meson_drm *priv)
+{
+ return 0;
+}
+
+static void meson_gxm_afbcd_exit(struct meson_drm *priv)
+{
+ meson_gxm_afbcd_reset(priv);
+}
+
static int meson_gxm_afbcd_enable(struct meson_drm *priv)
{
writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
@@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv)
struct meson_afbcd_ops meson_afbcd_gxm_ops = {
.init = meson_gxm_afbcd_init,
+ .exit = meson_gxm_afbcd_exit,
.reset = meson_gxm_afbcd_reset,
.enable = meson_gxm_afbcd_enable,
.disable = meson_gxm_afbcd_disable,
@@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
}
+static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+{
+ meson_rdma_reset(priv);
+
+ meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
+ VIU_SW_RESET_G12A_OSD1_AFBCD,
+ VIU_SW_RESET);
+ meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
+
+ return 0;
+}
+
static int meson_g12a_afbcd_init(struct meson_drm *priv)
{
int ret;
@@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv)
return 0;
}
-static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+static void meson_g12a_afbcd_exit(struct meson_drm *priv)
{
- meson_rdma_reset(priv);
-
- meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
- VIU_SW_RESET_G12A_OSD1_AFBCD,
- VIU_SW_RESET);
- meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
-
- return 0;
+ meson_g12a_afbcd_reset(priv);
+ meson_rdma_free(priv);
}
static int meson_g12a_afbcd_enable(struct meson_drm *priv)
@@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv)
struct meson_afbcd_ops meson_afbcd_g12a_ops = {
.init = meson_g12a_afbcd_init,
+ .exit = meson_g12a_afbcd_exit,
.reset = meson_g12a_afbcd_reset,
.enable = meson_g12a_afbcd_enable,
.disable = meson_g12a_afbcd_disable,
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
index 5e5523304f42..e77ddeb6416f 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.h
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
@@ -14,6 +14,7 @@
struct meson_afbcd_ops {
int (*init)(struct meson_drm *priv);
+ void (*exit)(struct meson_drm *priv);
int (*reset)(struct meson_drm *priv);
int (*enable)(struct meson_drm *priv);
int (*disable)(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 9c817b33c398..6e18d3bbd720 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -529,7 +529,10 @@ static void mgag200_set_format_regs(struct mga_device *mdev,
WREG_GFX(3, 0x00);
WREG_GFX(4, 0x00);
WREG_GFX(5, 0x40);
- WREG_GFX(6, 0x05);
+ /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode),
+ * so that it doesn't hang when running kexec/kdump on G200_SE rev42.
+ */
+ WREG_GFX(6, 0x0d);
WREG_GFX(7, 0x0f);
WREG_GFX(8, 0x0f);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 1eae5a9645f4..4fd931413705 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index d7e4a39a904e..4553f4985434 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -8,7 +8,7 @@
#include <linux/of_platform.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include "dp_catalog.h"
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 0728cc09c9ec..82afc8d5210f 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -7,7 +7,7 @@
#define _DP_AUX_H_
#include "dp_catalog.h"
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
int dp_aux_register(struct drm_dp_aux *dp_aux);
void dp_aux_unregister(struct drm_dp_aux *dp_aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 6ae9b29044b6..8a6d3ead3440 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -10,7 +10,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/rational.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_print.h>
#include "dp_catalog.h"
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index c724cb0bde9d..88ca6c3aedd3 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -12,7 +12,7 @@
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
#include <drm/drm_fixed.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
new file mode 100644
index 000000000000..1a82d7a4af9f
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __EDP_CONNECTOR_H__
+#define __EDP_CONNECTOR_H__
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+
+#define edp_read(offset) msm_readl((offset))
+#define edp_write(offset, data) msm_writel((data), (offset))
+
+struct edp_ctrl;
+struct edp_aux;
+struct edp_phy;
+
+struct msm_edp {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ /* the encoder we are hooked to (outside of eDP block) */
+ struct drm_encoder *encoder;
+
+ struct edp_ctrl *ctrl;
+
+ int irq;
+};
+
+/* eDP bridge */
+struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
+void edp_bridge_destroy(struct drm_bridge *bridge);
+
+/* eDP connector */
+struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
+
+/* AUX */
+void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux);
+void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
+irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
+void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
+
+/* Phy */
+bool msm_edp_phy_ready(struct edp_phy *phy);
+void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
+void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
+void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
+void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
+void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);
+
+/* Ctrl */
+irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
+void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
+int msm_edp_ctrl_init(struct msm_edp *edp);
+void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
+bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
+int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
+ struct drm_connector *connector, struct edid **edid);
+int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+/* @pixel_rate is in kHz */
+bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
+ u32 pixel_rate, u32 *pm, u32 *pn);
+
+#endif /* __EDP_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
new file mode 100644
index 000000000000..9f537b1fd849
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "edp.h"
+#include "edp.xml.h"
+
+#define VDDA_UA_ON_LOAD 100000 /* uA units */
+#define VDDA_UA_OFF_LOAD 100 /* uA units */
+
+#define DPCD_LINK_VOLTAGE_MAX 4
+#define DPCD_LINK_PRE_EMPHASIS_MAX 4
+
+#define EDP_LINK_BW_MAX DP_LINK_BW_2_7
+
+/* Link training return value */
+#define EDP_TRAIN_FAIL -1
+#define EDP_TRAIN_SUCCESS 0
+#define EDP_TRAIN_RECONFIG 1
+
+#define EDP_CLK_MASK_AHB BIT(0)
+#define EDP_CLK_MASK_AUX BIT(1)
+#define EDP_CLK_MASK_LINK BIT(2)
+#define EDP_CLK_MASK_PIXEL BIT(3)
+#define EDP_CLK_MASK_MDP_CORE BIT(4)
+#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL)
+#define EDP_CLK_MASK_AUX_CHAN \
+ (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE)
+#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN)
+
+#define EDP_BACKLIGHT_MAX 255
+
+#define EDP_INTR_STATUS1 \
+ (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \
+ EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
+ EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
+ EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \
+ EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR)
+#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
+#define EDP_INTR_STATUS2 \
+ (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \
+ EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \
+ EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED)
+#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
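+/* Each interrupt uses three adjacent register bits: status at bit n, ack at
+ * bit n + 1 and mask/enable at bit n + 2; hence the << 2 for the masks here
+ * and the << 1 ack shift in msm_edp_ctrl_irq().
+ */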
+
+struct edp_ctrl {
+ struct platform_device *pdev;
+
+ void __iomem *base;
+
+ /* regulators */
+ struct regulator *vdda_vreg; /* 1.8 V */
+ struct regulator *lvl_vreg;
+
+ /* clocks */
+ struct clk *aux_clk;
+ struct clk *pixel_clk;
+ struct clk *ahb_clk;
+ struct clk *link_clk;
+ struct clk *mdp_core_clk;
+
+ /* gpios */
+ struct gpio_desc *panel_en_gpio;
+ struct gpio_desc *panel_hpd_gpio;
+
+ /* completion and mutex */
+ struct completion idle_comp;
+ struct mutex dev_mutex; /* To protect device power status */
+
+ /* work queue */
+ struct work_struct on_work;
+ struct work_struct off_work;
+ struct workqueue_struct *workqueue;
+
+ /* Interrupt register lock */
+ spinlock_t irq_lock;
+
+ bool edp_connected;
+ bool power_on;
+
+ /* edid raw data */
+ struct edid *edid;
+
+ struct drm_dp_aux *drm_aux;
+
+ /* dpcd raw data */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ /* Link status */
+ u8 link_rate;
+ u8 lane_cnt;
+ u8 v_level;
+ u8 p_level;
+
+ /* Timing status */
+ u8 interlaced;
+ u32 pixel_rate; /* in kHz */
+ u32 color_depth;
+
+ struct edp_aux *aux;
+ struct edp_phy *phy;
+};
+
+struct edp_pixel_clk_div {
+ u32 rate; /* in kHz */
+ u32 m;
+ u32 n;
+};
+
+#define EDP_PIXEL_CLK_NUM 8
+static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
+ { /* Link clock = 162MHz, source clock = 810MHz */
+ {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */
+ {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */
+ {148500, 11, 60}, /* FHD 1920x1080@60Hz */
+ {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */
+ {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */
+ {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */
+ {138530, 33, 193}, /* AUO B116HAN03.0 Panel */
+ {141400, 48, 275}, /* AUO B133HTN01.2 Panel */
+ },
+ { /* Link clock = 270MHz, source clock = 675MHz */
+ {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */
+ {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */
+ {148500, 11, 50}, /* FHD 1920x1080@60Hz */
+ {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */
+ {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */
+ {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */
+ {138530, 63, 307}, /* AUO B116HAN03.0 Panel */
+ {141400, 53, 253}, /* AUO B133HTN01.2 Panel */
+ },
+};
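+/* Each entry satisfies rate ~= source_clk * m / n, e.g. in the first bank
+ * 810000 kHz * 11 / 60 = 148500 kHz for 1080p60, and in the second bank
+ * 675000 kHz * 11 / 50 yields the same pixel rate.
+ */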
+
+static int edp_clk_init(struct edp_ctrl *ctrl)
+{
+ struct platform_device *pdev = ctrl->pdev;
+ int ret;
+
+ ctrl->aux_clk = msm_clk_get(pdev, "core");
+ if (IS_ERR(ctrl->aux_clk)) {
+ ret = PTR_ERR(ctrl->aux_clk);
+ pr_err("%s: Can't find core clock, %d\n", __func__, ret);
+ ctrl->aux_clk = NULL;
+ return ret;
+ }
+
+ ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
+ if (IS_ERR(ctrl->pixel_clk)) {
+ ret = PTR_ERR(ctrl->pixel_clk);
+ pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
+ ctrl->pixel_clk = NULL;
+ return ret;
+ }
+
+ ctrl->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(ctrl->ahb_clk)) {
+ ret = PTR_ERR(ctrl->ahb_clk);
+ pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
+ ctrl->ahb_clk = NULL;
+ return ret;
+ }
+
+ ctrl->link_clk = msm_clk_get(pdev, "link");
+ if (IS_ERR(ctrl->link_clk)) {
+ ret = PTR_ERR(ctrl->link_clk);
+ pr_err("%s: Can't find link clock, %d\n", __func__, ret);
+ ctrl->link_clk = NULL;
+ return ret;
+ }
+
+ /* need mdp core clock to receive irq */
+ ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
+ if (IS_ERR(ctrl->mdp_core_clk)) {
+ ret = PTR_ERR(ctrl->mdp_core_clk);
+ pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
+ ctrl->mdp_core_clk = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask)
+{
+ int ret;
+
+ DBG("mask=%x", clk_mask);
+ /* ahb_clk should be enabled first */
+ if (clk_mask & EDP_CLK_MASK_AHB) {
+ ret = clk_prepare_enable(ctrl->ahb_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable ahb clk\n", __func__);
+ goto f0;
+ }
+ }
+ if (clk_mask & EDP_CLK_MASK_AUX) {
+ ret = clk_set_rate(ctrl->aux_clk, 19200000);
+ if (ret) {
+ pr_err("%s: Failed to set rate aux clk\n", __func__);
+ goto f1;
+ }
+ ret = clk_prepare_enable(ctrl->aux_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable aux clk\n", __func__);
+ goto f1;
+ }
+ }
+ /* Need to set rate and enable link_clk prior to pixel_clk */
+ if (clk_mask & EDP_CLK_MASK_LINK) {
+ DBG("edp->link_clk, set_rate %ld",
+ (unsigned long)ctrl->link_rate * 27000000);
+ ret = clk_set_rate(ctrl->link_clk,
+ (unsigned long)ctrl->link_rate * 27000000);
+ if (ret) {
+ pr_err("%s: Failed to set rate to link clk\n",
+ __func__);
+ goto f2;
+ }
+
+ ret = clk_prepare_enable(ctrl->link_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable link clk\n", __func__);
+ goto f2;
+ }
+ }
+ if (clk_mask & EDP_CLK_MASK_PIXEL) {
+ DBG("edp->pixel_clk, set_rate %ld",
+ (unsigned long)ctrl->pixel_rate * 1000);
+ ret = clk_set_rate(ctrl->pixel_clk,
+ (unsigned long)ctrl->pixel_rate * 1000);
+ if (ret) {
+ pr_err("%s: Failed to set rate to pixel clk\n",
+ __func__);
+ goto f3;
+ }
+
+ ret = clk_prepare_enable(ctrl->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable pixel clk\n", __func__);
+ goto f3;
+ }
+ }
+ if (clk_mask & EDP_CLK_MASK_MDP_CORE) {
+ ret = clk_prepare_enable(ctrl->mdp_core_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable mdp core clk\n", __func__);
+ goto f4;
+ }
+ }
+
+ return 0;
+
+f4:
+ if (clk_mask & EDP_CLK_MASK_PIXEL)
+ clk_disable_unprepare(ctrl->pixel_clk);
+f3:
+ if (clk_mask & EDP_CLK_MASK_LINK)
+ clk_disable_unprepare(ctrl->link_clk);
+f2:
+ if (clk_mask & EDP_CLK_MASK_AUX)
+ clk_disable_unprepare(ctrl->aux_clk);
+f1:
+ if (clk_mask & EDP_CLK_MASK_AHB)
+ clk_disable_unprepare(ctrl->ahb_clk);
+f0:
+ return ret;
+}
+
+static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
+{
+ if (clk_mask & EDP_CLK_MASK_MDP_CORE)
+ clk_disable_unprepare(ctrl->mdp_core_clk);
+ if (clk_mask & EDP_CLK_MASK_PIXEL)
+ clk_disable_unprepare(ctrl->pixel_clk);
+ if (clk_mask & EDP_CLK_MASK_LINK)
+ clk_disable_unprepare(ctrl->link_clk);
+ if (clk_mask & EDP_CLK_MASK_AUX)
+ clk_disable_unprepare(ctrl->aux_clk);
+ if (clk_mask & EDP_CLK_MASK_AHB)
+ clk_disable_unprepare(ctrl->ahb_clk);
+}
+
+static int edp_regulator_init(struct edp_ctrl *ctrl)
+{
+ struct device *dev = &ctrl->pdev->dev;
+ int ret;
+
+ DBG("");
+ ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
+ ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
+ ret);
+ ctrl->vdda_vreg = NULL;
+ return ret;
+ }
+ ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
+ ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
+ if (ret) {
+ pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
+ ret);
+ ctrl->lvl_vreg = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int edp_regulator_enable(struct edp_ctrl *ctrl)
+{
+ int ret;
+
+ ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
+ if (ret < 0) {
+ pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
+ goto vdda_set_fail;
+ }
+
+ ret = regulator_enable(ctrl->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
+ goto vdda_enable_fail;
+ }
+
+ ret = regulator_enable(ctrl->lvl_vreg);
+ if (ret) {
+ pr_err("Failed to enable lvl-vdd reg regulator, %d", ret);
+ goto lvl_enable_fail;
+ }
+
+ DBG("exit");
+ return 0;
+
+lvl_enable_fail:
+ regulator_disable(ctrl->vdda_vreg);
+vdda_enable_fail:
+ regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
+vdda_set_fail:
+ return ret;
+}
+
+static void edp_regulator_disable(struct edp_ctrl *ctrl)
+{
+ regulator_disable(ctrl->lvl_vreg);
+ regulator_disable(ctrl->vdda_vreg);
+ regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
+}
+
+static int edp_gpio_config(struct edp_ctrl *ctrl)
+{
+ struct device *dev = &ctrl->pdev->dev;
+ int ret;
+
+ ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
+ if (IS_ERR(ctrl->panel_hpd_gpio)) {
+ ret = PTR_ERR(ctrl->panel_hpd_gpio);
+ ctrl->panel_hpd_gpio = NULL;
+ pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
+ if (IS_ERR(ctrl->panel_en_gpio)) {
+ ret = PTR_ERR(ctrl->panel_en_gpio);
+ ctrl->panel_en_gpio = NULL;
+ pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret);
+ return ret;
+ }
+
+ DBG("gpio on");
+
+ return 0;
+}
+
+static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable)
+{
+ unsigned long flags;
+
+ DBG("%d", enable);
+ spin_lock_irqsave(&ctrl->irq_lock, flags);
+ if (enable) {
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1);
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2);
+ } else {
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0);
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0);
+ }
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+ DBG("exit");
+}
+
+static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
+{
+ u32 prate;
+ u32 lrate;
+ u32 bpp;
+ u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
+ u8 lane;
+
+ prate = ctrl->pixel_rate;
+ bpp = ctrl->color_depth * 3;
+
+ /*
+ * By default, use the maximum link rate and minimum lane count,
+ * so that we can do rate down shift during link training.
+ */
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
+
+ prate *= bpp;
+ prate /= 8; /* in kByte */
+
+ lrate = 270000; /* in kHz */
+ lrate *= ctrl->link_rate;
+ lrate /= 10; /* in kByte, 10 bits --> 8 bits */
+
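+ /* e.g. 1080p60 at 24 bpp: prate = 148500 * 24 / 8 = 445500 kB/s, while
+ * one lane at 2.7 Gb/s (link_rate 0x0a) carries 270000 kB/s, so the
+ * loop below settles on two lanes.
+ */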
+ for (lane = 1; lane <= max_lane; lane <<= 1) {
+ if (lrate >= prate)
+ break;
+ lrate <<= 1;
+ }
+
+ ctrl->lane_cnt = lane;
+ DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
+}
+
+static void edp_config_ctrl(struct edp_ctrl *ctrl)
+{
+ u32 data;
+ enum edp_color_depth depth;
+
+ data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
+
+ depth = EDP_6BIT;
+ if (ctrl->color_depth == 8)
+ depth = EDP_8BIT;
+
+ data |= EDP_CONFIGURATION_CTRL_COLOR(depth);
+
+ if (!ctrl->interlaced) /* progressive */
+ data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;
+
+ data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
+ EDP_CONFIGURATION_CTRL_STATIC_MVID);
+
+ edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
+}
+
+static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
+{
+ edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
+ /* Make sure H/W status is set */
+ wmb();
+}
+
+static int edp_lane_set_write(struct edp_ctrl *ctrl,
+ u8 voltage_level, u8 pre_emphasis_level)
+{
+ int i;
+ u8 buf[4];
+
+ if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
+ voltage_level |= 0x04;
+
+ if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
+ pre_emphasis_level |= 0x04;
+
+ pre_emphasis_level <<= 3;
+
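+ /* DPCD TRAINING_LANEx_SET (0x103..0x106): bits 1:0 voltage swing, bit 2
+ * max-swing-reached, bits 4:3 pre-emphasis, bit 5 max-pre-emphasis-reached.
+ * The same value is mirrored to all four lane registers below.
+ */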
+ for (i = 0; i < 4; i++)
+ buf[i] = voltage_level | pre_emphasis_level;
+
+ DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
+ if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
+ pr_err("%s: Set sw/pe to panel failed\n", __func__);
+ return -ENOLINK;
+ }
+
+ return 0;
+}
+
+static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
+{
+ u8 p = pattern;
+
+ DBG("pattern=%x", p);
+ if (drm_dp_dpcd_write(ctrl->drm_aux,
+ DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
+ pr_err("%s: Set training pattern to panel failed\n", __func__);
+ return -ENOLINK;
+ }
+
+ return 0;
+}
+
+static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
+ const u8 *link_status)
+{
+ int i;
+ u8 max = 0;
+ u8 data;
+
+ /* use the max level across lanes */
+ for (i = 0; i < ctrl->lane_cnt; i++) {
+ data = drm_dp_get_adjust_request_voltage(link_status, i);
+ DBG("lane=%d req_voltage_swing=0x%x", i, data);
+ if (max < data)
+ max = data;
+ }
+
+ ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ /* use the max level across lanes */
+ max = 0;
+ for (i = 0; i < ctrl->lane_cnt; i++) {
+ data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+ DBG("lane=%d req_pre_emphasis=0x%x", i, data);
+ if (max < data)
+ max = data;
+ }
+
+ ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
+}
+
+static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
+{
+ int cnt = 10;
+ u32 data;
+ u32 shift = train - 1;
+
+ DBG("train=%d", train);
+
+ edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
+ while (--cnt) {
+ data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
+ if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
+ break;
+ }
+
+ if (cnt == 0)
+ pr_err("%s: set link_train=%d failed\n", __func__, train);
+}
+
+static const u8 vm_pre_emphasis[4][4] = {
+ {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 dB */
+ {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 dB */
+ {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 dB */
+ {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 dB */
+};
+
+/* voltage swing, 0.2 V and 1.0 V are not supported */
+static const u8 vm_voltage_swing[4][4] = {
+ {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4 V */
+ {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 V */
+ {0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 V */
+ {0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 V, optional */
+};
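+/* 0xFF marks voltage/pre-emphasis combinations the PHY cannot drive;
+ * edp_voltage_pre_emphasise_set() rejects them with -EINVAL.
+ */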
+
+static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
+{
+ u32 value0;
+ u32 value1;
+
+ DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);
+
+ value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
+ value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
+
+ /* Configure host and panel only if both values are allowed */
+ if (value0 != 0xFF && value1 != 0xFF) {
+ msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
+ return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
+ }
+
+ return -EINVAL;
+}
+
+static int edp_start_link_train_1(struct edp_ctrl *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 old_v_level;
+ int tries;
+ int ret;
+ int rlen;
+
+ DBG("");
+
+ edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+ ret = edp_train_pattern_set_write(ctrl,
+ DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
+ if (ret)
+ return ret;
+
+ tries = 0;
+ old_v_level = ctrl->v_level;
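+ /* Give up on clock recovery once the sink requests the maximum voltage
+ * swing, or requests the same swing for five consecutive attempts.
+ */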
+ while (1) {
+ drm_dp_link_train_clock_recovery_delay(ctrl->drm_aux, ctrl->dpcd);
+
+ rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
+ if (rlen < DP_LINK_STATUS_SIZE) {
+ pr_err("%s: read link status failed\n", __func__);
+ return -ENOLINK;
+ }
+ if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
+ ret = 0;
+ break;
+ }
+
+ if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
+ ret = -1;
+ break;
+ }
+
+ if (old_v_level == ctrl->v_level) {
+ tries++;
+ if (tries >= 5) {
+ ret = -1;
+ break;
+ }
+ } else {
+ tries = 0;
+ old_v_level = ctrl->v_level;
+ }
+
+ edp_sink_train_set_adjust(ctrl, link_status);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int edp_start_link_train_2(struct edp_ctrl *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int tries = 0;
+ int ret;
+ int rlen;
+
+ DBG("");
+
+ edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+
+ ret = edp_train_pattern_set_write(ctrl,
+ DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
+ if (ret)
+ return ret;
+
+ while (1) {
+ drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
+
+ rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
+ if (rlen < DP_LINK_STATUS_SIZE) {
+ pr_err("%s: read link status failed\n", __func__);
+ return -ENOLINK;
+ }
+ if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
+ ret = 0;
+ break;
+ }
+
+ tries++;
+ if (tries > 10) {
+ ret = -1;
+ break;
+ }
+
+ edp_sink_train_set_adjust(ctrl, link_status);
+ ret = edp_voltage_pre_emphasise_set(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
+{
+ u32 prate, lrate, bpp;
+ u8 rate, lane, max_lane;
+ int changed = 0;
+
+ rate = ctrl->link_rate;
+ lane = ctrl->lane_cnt;
+ max_lane = drm_dp_max_lane_count(ctrl->dpcd);
+
+ bpp = ctrl->color_depth * 3;
+ prate = ctrl->pixel_rate;
+ prate *= bpp;
+ prate /= 8; /* in kByte */
+
+ if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
+ rate -= 4; /* reduce rate */
+ changed++;
+ }
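+ /* e.g. 2.7 Gb/s (0x0a) steps down to 1.62 Gb/s (0x06); doubling two
+ * lanes to four then leaves 162000 * 4 = 648000 kB/s of link capacity.
+ */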
+
+ if (changed) {
+ if (lane >= 1 && lane < max_lane)
+ lane <<= 1; /* increase lane */
+
+ lrate = 270000; /* in kHz */
+ lrate *= rate;
+ lrate /= 10; /* kByte, 10 bits --> 8 bits */
+ lrate *= lane;
+
+ DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
+ lrate, prate, rate, lane,
+ ctrl->pixel_rate,
+ bpp);
+
+ if (lrate > prate) {
+ ctrl->link_rate = rate;
+ ctrl->lane_cnt = lane;
+ DBG("new rate=%d %d", rate, lane);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
+{
+ int ret;
+
+ ret = edp_train_pattern_set_write(ctrl, 0);
+
+ drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
+
+ return ret;
+}
+
+static int edp_do_link_train(struct edp_ctrl *ctrl)
+{
+ u8 values[2];
+ int ret;
+
+ DBG("");
+ /*
+ * Set the current link rate and lane cnt to panel. They may have been
+ * adjusted and the values are different from them in DPCD CAP
+ */
+ values[0] = ctrl->lane_cnt;
+ values[1] = ctrl->link_rate;
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
+ sizeof(values)) < 0)
+ return EDP_TRAIN_FAIL;
+
+ ctrl->v_level = 0; /* start from default level */
+ ctrl->p_level = 0;
+
+ edp_state_ctrl(ctrl, 0);
+ if (edp_clear_training_pattern(ctrl))
+ return EDP_TRAIN_FAIL;
+
+ ret = edp_start_link_train_1(ctrl);
+ if (ret < 0) {
+ if (edp_link_rate_down_shift(ctrl) == 0) {
+ DBG("link reconfig");
+ ret = EDP_TRAIN_RECONFIG;
+ goto clear;
+ } else {
+ pr_err("%s: Training 1 failed", __func__);
+ ret = EDP_TRAIN_FAIL;
+ goto clear;
+ }
+ }
+ DBG("Training 1 completed successfully");
+
+ edp_state_ctrl(ctrl, 0);
+ if (edp_clear_training_pattern(ctrl))
+ return EDP_TRAIN_FAIL;
+
+ ret = edp_start_link_train_2(ctrl);
+ if (ret < 0) {
+ if (edp_link_rate_down_shift(ctrl) == 0) {
+ DBG("link reconfig");
+ ret = EDP_TRAIN_RECONFIG;
+ goto clear;
+ } else {
+ pr_err("%s: Training 2 failed", __func__);
+ ret = EDP_TRAIN_FAIL;
+ goto clear;
+ }
+ }
+ DBG("Training 2 completed successfully");
+
+ edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
+clear:
+ edp_clear_training_pattern(ctrl);
+
+ return ret;
+}
+
+static void edp_clock_synchronous(struct edp_ctrl *ctrl, int sync)
+{
+ u32 data;
+ enum edp_color_depth depth;
+
+ data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);
+
+ if (sync)
+ data |= EDP_MISC1_MISC0_SYNC;
+ else
+ data &= ~EDP_MISC1_MISC0_SYNC;
+
+ /* only legacy rgb mode supported */
+ depth = EDP_6BIT; /* Default */
+ if (ctrl->color_depth == 8)
+ depth = EDP_8BIT;
+ else if (ctrl->color_depth == 10)
+ depth = EDP_10BIT;
+ else if (ctrl->color_depth == 12)
+ depth = EDP_12BIT;
+ else if (ctrl->color_depth == 16)
+ depth = EDP_16BIT;
+
+ data |= EDP_MISC1_MISC0_COLOR(depth);
+
+ edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
+}
+
+static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
+{
+ u32 n_multi, m_multi = 5;
+
+ if (ctrl->link_rate == DP_LINK_BW_1_62) {
+ n_multi = 1;
+ } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
+ n_multi = 2;
+ } else {
+ pr_err("%s: Invalid link rate, %d\n", __func__,
+ ctrl->link_rate);
+ return -EINVAL;
+ }
+
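+ /* Mvid/Nvid must equal pixel_clk / link_symbol_clk. The m/n pairs in
+ * clk_divs are relative to the PLL source (810 MHz = 5 * 162 MHz and
+ * 675 MHz = 2.5 * 270 MHz), so Mvid = 5 * m while Nvid is n or 2 * n
+ * depending on the link rate.
+ */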
+ edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
+ edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);
+
+ return 0;
+}
+
+static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
+{
+ u32 data = 0;
+
+ edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
+ /* Make sure fully reset */
+ wmb();
+ usleep_range(500, 1000);
+
+ if (enable)
+ data |= EDP_MAINLINK_CTRL_ENABLE;
+
+ edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
+}
+
+static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
+{
+ if (enable) {
+ edp_regulator_enable(ctrl);
+ edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
+ msm_edp_phy_ctrl(ctrl->phy, 1);
+ msm_edp_aux_ctrl(ctrl->aux, 1);
+ gpiod_set_value(ctrl->panel_en_gpio, 1);
+ } else {
+ gpiod_set_value(ctrl->panel_en_gpio, 0);
+ msm_edp_aux_ctrl(ctrl->aux, 0);
+ msm_edp_phy_ctrl(ctrl->phy, 0);
+ edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
+ edp_regulator_disable(ctrl);
+ }
+}
+
+static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
+{
+ u32 m, n;
+
+ if (enable) {
+ /* Enable link channel clocks */
+ edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);
+
+ msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);
+
+ msm_edp_phy_vm_pe_init(ctrl->phy);
+
+ /* Make sure phy is programmed */
+ wmb();
+ msm_edp_phy_ready(ctrl->phy);
+
+ edp_config_ctrl(ctrl);
+ msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
+ edp_sw_mvid_nvid(ctrl, m, n);
+ edp_mainlink_ctrl(ctrl, 1);
+ } else {
+ edp_mainlink_ctrl(ctrl, 0);
+
+ msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
+ edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
+ }
+}
+
+static int edp_ctrl_training(struct edp_ctrl *ctrl)
+{
+ int ret;
+
+ /* Do link training only when power is on */
+ if (!ctrl->power_on)
+ return -EINVAL;
+
+train_start:
+ ret = edp_do_link_train(ctrl);
+ if (ret == EDP_TRAIN_RECONFIG) {
+ /* Re-configure main link */
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_link_enable(ctrl, 0);
+ msm_edp_phy_ctrl(ctrl->phy, 0);
+
+ /* Make sure link is fully disabled */
+ wmb();
+ usleep_range(500, 1000);
+
+ msm_edp_phy_ctrl(ctrl->phy, 1);
+ edp_ctrl_link_enable(ctrl, 1);
+ edp_ctrl_irq_enable(ctrl, 1);
+ goto train_start;
+ }
+
+ return ret;
+}
+
+static void edp_ctrl_on_worker(struct work_struct *work)
+{
+ struct edp_ctrl *ctrl = container_of(
+ work, struct edp_ctrl, on_work);
+ u8 value;
+ int ret;
+
+ mutex_lock(&ctrl->dev_mutex);
+
+ if (ctrl->power_on) {
+ DBG("already on");
+ goto unlock_ret;
+ }
+
+ edp_ctrl_phy_aux_enable(ctrl, 1);
+ edp_ctrl_link_enable(ctrl, 1);
+
+ edp_ctrl_irq_enable(ctrl, 1);
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret < 0)
+ goto fail;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+ }
+
+ ctrl->power_on = true;
+
+ /* Start link training */
+ ret = edp_ctrl_training(ctrl);
+ if (ret != EDP_TRAIN_SUCCESS)
+ goto fail;
+
+ DBG("DONE");
+ goto unlock_ret;
+
+fail:
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_link_enable(ctrl, 0);
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+ ctrl->power_on = false;
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+}
+
+static void edp_ctrl_off_worker(struct work_struct *work)
+{
+ struct edp_ctrl *ctrl = container_of(
+ work, struct edp_ctrl, off_work);
+ unsigned long time_left;
+
+ mutex_lock(&ctrl->dev_mutex);
+
+ if (!ctrl->power_on) {
+ DBG("already off");
+ goto unlock_ret;
+ }
+
+ reinit_completion(&ctrl->idle_comp);
+ edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
+
+ time_left = wait_for_completion_timeout(&ctrl->idle_comp,
+ msecs_to_jiffies(500));
+ if (!time_left)
+ DBG("%s: idle pattern timedout\n", __func__);
+
+ edp_state_ctrl(ctrl, 0);
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ u8 value;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret > 0) {
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ }
+ }
+
+ edp_ctrl_irq_enable(ctrl, 0);
+
+ edp_ctrl_link_enable(ctrl, 0);
+
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+
+ ctrl->power_on = false;
+
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+}
+
+irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
+{
+ u32 isr1, isr2, mask1, mask2;
+ u32 ack;
+
+ DBG("");
+ spin_lock(&ctrl->irq_lock);
+ isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
+ isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);
+
+ mask1 = isr1 & EDP_INTR_MASK1;
+ mask2 = isr2 & EDP_INTR_MASK2;
+
+ isr1 &= ~mask1; /* remove masks bit */
+ isr2 &= ~mask2;
+
+ DBG("isr=%x mask=%x isr2=%x mask2=%x",
+ isr1, mask1, isr2, mask2);
+
+ ack = isr1 & EDP_INTR_STATUS1;
+ ack <<= 1; /* ack bits */
+ ack |= mask1;
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack);
+
+ ack = isr2 & EDP_INTR_STATUS2;
+ ack <<= 1; /* ack bits */
+ ack |= mask2;
+ edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack);
+ spin_unlock(&ctrl->irq_lock);
+
+ if (isr1 & EDP_INTERRUPT_REG_1_HPD)
+ DBG("edp_hpd");
+
+ if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO)
+ DBG("edp_video_ready");
+
+ if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) {
+ DBG("idle_patterns_sent");
+ complete(&ctrl->idle_comp);
+ }
+
+ msm_edp_aux_irq(ctrl->aux, isr1);
+
+ return IRQ_HANDLED;
+}
+
+void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
+{
+ if (on)
+ queue_work(ctrl->workqueue, &ctrl->on_work);
+ else
+ queue_work(ctrl->workqueue, &ctrl->off_work);
+}
+
+int msm_edp_ctrl_init(struct msm_edp *edp)
+{
+ struct edp_ctrl *ctrl = NULL;
+ struct device *dev;
+ int ret;
+
+ if (!edp) {
+ pr_err("%s: edp is NULL!\n", __func__);
+ return -EINVAL;
+ }
+
+ dev = &edp->pdev->dev;
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ edp->ctrl = ctrl;
+ ctrl->pdev = edp->pdev;
+
+ ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ /* Get regulator, clock, gpio, pwm */
+ ret = edp_regulator_init(ctrl);
+ if (ret) {
+ pr_err("%s:regulator init fail\n", __func__);
+ return ret;
+ }
+ ret = edp_clk_init(ctrl);
+ if (ret) {
+ pr_err("%s:clk init fail\n", __func__);
+ return ret;
+ }
+ ret = edp_gpio_config(ctrl);
+ if (ret) {
+ pr_err("%s:failed to configure GPIOs: %d", __func__, ret);
+ return ret;
+ }
+
+ /* Init aux and phy */
+ ctrl->aux = msm_edp_aux_init(edp, ctrl->base, &ctrl->drm_aux);
+ if (!ctrl->aux || !ctrl->drm_aux) {
+ pr_err("%s:failed to init aux\n", __func__);
+ return -ENOMEM;
+ }
+
+ ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
+ if (!ctrl->phy) {
+ pr_err("%s:failed to init phy\n", __func__);
+ ret = -ENOMEM;
+ goto err_destroy_aux;
+ }
+
+ spin_lock_init(&ctrl->irq_lock);
+ mutex_init(&ctrl->dev_mutex);
+ init_completion(&ctrl->idle_comp);
+
+ /* setup workqueue */
+ ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
+ INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
+ INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);
+
+ return 0;
+
+err_destroy_aux:
+ msm_edp_aux_destroy(dev, ctrl->aux);
+ ctrl->aux = NULL;
+ return ret;
+}
+
+void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
+{
+ if (!ctrl)
+ return;
+
+ if (ctrl->workqueue) {
+ destroy_workqueue(ctrl->workqueue);
+ ctrl->workqueue = NULL;
+ }
+
+ if (ctrl->aux) {
+ msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux);
+ ctrl->aux = NULL;
+ }
+
+ kfree(ctrl->edid);
+ ctrl->edid = NULL;
+
+ mutex_destroy(&ctrl->dev_mutex);
+}
+
+bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
+{
+ mutex_lock(&ctrl->dev_mutex);
+ DBG("connect status = %d", ctrl->edp_connected);
+ if (ctrl->edp_connected) {
+ mutex_unlock(&ctrl->dev_mutex);
+ return true;
+ }
+
+ if (!ctrl->power_on) {
+ edp_ctrl_phy_aux_enable(ctrl, 1);
+ edp_ctrl_irq_enable(ctrl, 1);
+ }
+
+ if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd,
+ DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) {
+ pr_err("%s: AUX channel is NOT ready\n", __func__);
+ memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE);
+ } else {
+ ctrl->edp_connected = true;
+ }
+
+ if (!ctrl->power_on) {
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+ }
+
+ DBG("exit: connect status=%d", ctrl->edp_connected);
+
+ mutex_unlock(&ctrl->dev_mutex);
+
+ return ctrl->edp_connected;
+}
+
+int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
+ struct drm_connector *connector, struct edid **edid)
+{
+ mutex_lock(&ctrl->dev_mutex);
+
+ if (ctrl->edid) {
+ if (edid) {
+ DBG("Just return edid buffer");
+ *edid = ctrl->edid;
+ }
+ goto unlock_ret;
+ }
+
+ if (!ctrl->power_on) {
+ edp_ctrl_phy_aux_enable(ctrl, 1);
+ edp_ctrl_irq_enable(ctrl, 1);
+ }
+
+ /* Initialize link rate as panel max link rate */
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
+
+ ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
+ if (!ctrl->edid) {
+ pr_err("%s: edid read fail\n", __func__);
+ goto disable_ret;
+ }
+
+ if (edid)
+ *edid = ctrl->edid;
+
+disable_ret:
+ if (!ctrl->power_on) {
+ edp_ctrl_irq_enable(ctrl, 0);
+ edp_ctrl_phy_aux_enable(ctrl, 0);
+ }
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+ return 0;
+}
+
+int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ u32 hstart_from_sync, vstart_from_sync;
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&ctrl->dev_mutex);
+ /*
+ * Need to keep color depth, pixel rate and
+ * interlaced information in ctrl context
+ */
+ ctrl->color_depth = info->bpc;
+ ctrl->pixel_rate = mode->clock;
+ ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ /* Fill initial link config based on passed in timing */
+ edp_fill_link_cfg(ctrl);
+
+ if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) {
+ pr_err("%s, fail to prepare enable ahb clk\n", __func__);
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+ edp_clock_synchronous(ctrl, 1);
+
+ /* Configure eDP timing to HW */
+ edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER,
+ EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) |
+ EDP_TOTAL_HOR_VER_VERT(mode->vtotal));
+
+ vstart_from_sync = mode->vtotal - mode->vsync_start;
+ hstart_from_sync = mode->htotal - mode->hsync_start;
+ edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC,
+ EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) |
+ EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync));
+
+ data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(
+ mode->vsync_end - mode->vsync_start);
+ data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(
+ mode->hsync_end - mode->hsync_start);
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC;
+ edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data);
+
+ edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER,
+ EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) |
+ EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay));
+
+ edp_clk_disable(ctrl, EDP_CLK_MASK_AHB);
+
+unlock_ret:
+ mutex_unlock(&ctrl->dev_mutex);
+ return ret;
+}
+
+bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
+ u32 pixel_rate, u32 *pm, u32 *pn)
+{
+ const struct edp_pixel_clk_div *divs;
+ u32 err = 1; /* 1% error tolerance */
+ u32 clk_err;
+ int i;
+
+ if (ctrl->link_rate == DP_LINK_BW_1_62) {
+ divs = clk_divs[0];
+ } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
+ divs = clk_divs[1];
+ } else {
+ pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate);
+ return false;
+ }
+
+ for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) {
+ clk_err = abs(divs[i].rate - pixel_rate);
+ if ((divs[i].rate * err / 100) >= clk_err) {
+ if (pm)
+ *pm = divs[i].m;
+ if (pn)
+ *pn = divs[i].n;
+ return true;
+ }
+ }
+
+ DBG("pixel clock %d(kHz) not supported", pixel_rate);
+
+ return false;
+}
+
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 86d78634a979..375f26d4a417 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -374,12 +374,20 @@ static int mxsfb_remove(struct platform_device *pdev)
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
mxsfb_unload(drm);
drm_dev_put(drm);
return 0;
}
+static void mxsfb_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(drm);
+}
+
#ifdef CONFIG_PM_SLEEP
static int mxsfb_suspend(struct device *dev)
{
@@ -403,6 +411,7 @@ static const struct dev_pm_ops mxsfb_pm_ops = {
static struct platform_driver mxsfb_platform_driver = {
.probe = mxsfb_probe,
.remove = mxsfb_remove,
+ .shutdown = mxsfb_shutdown,
.driver = {
.name = "mxsfb",
.of_match_table = mxsfb_dt_ids,
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 9436310d0854..3ec690b6f0b4 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index ae1f41205520..df58c6445c51 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -35,7 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 40f90e353540..1b173191cc41 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -36,7 +36,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_util.h>
#include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 040ed88d362d..724d40ddd452 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "nouveau_drv.h"
#include "nouveau_connector.h"
@@ -147,6 +147,21 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
nv_encoder->dp.link_nr =
dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ int ret, i;
+ u8 sink_rates[16];
+
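+ /* DP_SUPPORTED_LINK_RATES holds up to eight 16-bit little-endian
+ * entries in 200 kHz units; * 200 / 10 converts the 8b/10b bit rate
+ * to the symbol clock in kHz, the unit dp.link_bw is compared in.
+ */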
+ ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates));
+ if (ret == sizeof(sink_rates)) {
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
+ int val = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
+ if (val && (i == 0 || val > nv_encoder->dp.link_bw))
+ nv_encoder->dp.link_bw = val;
+ }
+ }
+ }
+
NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw,
dpcd[DP_DPCD_REV]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 77c2fed76e8b..65ed84f88cca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,8 +30,8 @@
#include <subdev/bios/dcb.h>
#include <drm/drm_encoder_slave.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include "dispnv04/disp.h"
struct nv50_head_atom;
struct nouveau_connector;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2ca3207c13fc..2e517cdc24c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -162,11 +162,12 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
}
void
-nouveau_mem_del(struct ttm_resource *reg)
+nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
nouveau_mem_fini(mem);
+ ttm_resource_fini(man, reg);
kfree(mem);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 2c01166a90f2..325551eba5cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -23,7 +23,8 @@ nouveau_mem(struct ttm_resource *reg)
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
struct ttm_resource **);
-void nouveau_mem_del(struct ttm_resource *);
+void nouveau_mem_del(struct ttm_resource_manager *man,
+ struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 2ca9d9a9e5d5..85f1f5a0fe5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -36,9 +36,10 @@
#include <core/tegra.h>
static void
-nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
+nouveau_manager_del(struct ttm_resource_manager *man,
+ struct ttm_resource *reg)
{
- nouveau_mem_del(reg);
+ nouveau_mem_del(man, reg);
}
static int
@@ -62,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
if (ret) {
- nouveau_mem_del(*res);
+ nouveau_mem_del(man, *res);
return ret;
}
@@ -118,7 +119,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
(long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- nouveau_mem_del(*res);
+ nouveau_mem_del(man, *res);
return ret;
}
@@ -163,7 +164,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
man->func = &nouveau_vram_manager;
- ttm_resource_manager_init(man,
+ ttm_resource_manager_init(man, &drm->ttm.bdev,
drm->gem.vram_available >> PAGE_SHIFT);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
ttm_resource_manager_set_used(man, true);
@@ -210,7 +211,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
man->func = func;
man->use_tt = true;
- ttm_resource_manager_init(man, size_pages);
+ ttm_resource_manager_init(man, &drm->ttm.bdev, size_pages);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
ttm_resource_manager_set_used(man, true);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 9669472a2749..8e09315b8fb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -41,6 +41,10 @@
struct lt_state {
struct nvkm_dp *dp;
+
+ int repeaters;
+ int repeater;
+
u8 stat[6];
u8 conf[4];
bool pc2;
@@ -52,14 +56,26 @@ static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
struct nvkm_dp *dp = lt->dp;
+ u32 addr;
int ret;
- if (dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
- mdelay(dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
+ usleep_range(delay, delay * 2);
+
+ if (lt->repeater)
+ addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
+ else
+ addr = DPCD_LS02;
+
+ ret = nvkm_rdaux(dp->aux, addr, &lt->stat[0], 3);
+ if (ret)
+ return ret;
+
+ if (lt->repeater)
+ addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
else
- udelay(delay);
+ addr = DPCD_LS06;
- ret = nvkm_rdaux(dp->aux, DPCD_LS02, lt->stat, 6);
+ ret = nvkm_rdaux(dp->aux, addr, &lt->stat[4], 2);
if (ret)
return ret;
@@ -85,6 +101,7 @@ nvkm_dp_train_drive(struct lt_state *lt, bool pc)
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
u8 ver, hdr, cnt, len;
+ u32 addr;
u32 data;
int ret, i;
@@ -113,6 +130,9 @@ nvkm_dp_train_drive(struct lt_state *lt, bool pc)
OUTP_TRACE(&dp->outp, "config lane %d %02x %02x",
i, lt->conf[i], lpc2);
+ if (lt->repeater != lt->repeaters)
+ continue;
+
data = nvbios_dpout_match(bios, dp->outp.info.hasht,
dp->outp.info.hashm,
&ver, &hdr, &cnt, &len, &info);
@@ -129,7 +149,12 @@ nvkm_dp_train_drive(struct lt_state *lt, bool pc)
ocfg.pe, ocfg.tx_pu);
}
- ret = nvkm_wraux(dp->aux, DPCD_LC03(0), lt->conf, 4);
+ if (lt->repeater)
+ addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
+ else
+ addr = DPCD_LC03(0);
+
+ ret = nvkm_wraux(dp->aux, addr, lt->conf, 4);
if (ret)
return ret;
@@ -146,32 +171,59 @@ static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
struct nvkm_dp *dp = lt->dp;
+ u32 addr;
u8 sink_tp;
OUTP_TRACE(&dp->outp, "training pattern %d", pattern);
dp->outp.ior->func->dp.pattern(dp->outp.ior, pattern);
- nvkm_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ if (lt->repeater)
+ addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
+ else
+ addr = DPCD_LC02;
+
+ nvkm_rdaux(dp->aux, addr, &sink_tp, 1);
sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
- sink_tp |= pattern;
- nvkm_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
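+ /* TPS1-TPS3 map directly onto TRAINING_PATTERN_SET; TPS4 is encoded
+ * as the value 7.
+ */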
+ sink_tp |= (pattern != 4) ? pattern : 7;
+
+ if (pattern != 0)
+ sink_tp |= DPCD_LC02_SCRAMBLING_DISABLE;
+ else
+ sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
+ nvkm_wraux(dp->aux, addr, &sink_tp, 1);
}
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
+ struct nvkm_i2c_aux *aux = lt->dp->aux;
bool eq_done = false, cr_done = true;
- int tries = 0, i;
+ int tries = 0, usec = 0, i;
+ u8 data;
- if (lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
- nvkm_dp_train_pattern(lt, 3);
- else
- nvkm_dp_train_pattern(lt, 2);
+ if (lt->repeater) {
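+ /* AUX_RD_INTERVAL fields are in units of 4 ms; convert to microseconds. */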
+ if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
+ usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
+
+ nvkm_dp_train_pattern(lt, 4);
+ } else {
+ if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
+ lt->dp->dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
+ nvkm_dp_train_pattern(lt, 4);
+ else
+ if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
+ lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
+ nvkm_dp_train_pattern(lt, 3);
+ else
+ nvkm_dp_train_pattern(lt, 2);
+
+ usec = (lt->dp->dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
+ }
do {
if ((tries &&
nvkm_dp_train_drive(lt, lt->pc2)) ||
- nvkm_dp_train_sense(lt, lt->pc2, 400))
+ nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
break;
eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -193,13 +245,16 @@ nvkm_dp_train_cr(struct lt_state *lt)
{
bool cr_done = false, abort = false;
int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
- int tries = 0, i;
+ int tries = 0, usec = 0, i;
nvkm_dp_train_pattern(lt, 1);
+ if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
+ usec = (lt->dp->dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
+
do {
if (nvkm_dp_train_drive(lt, false) ||
- nvkm_dp_train_sense(lt, false, 100))
+ nvkm_dp_train_sense(lt, false, usec ? usec : 100))
break;
cr_done = true;
@@ -223,7 +278,7 @@ nvkm_dp_train_cr(struct lt_state *lt)
}
static int
-nvkm_dp_train_links(struct nvkm_dp *dp)
+nvkm_dp_train_links(struct nvkm_dp *dp, int rate)
{
struct nvkm_ior *ior = dp->outp.ior;
struct nvkm_disp *disp = dp->outp.disp;
@@ -233,13 +288,15 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
.dp = dp,
};
u32 lnkcmp;
- u8 sink[2];
+ u8 sink[2], data;
int ret;
OUTP_DBG(&dp->outp, "training %d x %d MB/s",
ior->dp.nr, ior->dp.bw * 27);
/* Intersect misc. capabilities of the OR and sink. */
+ if (disp->engine.subdev.device->chipset < 0x110)
+ dp->dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
if (disp->engine.subdev.device->chipset < 0xd0)
dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
@@ -287,8 +344,22 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
ior->func->dp.power(ior, ior->dp.nr);
+ /* Select LTTPR non-transparent mode if we have a valid configuration,
+ * use transparent mode otherwise.
+ */
+ if (dp->lttpr[0] >= 0x14) {
+ data = DPCD_LTTPR_MODE_TRANSPARENT;
+ nvkm_wraux(dp->aux, DPCD_LTTPR_MODE, &data, sizeof(data));
+
+ if (dp->lttprs) {
+ data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
+ nvkm_wraux(dp->aux, DPCD_LTTPR_MODE, &data, sizeof(data));
+ lt.repeaters = dp->lttprs;
+ }
+ }
+
/* Set desired link configuration on the sink. */
- sink[0] = ior->dp.bw;
+ sink[0] = (dp->rate[rate].dpcd < 0) ? ior->dp.bw : 0;
sink[1] = ior->dp.nr;
if (ior->dp.ef)
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
@@ -297,12 +368,33 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
if (ret)
return ret;
+ if (dp->rate[rate].dpcd >= 0) {
+ ret = nvkm_rdaux(dp->aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
+ if (ret)
+ return ret;
+
+ sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
+ sink[0] |= dp->rate[rate].dpcd;
+
+ ret = nvkm_wraux(dp->aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
+ if (ret)
+ return ret;
+ }
+
/* Attempt to train the link in this configuration. */
- memset(lt.stat, 0x00, sizeof(lt.stat));
- ret = nvkm_dp_train_cr(&lt);
- if (ret == 0)
- ret = nvkm_dp_train_eq(&lt);
- nvkm_dp_train_pattern(&lt, 0);
+ for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
+ if (lt.repeater)
+ OUTP_DBG(&dp->outp, "training LTTPR%d", lt.repeater);
+ else
+ OUTP_DBG(&dp->outp, "training sink");
+
+ memset(lt.stat, 0x00, sizeof(lt.stat));
+ ret = nvkm_dp_train_cr(&lt);
+ if (ret == 0)
+ ret = nvkm_dp_train_eq(&lt);
+ nvkm_dp_train_pattern(&lt, 0);
+ }
+
return ret;
}
@@ -345,63 +437,13 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
}
}
-static const struct dp_rates {
- u32 rate;
- u8 bw;
- u8 nr;
-} nvkm_dp_rates[] = {
- { 2160000, 0x14, 4 },
- { 1080000, 0x0a, 4 },
- { 1080000, 0x14, 2 },
- { 648000, 0x06, 4 },
- { 540000, 0x0a, 2 },
- { 540000, 0x14, 1 },
- { 324000, 0x06, 2 },
- { 270000, 0x0a, 1 },
- { 162000, 0x06, 1 },
- {}
-};
-
static int
nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
{
struct nvkm_ior *ior = dp->outp.ior;
- const u8 sink_nr = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
- const u8 sink_bw = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
- const u8 outp_nr = dp->outp.info.dpconf.link_nr;
- const u8 outp_bw = dp->outp.info.dpconf.link_bw;
- const struct dp_rates *failsafe = NULL, *cfg;
- int ret = -EINVAL;
+ int ret = -EINVAL, nr, rate;
u8 pwr;
- /* Find the lowest configuration of the OR that can support
- * the required link rate.
- *
- * We will refuse to program the OR to lower rates, even if
- * link training fails at higher rates (or even if the sink
- * can't support the rate at all, though the DD is supposed
- * to prevent such situations from happening).
- *
- * Attempting to do so can cause the entire display to hang,
- * and it's better to have a failed modeset than that.
- */
- for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
- /* Try to respect sink limits too when selecting
- * lowest link configuration.
- */
- if (!failsafe ||
- (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
- failsafe = cfg;
- }
-
- if (failsafe && cfg[1].rate < dataKBps)
- break;
- }
-
- if (WARN_ON(!failsafe))
- return ret;
-
/* Ensure sink is not in a low-power state. */
if (!nvkm_rdaux(dp->aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
@@ -411,25 +453,22 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
}
}
+ ior->dp.mst = dp->lt.mst;
+ ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
+ ior->dp.nr = 0;
+
/* Link training. */
- OUTP_DBG(&dp->outp, "training (min: %d x %d MB/s)",
- failsafe->nr, failsafe->bw * 27);
+ OUTP_DBG(&dp->outp, "training");
nvkm_dp_train_init(dp);
- for (cfg = nvkm_dp_rates; ret < 0 && cfg <= failsafe; cfg++) {
- /* Skip configurations not supported by both OR and sink. */
- if ((cfg->nr > outp_nr || cfg->bw > outp_bw ||
- cfg->nr > sink_nr || cfg->bw > sink_bw)) {
- if (cfg != failsafe)
- continue;
- OUTP_ERR(&dp->outp, "link rate unsupported by sink");
+ for (nr = dp->links; ret < 0 && nr; nr >>= 1) {
+ for (rate = 0; ret < 0 && rate < dp->rates; rate++) {
+ if (dp->rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
+ /* Program selected link configuration. */
+ ior->dp.bw = dp->rate[rate].rate / 27000;
+ ior->dp.nr = nr;
+ ret = nvkm_dp_train_links(dp, rate);
+ }
}
- ior->dp.mst = dp->lt.mst;
- ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
- ior->dp.bw = cfg->bw;
- ior->dp.nr = cfg->nr;
-
- /* Program selected link configuration. */
- ret = nvkm_dp_train_links(dp);
}
nvkm_dp_train_fini(dp);
if (ret < 0)
@@ -527,6 +566,47 @@ done:
}
static bool
+nvkm_dp_enable_supported_link_rates(struct nvkm_dp *dp)
+{
+ u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
+ int i, j, k;
+
+ if (dp->outp.conn->info.type != DCB_CONNECTOR_eDP ||
+ dp->dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
+ nvkm_rdaux(dp->aux, DPCD_RC10_SUPPORTED_LINK_RATES(0), sink_rates, sizeof(sink_rates)))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
+ const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
+
+ if (!rate || WARN_ON(dp->rates == ARRAY_SIZE(dp->rate)))
+ break;
+
+ if (rate > dp->outp.info.dpconf.link_bw * 27000) {
+ OUTP_DBG(&dp->outp, "rate %d !outp", rate);
+ continue;
+ }
+
+ for (j = 0; j < dp->rates; j++) {
+ if (rate > dp->rate[j].rate) {
+ for (k = dp->rates; k > j; k--)
+ dp->rate[k] = dp->rate[k - 1];
+ break;
+ }
+ }
+
+ dp->rate[j].dpcd = i / 2;
+ dp->rate[j].rate = rate;
+ dp->rates++;
+ }
+
+ for (i = 0; i < dp->rates; i++)
+ OUTP_DBG(&dp->outp, "link_rate[%d] = %d", dp->rate[i].dpcd, dp->rate[i].rate);
+
+ return dp->rates != 0;
+}
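
A minimal stand-alone sketch of the conversion above, with an assumed 0x1fa4
table entry rather than one read from real hardware: SUPPORTED_LINK_RATES
entries are little-endian 16-bit counts of 200 kHz, and the * 200 / 10 scaling
lands them in units where 1.62 Gbps reads as 162000, so dividing by 27000
recovers the classic bw code used by the fallback table.

	#include <stdio.h>

	int main(void)
	{
		const unsigned char e[2] = { 0xa4, 0x1f };	/* 8100 * 200 kHz = 1.62 Gbps */
		const unsigned int rate = ((e[1] << 8) | e[0]) * 200 / 10;

		printf("rate = %u, bw code = 0x%02x\n", rate, rate / 27000);
		/* prints: rate = 162000, bw code = 0x06 */
		return 0;
	}
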
+
+static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
struct nvkm_i2c_aux *aux = dp->aux;
@@ -538,9 +618,60 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
dp->present = true;
}
- if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
- sizeof(dp->dpcd)))
+ /* Detect any LTTPRs before reading DPCD receiver caps. */
+ if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, dp->lttpr, sizeof(dp->lttpr)) &&
+ dp->lttpr[0] >= 0x14 && dp->lttpr[2]) {
+ switch (dp->lttpr[2]) {
+ case 0x80: dp->lttprs = 1; break;
+ case 0x40: dp->lttprs = 2; break;
+ case 0x20: dp->lttprs = 3; break;
+ case 0x10: dp->lttprs = 4; break;
+ case 0x08: dp->lttprs = 5; break;
+ case 0x04: dp->lttprs = 6; break;
+ case 0x02: dp->lttprs = 7; break;
+ case 0x01: dp->lttprs = 8; break;
+ default:
+ /* Unknown LTTPR count, we'll switch to transparent mode. */
+ WARN_ON(1);
+ dp->lttprs = 0;
+ break;
+ }
+ } else {
+ /* No LTTPR support, or zero LTTPR count - don't touch it at all. */
+ memset(dp->lttpr, 0x00, sizeof(dp->lttpr));
+ }
+
+ if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, sizeof(dp->dpcd))) {
+ const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
+ const u8 *rate;
+ int rate_max;
+
+ dp->rates = 0;
+ dp->links = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
+ dp->links = min(dp->links, dp->outp.info.dpconf.link_nr);
+ if (dp->lttprs && dp->lttpr[4])
+ dp->links = min_t(int, dp->links, dp->lttpr[4]);
+
+ rate_max = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
+ rate_max = min(rate_max, dp->outp.info.dpconf.link_bw);
+ if (dp->lttprs && dp->lttpr[1])
+ rate_max = min_t(int, rate_max, dp->lttpr[1]);
+
+ if (!nvkm_dp_enable_supported_link_rates(dp)) {
+ for (rate = rates; *rate; rate++) {
+ if (*rate <= rate_max) {
+ if (WARN_ON(dp->rates == ARRAY_SIZE(dp->rate)))
+ break;
+
+ dp->rate[dp->rates].dpcd = -1;
+ dp->rate[dp->rates].rate = *rate * 27000;
+ dp->rates++;
+ }
+ }
+ }
+
return true;
+ }
}
if (dp->present) {
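
The PHY_REPEATER_CNT byte decoded by the switch above encodes N repeaters as
0x80 >> (N - 1); a hedged stand-alone equivalent of that decode (a sketch
only, the driver itself keeps the explicit switch):

	#include <stdio.h>

	static int lttpr_count(unsigned char cnt)
	{
		/* Reject zero or multi-bit values, as the default case does. */
		if (cnt == 0 || (cnt & (cnt - 1)))
			return 0;

		/* 0x80 -> 1, 0x40 -> 2, ..., 0x01 -> 8 */
		return 8 - __builtin_ctz(cnt);
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       lttpr_count(0x80), lttpr_count(0x04), lttpr_count(0x01));
		/* prints: 1 6 8 */
		return 0;
	}
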
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index e484d0c3b0d4..8e59dd469da6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -9,10 +9,7 @@
#include <subdev/bios/dp.h>
struct nvkm_dp {
- union {
- struct nvkm_outp base;
- struct nvkm_outp outp;
- };
+ struct nvkm_outp outp;
struct nvbios_dpout info;
u8 version;
@@ -21,8 +18,17 @@ struct nvkm_dp {
struct nvkm_notify hpd;
bool present;
+ u8 lttpr[6];
+ u8 lttprs;
u8 dpcd[16];
+ struct {
+ int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */
+ u32 rate;
+ } rate[8];
+ int rates;
+ int links;
+
struct mutex mutex;
struct {
atomic_t done;
@@ -42,8 +48,12 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_RC02_TPS3_SUPPORTED 0x40
#define DPCD_RC02_MAX_LANE_COUNT 0x1f
#define DPCD_RC03 0x00003
+#define DPCD_RC03_TPS4_SUPPORTED 0x80
#define DPCD_RC03_MAX_DOWNSPREAD 0x01
-#define DPCD_RC0E_AUX_RD_INTERVAL 0x0000e
+#define DPCD_RC0E 0x0000e
+#define DPCD_RC0E_AUX_RD_INTERVAL 0x7f
+#define DPCD_RC10_SUPPORTED_LINK_RATES(i) 0x00010
+#define DPCD_RC10_SUPPORTED_LINK_RATES__SIZE 16
/* DPCD Link Configuration */
#define DPCD_LC00_LINK_BW_SET 0x00100
@@ -51,7 +61,8 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
#define DPCD_LC01_LANE_COUNT_SET 0x1f
#define DPCD_LC02 0x00102
-#define DPCD_LC02_TRAINING_PATTERN_SET 0x03
+#define DPCD_LC02_TRAINING_PATTERN_SET 0x0f
+#define DPCD_LC02_SCRAMBLING_DISABLE 0x20
#define DPCD_LC03(l) ((l) + 0x00103)
#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20
#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
@@ -67,6 +78,8 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_LC10_LANE3_POST_CURSOR2_SET 0x30
#define DPCD_LC10_LANE2_MAX_POST_CURSOR2_REACHED 0x04
#define DPCD_LC10_LANE2_POST_CURSOR2_SET 0x03
+#define DPCD_LC15_LINK_RATE_SET 0x00115
+#define DPCD_LC15_LINK_RATE_SET_MASK 0x07
/* DPCD Link/Sink Status */
#define DPCD_LS02 0x00202
@@ -108,4 +121,14 @@ void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
#define DPCD_SC00_SET_POWER 0x03
#define DPCD_SC00_SET_POWER_D0 0x01
#define DPCD_SC00_SET_POWER_D3 0x03
+
+#define DPCD_LTTPR_REV 0xf0000
+#define DPCD_LTTPR_MODE 0xf0003
+#define DPCD_LTTPR_MODE_TRANSPARENT 0x55
+#define DPCD_LTTPR_MODE_NON_TRANSPARENT 0xaa
+#define DPCD_LTTPR_PATTERN_SET(i) (((i) - 1) * 0x50 + 0xf0010)
+#define DPCD_LTTPR_LANE0_SET(i) (((i) - 1) * 0x50 + 0xf0011)
+#define DPCD_LTTPR_AUX_RD_INTERVAL(i) (((i) - 1) * 0x50 + 0xf0020)
+#define DPCD_LTTPR_LANE0_1_STATUS(i) (((i) - 1) * 0x50 + 0xf0030)
+#define DPCD_LTTPR_LANE0_1_ADJUST(i) (((i) - 1) * 0x50 + 0xf0033)
#endif
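
The (i - 1) * 0x50 stride in the LTTPR macros above reflects that each
repeater instance owns a 0x50-byte DPCD block starting at 0xf0010; a small
sketch of the resulting PATTERN_SET addresses:

	#include <stdio.h>

	#define LTTPR_BLOCK_BASE	0xf0010
	#define LTTPR_BLOCK_SIZE	0x50
	#define LTTPR_PATTERN_SET(i)	(((i) - 1) * LTTPR_BLOCK_SIZE + LTTPR_BLOCK_BASE)

	int main(void)
	{
		for (int i = 1; i <= 8; i++)
			printf("LTTPR%d PATTERN_SET = 0x%05x\n", i, LTTPR_PATTERN_SET(i));
		/* LTTPR1 -> 0xf0010, LTTPR2 -> 0xf0060, ... LTTPR8 -> 0xf0240 */
		return 0;
	}
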
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 4d59d02525d9..56b8f4411988 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -77,7 +77,18 @@ g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
- nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
+ u32 data;
+
+ switch (pattern) {
+ case 0: data = 0x00001000; break;
+ case 1: data = 0x01000000; break;
+ case 2: data = 0x02000000; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_mask(device, 0x61c10c + loff, 0x0f001000, data);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
index 033827de9116..d2c05f5c4aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
@@ -37,6 +37,10 @@ ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
case 0x0a: clksor |= 0x00040000; break;
case 0x14: clksor |= 0x00080000; break;
case 0x1e: clksor |= 0x000c0000; break;
+ case 0x08: clksor |= 0x00100000; break;
+ case 0x09: clksor |= 0x00140000; break;
+ case 0x0c: clksor |= 0x00180000; break;
+ case 0x10: clksor |= 0x001c0000; break;
default:
WARN_ON(1);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 3b3643fb1019..c431e0b9fc11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -92,7 +92,19 @@ gf119_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
+ u32 data;
+
+ switch (pattern) {
+ case 0: data = 0x10101010; break;
+ case 1: data = 0x01010101; break;
+ case 2: data = 0x02020202; break;
+ case 3: data = 0x03030303; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_mask(device, 0x61c110 + soff, 0x1f1f1f1f, data);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 38045c92197f..3696bfd3bfd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -28,11 +28,23 @@ gm107_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
- const u32 data = 0x01010101 * pattern;
+ u32 mask = 0x1f1f1f1f, data;
+
+ switch (pattern) {
+ case 0: data = 0x10101010; break;
+ case 1: data = 0x01010101; break;
+ case 2: data = 0x02020202; break;
+ case 3: data = 0x03030303; break;
+ case 4: data = 0x1b1b1b1b; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
if (sor->asy.link & 1)
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+ nvkm_mask(device, 0x61c110 + soff, mask, data);
else
- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+ nvkm_mask(device, 0x61c12c + soff, mask, data);
}
static const struct nvkm_ior_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
index 667fa016496e..a6ea89a5d51a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -142,11 +142,12 @@ nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
hsfw->imem_size = desc->code_size;
hsfw->imem_tag = desc->start_tag;
- hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
- memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
-
+ hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL);
nvkm_firmware_put(fw);
- return 0;
+ if (!hsfw->imem)
+ return -ENOMEM;
+
+	return 0;
}
int
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 57af3d97be77..ab3fc86e7256 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -93,6 +93,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
exp_info.size = omap_gem_mmap_size(obj);
exp_info.flags = flags;
exp_info.priv = obj;
+ exp_info.resv = obj->resv;
return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 5fcbde789ddb..1be150ac758f 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -86,7 +86,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x0F, 0x73),
_INIT_DCS_CMD(0x95, 0xE6),
_INIT_DCS_CMD(0x96, 0xF0),
- _INIT_DCS_CMD(0x30, 0x11),
+ _INIT_DCS_CMD(0x30, 0x00),
_INIT_DCS_CMD(0x6D, 0x66),
_INIT_DCS_CMD(0x75, 0xA2),
_INIT_DCS_CMD(0x77, 0x3B),
@@ -112,17 +112,17 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xE0),
_INIT_DCS_CMD(0xFF, 0x24),
_INIT_DCS_CMD(0xFB, 0x01),
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 176ef0c3cc1d..a394a15dc3fb 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -36,8 +36,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_panel.h>
/**
@@ -1745,6 +1745,19 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e80_d50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 80,
+ .disable = 50,
+};
+
+static const struct panel_delay delay_100_500_e200 = {
+ .hpd_absent = 100,
+ .unprepare = 500,
+ .enable = 200,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1768,13 +1781,17 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
+
	{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 221db6512859..20666b6217e7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -14,8 +14,8 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e46db5e359c..0c8786ebffd1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2525,6 +2525,36 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
+static const struct display_timing multi_inno_mi0700s4t_6_timing = {
+ .pixelclock = { 29000000, 33000000, 38000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 180, 210, 240 },
+ .hback_porch = { 16, 16, 16 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 12, 22, 32 },
+ .vback_porch = { 10, 10, 10 },
+ .vsync_len = { 13, 13, 13 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc multi_inno_mi0700s4t_6 = {
+ .timings = &multi_inno_mi0700s4t_6_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct display_timing multi_inno_mi1010ait_1cp_timing = {
.pixelclock = { 68900000, 70000000, 73400000 },
.hactive = { 1280, 1280, 1280 },
@@ -3872,6 +3902,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
+ .compatible = "multi-inno,mi0700s4t-6",
+ .data = &multi_inno_mi0700s4t_6,
+ }, {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 5056777c7744..34f2bae1ec8c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -12,24 +12,6 @@ enum panfrost_hw_feature {
HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
HW_FEATURE_XAFFINITY,
- HW_FEATURE_OUT_OF_ORDER_EXEC,
- HW_FEATURE_MRT,
- HW_FEATURE_BRNDOUT_CC,
- HW_FEATURE_INTERPIPE_REG_ALIASING,
- HW_FEATURE_LD_ST_TILEBUFFER,
- HW_FEATURE_MSAA_16X,
- HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
- HW_FEATURE_T7XX_PAIRING_RULES,
- HW_FEATURE_LD_ST_LEA_TEX,
- HW_FEATURE_LINEAR_FILTER_FLOAT,
- HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
- HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
- HW_FEATURE_TEST4_DATUM_MODE,
- HW_FEATURE_NEXT_INSTRUCTION_TYPE,
- HW_FEATURE_BRNDOUT_KILL,
- HW_FEATURE_WARPING,
HW_FEATURE_V4,
HW_FEATURE_FLUSH_REDUCTION,
HW_FEATURE_PROTECTED_MODE,
@@ -42,128 +24,31 @@ enum panfrost_hw_feature {
};
#define hw_features_t600 (\
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_V4))
-#define hw_features_t620 (\
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
- BIT_ULL(HW_FEATURE_V4))
-
-#define hw_features_t720 (\
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_OPTIMIZED_COVERAGE_MASK) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
- BIT_ULL(HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_V4))
+#define hw_features_t620 hw_features_t600
+#define hw_features_t720 hw_features_t600
#define hw_features_t760 (\
BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
-// T860
-#define hw_features_t860 (\
- BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
- BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
- BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
- BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+#define hw_features_t860 hw_features_t760
-#define hw_features_t880 hw_features_t860
+#define hw_features_t880 hw_features_t760
-#define hw_features_t830 (\
- BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
- BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
- BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
- BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+#define hw_features_t830 hw_features_t760
-#define hw_features_t820 (\
- BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
- BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
- BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
- BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+#define hw_features_t820 hw_features_t760
#define hw_features_g71 (\
BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
@@ -173,71 +58,18 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
-#define hw_features_g51 (\
- BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
- BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
- BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
- BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
- BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
- BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
- BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
- BIT_ULL(HW_FEATURE_COHERENCY_REG))
+#define hw_features_g51 hw_features_g72
#define hw_features_g52 (\
BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
@@ -248,21 +80,6 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
@@ -276,21 +93,6 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
BIT_ULL(HW_FEATURE_XAFFINITY) | \
- BIT_ULL(HW_FEATURE_WARPING) | \
- BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
- BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
- BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
- BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
- BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
- BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
- BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
- BIT_ULL(HW_FEATURE_MRT) | \
- BIT_ULL(HW_FEATURE_MSAA_16X) | \
- BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
- BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
- BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
- BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index bbe628b306ee..15cec831a99a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -320,19 +320,36 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
int ret;
u32 val;
+ u64 core_mask = U64_MAX;
panfrost_gpu_init_quirks(pfdev);
- /* Just turn on everything for now */
- gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
+ if (pfdev->features.l2_present != 1) {
+ /*
+ * Only support one core group now.
+ * ~(l2_present - 1) unsets all bits in l2_present except
+ * the bottom bit. (l2_present - 2) has all the bits in
+ * the first core group set. AND them together to generate
+ * a mask of cores in the first core group.
+ */
+ core_mask = ~(pfdev->features.l2_present - 1) &
+ (pfdev->features.l2_present - 2);
+ dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+ hweight64(core_mask),
+ hweight64(pfdev->features.shader_present));
+ }
+ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
- val, val == pfdev->features.l2_present, 100, 20000);
+ val, val == (pfdev->features.l2_present & core_mask),
+ 100, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu L2");
- gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
+ gpu_write(pfdev, SHADER_PWRON_LO,
+ pfdev->features.shader_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
- val, val == pfdev->features.shader_present, 100, 20000);
+ val, val == (pfdev->features.shader_present & core_mask),
+ 100, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu shader");
@@ -360,8 +377,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
panfrost_gpu_init_features(pfdev);
- dma_set_mask_and_coherent(pfdev->dev,
+ err = dma_set_mask_and_coherent(pfdev->dev,
DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+ if (err)
+ return err;
+
dma_set_max_seg_size(pfdev->dev, UINT_MAX);
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4c1e551d9714..4798cf23d251 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -30,7 +30,7 @@
#include "atom.h"
#include "atom-bits.h"
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 607ad5620bd9..a7925a8290b2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -27,7 +27,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -204,7 +204,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+ if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) &&
(mode_clock * 5/4 <= max_tmds_clock))
bpc = 10;
else
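
The 3/2 and 5/4 factors above are the TMDS clock multipliers for 12 and
10 bpc; a hedged arithmetic check with illustrative clocks, not values from a
real EDID:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int mode_clock = 297000;		/* kHz, 4K30-class mode */
		const unsigned int max_tmds_clock = 400000;	/* kHz */

		if (mode_clock * 3 / 2 > max_tmds_clock)
			printf("12 bpc too fast: %u > %u\n",
			       mode_clock * 3 / 2, max_tmds_clock);
		if (mode_clock * 5 / 4 <= max_tmds_clock)
			printf("10 bpc fits: %u <= %u\n",
			       mode_clock * 5 / 4, max_tmds_clock);
		return 0;
	}
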
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 751c2c075e09..9f26baf7adb0 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index fe16f140a6b4..5288dc7a4897 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,8 +33,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <linux/i2c.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 11b21d605584..0d1283cdc8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -802,7 +802,7 @@ static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
@@ -820,7 +820,7 @@ static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused)
TTM_PL_TT);
struct drm_printer p = drm_seq_file_printer(m);
- man->func->debug(man, &p);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 9f1ecefc3933..fa5cfda4e90e 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,11 +2,13 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ select DRM_DP_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DP_HELPER if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 8abb5ac26807..c82901d9a9cc 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -22,7 +22,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/bridge/analogix_dp.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -117,7 +117,7 @@ static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
{
struct drm_display_info *di = &connector->display_info;
	/* The VOP cannot output YUV video formats over eDP correctly */
- u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
+ u32 mask = DRM_COLOR_FORMAT_YCBCR444 | DRM_COLOR_FORMAT_YCBCR422;
if ((di->color_formats & mask)) {
DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 16497c31d9f9..4740cc14beb8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -16,7 +16,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 81ac9b658a70..0d044146f4e9 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -7,7 +7,7 @@
#ifndef _CDN_DP_CORE_H
#define _CDN_DP_CORE_H
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index be74c87a8be4..0b972418067e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 09be9678f2bd..2494b079489d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -11,7 +11,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
index 6b4759ed6bfd..fc1deb1231a2 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -7,10 +7,10 @@
#include <linux/random.h>
-#include <drm/drm_dp_mst_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
-#include "../drm_dp_mst_topology_internal.h"
+#include "../dp/drm_dp_mst_topology_internal.h"
#include "test-drm_modeset_common.h"
int igt_dp_mst_calc_pbn_mode(void *ignored)
@@ -131,8 +131,10 @@ sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
return false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
- if (!txmsg)
+ if (!txmsg) {
+ kfree(out);
return false;
+ }
drm_dp_encode_sideband_req(in, txmsg);
ret = drm_dp_decode_sideband_req(txmsg, out);
diff --git a/drivers/gpu/drm/selftests/test-drm_plane_helper.c b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
index 0a9553f51796..ceebeede55ea 100644
--- a/drivers/gpu/drm/selftests/test-drm_plane_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
@@ -87,11 +87,15 @@ int igt_check_plane_state(void *ignored)
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
},
};
+ struct drm_plane plane = {
+ .dev = NULL
+ };
struct drm_framebuffer fb = {
.width = 2048,
.height = 2048
};
struct drm_plane_state plane_state = {
+ .plane = &plane,
.crtc = ZERO_SIZE_PTR,
.fb = &fb,
.rotation = DRM_MODE_ROTATE_0
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 222869b232ae..9f441aadf2d5 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -14,6 +14,7 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -183,6 +184,10 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
DRM_DEBUG("%s\n", __func__);
+ ret = drm_aperture_remove_framebuffers(false, &drv_driver);
+ if (ret)
+ return ret;
+
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
ddev = drm_dev_alloc(&drv_driver, dev);
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 32cb41b2202f..89897d5f5c72 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -247,14 +247,6 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
int ret, bpp;
u32 val;
- /* Update lane capabilities according to hw version */
- dsi->lane_min_kbps = LANE_MIN_KBPS;
- dsi->lane_max_kbps = LANE_MAX_KBPS;
- if (dsi->hw_version == HWVER_131) {
- dsi->lane_min_kbps *= 2;
- dsi->lane_max_kbps *= 2;
- }
-
pll_in_khz = (unsigned int)(clk_get_rate(dsi->pllref_clk) / 1000);
/* Compute requested pll out */
@@ -330,6 +322,103 @@ dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
return 0;
}
+#define CLK_TOLERANCE_HZ 50
+
+static enum drm_mode_status
+dw_mipi_dsi_stm_mode_valid(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format)
+{
+ struct dw_mipi_dsi_stm *dsi = priv_data;
+ unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
+ int ret, bpp;
+
+ bpp = mipi_dsi_pixel_format_to_bpp(format);
+ if (bpp < 0)
+ return MODE_BAD;
+
+ /* Compute requested pll out */
+ pll_out_khz = mode->clock * bpp / lanes;
+
+ if (pll_out_khz > dsi->lane_max_kbps)
+ return MODE_CLOCK_HIGH;
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ /* Add 20% to pll out to be higher than pixel bw */
+ pll_out_khz = (pll_out_khz * 12) / 10;
+ } else {
+ if (pll_out_khz < dsi->lane_min_kbps)
+ return MODE_CLOCK_LOW;
+ }
+
+ /* Compute best pll parameters */
+ idf = 0;
+ ndiv = 0;
+ odf = 0;
+ pll_in_khz = clk_get_rate(dsi->pllref_clk) / 1000;
+ ret = dsi_pll_get_params(dsi, pll_in_khz, pll_out_khz, &idf, &ndiv, &odf);
+ if (ret) {
+ DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
+ return MODE_ERROR;
+ }
+
+ if (!(mode_flags & MIPI_DSI_MODE_VIDEO_BURST)) {
+ unsigned int px_clock_hz, target_px_clock_hz, lane_mbps;
+ int dsi_short_packet_size_px, hfp, hsync, hbp, delay_to_lp;
+ struct dw_mipi_dsi_dphy_timing dphy_timing;
+
+ /* Get the adjusted pll out value */
+ pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
+
+ px_clock_hz = DIV_ROUND_CLOSEST_ULL(1000ULL * pll_out_khz * lanes, bpp);
+ target_px_clock_hz = mode->clock * 1000;
+ /*
+ * Filter modes according to the clock value, particularly useful for
+ * hdmi modes that require precise pixel clocks.
+ */
+ if (px_clock_hz < target_px_clock_hz - CLK_TOLERANCE_HZ ||
+ px_clock_hz > target_px_clock_hz + CLK_TOLERANCE_HZ)
+ return MODE_CLOCK_RANGE;
+
+		/* sync packets are coded as DSI short packets (4 bytes) */
+ dsi_short_packet_size_px = DIV_ROUND_UP(4 * BITS_PER_BYTE, bpp);
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ /* hsync must be longer than 4 bytes HSS packets */
+ if (hsync < dsi_short_packet_size_px)
+ return MODE_HSYNC_NARROW;
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+			/* HBP must be longer than a 4-byte HSE packet */
+ if (hbp < dsi_short_packet_size_px)
+ return MODE_HSYNC_NARROW;
+ hbp -= dsi_short_packet_size_px;
+ } else {
+			/* With sync events, HBP extends into the hsync */
+ hbp += hsync - dsi_short_packet_size_px;
+ }
+
+ lane_mbps = pll_out_khz / 1000;
+ ret = dw_mipi_dsi_phy_get_timing(priv_data, lane_mbps, &dphy_timing);
+ if (ret)
+ return MODE_ERROR;
+ /*
+ * In non-burst mode DSI has to enter in LP during HFP
+ * (horizontal front porch) or HBP (horizontal back porch) to
+ * resync with LTDC pixel clock.
+ */
+ delay_to_lp = DIV_ROUND_UP((dphy_timing.data_hs2lp + dphy_timing.data_lp2hs) *
+ lanes * BITS_PER_BYTE, bpp);
+ if (hfp < delay_to_lp && hbp < delay_to_lp)
+ return MODE_HSYNC;
+ }
+
+ return MODE_OK;
+}
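
A rough numeric check of the non-burst clock filter above, with an assumed
1080p60 mode on 2 lanes at 24 bpp (not a real panel) and
DIV_ROUND_CLOSEST_ULL open-coded:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int mode_clock_khz = 148500;	/* 1080p60 */
		const unsigned int bpp = 24, lanes = 2;

		/* Requested PLL output, as in dw_mipi_dsi_stm_mode_valid() */
		const unsigned int pll_out_khz = mode_clock_khz * bpp / lanes;

		/* Back-computed pixel clock must land within +/-50 Hz of target */
		const unsigned long long px_clock_hz =
			(1000ULL * pll_out_khz * lanes + bpp / 2) / bpp;

		printf("pll_out = %u kHz, px_clock = %llu Hz (target %u Hz)\n",
		       pll_out_khz, px_clock_hz, mode_clock_khz * 1000);
		return 0;
	}
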
+
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
.init = dw_mipi_dsi_phy_init,
.power_on = dw_mipi_dsi_phy_power_on,
@@ -340,6 +429,7 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
.max_data_lanes = 2,
+ .mode_valid = dw_mipi_dsi_stm_mode_valid,
.phy_ops = &dw_mipi_dsi_stm_phy_ops,
};
@@ -417,6 +507,14 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
goto err_dsi_probe;
}
+ /* set lane capabilities according to hw version */
+ dsi->lane_min_kbps = LANE_MIN_KBPS;
+ dsi->lane_max_kbps = LANE_MAX_KBPS;
+ if (dsi->hw_version == HWVER_131) {
+ dsi->lane_min_kbps *= 2;
+ dsi->lane_max_kbps *= 2;
+ }
+
dw_mipi_dsi_stm_plat_data.base = dsi->base;
dw_mipi_dsi_stm_plat_data.priv_data = dsi;
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index dbdee954692a..5eeb32c9c9ce 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -18,6 +18,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic.h>
@@ -46,15 +47,15 @@
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
+#define HWVER_40100 0x040100
/*
* The address of some registers depends on the HW version: such registers have
- * an extra offset specified with reg_ofs.
+ * an extra offset specified with layer_ofs.
*/
-#define REG_OFS_NONE 0
-#define REG_OFS_4 4 /* Insertion of "Layer Conf. 2" reg */
-#define REG_OFS (ldev->caps.reg_ofs)
-#define LAY_OFS 0x80 /* Register Offset between 2 layers */
+#define LAY_OFS_0 0x80
+#define LAY_OFS_1 0x100
+#define LAY_OFS (ldev->caps.layer_ofs)
/* Global register offsets */
#define LTDC_IDR 0x0000 /* IDentification */
@@ -75,29 +76,35 @@
#define LTDC_LIPCR 0x0040 /* Line Interrupt Position Conf. */
#define LTDC_CPSR 0x0044 /* Current Position Status */
#define LTDC_CDSR 0x0048 /* Current Display Status */
+#define LTDC_EDCR 0x0060 /* External Display Control */
+#define LTDC_FUT 0x0090 /* Fifo underrun Threshold */
/* Layer register offsets */
-#define LTDC_L1LC1R (0x80) /* L1 Layer Configuration 1 */
-#define LTDC_L1LC2R (0x84) /* L1 Layer Configuration 2 */
-#define LTDC_L1CR (0x84 + REG_OFS)/* L1 Control */
-#define LTDC_L1WHPCR (0x88 + REG_OFS)/* L1 Window Hor Position Config */
-#define LTDC_L1WVPCR (0x8C + REG_OFS)/* L1 Window Vert Position Config */
-#define LTDC_L1CKCR (0x90 + REG_OFS)/* L1 Color Keying Configuration */
-#define LTDC_L1PFCR (0x94 + REG_OFS)/* L1 Pixel Format Configuration */
-#define LTDC_L1CACR (0x98 + REG_OFS)/* L1 Constant Alpha Config */
-#define LTDC_L1DCCR (0x9C + REG_OFS)/* L1 Default Color Configuration */
-#define LTDC_L1BFCR (0xA0 + REG_OFS)/* L1 Blend Factors Configuration */
-#define LTDC_L1FBBCR (0xA4 + REG_OFS)/* L1 FrameBuffer Bus Control */
-#define LTDC_L1AFBCR (0xA8 + REG_OFS)/* L1 AuxFB Control */
-#define LTDC_L1CFBAR (0xAC + REG_OFS)/* L1 Color FrameBuffer Address */
-#define LTDC_L1CFBLR (0xB0 + REG_OFS)/* L1 Color FrameBuffer Length */
-#define LTDC_L1CFBLNR (0xB4 + REG_OFS)/* L1 Color FrameBuffer Line Nb */
-#define LTDC_L1AFBAR (0xB8 + REG_OFS)/* L1 AuxFB Address */
-#define LTDC_L1AFBLR (0xBC + REG_OFS)/* L1 AuxFB Length */
-#define LTDC_L1AFBLNR (0xC0 + REG_OFS)/* L1 AuxFB Line Number */
-#define LTDC_L1CLUTWR (0xC4 + REG_OFS)/* L1 CLUT Write */
-#define LTDC_L1YS1R (0xE0 + REG_OFS)/* L1 YCbCr Scale 1 */
-#define LTDC_L1YS2R (0xE4 + REG_OFS)/* L1 YCbCr Scale 2 */
+#define LTDC_L1C0R (ldev->caps.layer_regs[0]) /* L1 configuration 0 */
+#define LTDC_L1C1R (ldev->caps.layer_regs[1]) /* L1 configuration 1 */
+#define LTDC_L1RCR (ldev->caps.layer_regs[2]) /* L1 reload control */
+#define LTDC_L1CR (ldev->caps.layer_regs[3]) /* L1 control register */
+#define LTDC_L1WHPCR (ldev->caps.layer_regs[4]) /* L1 window horizontal position configuration */
+#define LTDC_L1WVPCR (ldev->caps.layer_regs[5]) /* L1 window vertical position configuration */
+#define LTDC_L1CKCR (ldev->caps.layer_regs[6]) /* L1 color keying configuration */
+#define LTDC_L1PFCR (ldev->caps.layer_regs[7]) /* L1 pixel format configuration */
+#define LTDC_L1CACR (ldev->caps.layer_regs[8]) /* L1 constant alpha configuration */
+#define LTDC_L1DCCR (ldev->caps.layer_regs[9]) /* L1 default color configuration */
+#define LTDC_L1BFCR (ldev->caps.layer_regs[10]) /* L1 blending factors configuration */
+#define LTDC_L1BLCR (ldev->caps.layer_regs[11]) /* L1 burst length configuration */
+#define LTDC_L1PCR (ldev->caps.layer_regs[12]) /* L1 planar configuration */
+#define LTDC_L1CFBAR (ldev->caps.layer_regs[13]) /* L1 color frame buffer address */
+#define LTDC_L1CFBLR (ldev->caps.layer_regs[14]) /* L1 color frame buffer length */
+#define LTDC_L1CFBLNR (ldev->caps.layer_regs[15]) /* L1 color frame buffer line number */
+#define LTDC_L1AFBA0R (ldev->caps.layer_regs[16]) /* L1 auxiliary frame buffer address 0 */
+#define LTDC_L1AFBA1R (ldev->caps.layer_regs[17]) /* L1 auxiliary frame buffer address 1 */
+#define LTDC_L1AFBLR (ldev->caps.layer_regs[18]) /* L1 auxiliary frame buffer length */
+#define LTDC_L1AFBLNR (ldev->caps.layer_regs[19]) /* L1 auxiliary frame buffer line number */
+#define LTDC_L1CLUTWR (ldev->caps.layer_regs[20]) /* L1 CLUT write */
+#define LTDC_L1CYR0R (ldev->caps.layer_regs[21]) /* L1 Conversion YCbCr RGB 0 */
+#define LTDC_L1CYR1R (ldev->caps.layer_regs[22]) /* L1 Conversion YCbCr RGB 1 */
+#define LTDC_L1FPF0R (ldev->caps.layer_regs[23]) /* L1 Flexible Pixel Format 0 */
+#define LTDC_L1FPF1R (ldev->caps.layer_regs[24]) /* L1 Flexible Pixel Format 1 */
/* Bit definitions */
#define SSCR_VSH GENMASK(10, 0) /* Vertical Synchronization Height */
@@ -164,6 +171,10 @@
#define ISR_TERRIF BIT(2) /* Transfer ERRor Interrupt Flag */
#define ISR_RRIF BIT(3) /* Register Reload Interrupt Flag */
+#define EDCR_OCYEN BIT(25) /* Output Conversion to YCbCr 422: ENable */
+#define EDCR_OCYSEL BIT(26) /* Output Conversion to YCbCr 422: SELection of the CCIR */
+#define EDCR_OCYCO BIT(27) /* Output Conversion to YCbCr 422: Chrominance Order */
+
#define LXCR_LEN BIT(0) /* Layer ENable */
#define LXCR_COLKEN BIT(1) /* Color Keying Enable */
#define LXCR_CLUTEN BIT(4) /* Color Look-Up Table ENable */
@@ -175,6 +186,7 @@
#define LXWVPCR_WVSPPOS GENMASK(26, 16) /* Window Vertical StoP POSition */
#define LXPFCR_PF GENMASK(2, 0) /* Pixel Format */
+#define PF_FLEXIBLE 0x7 /* Flexible Pixel Format selected */
#define LXCACR_CONSTA GENMASK(7, 0) /* CONSTant Alpha */
@@ -186,6 +198,25 @@
#define LXCFBLNR_CFBLN GENMASK(10, 0) /* Color Frame Buffer Line Number */
+#define LXCR_C1R_YIA BIT(0) /* Ycbcr 422 Interleaved Ability */
+#define LXCR_C1R_YSPA BIT(1) /* Ycbcr 420 Semi-Planar Ability */
+#define LXCR_C1R_YFPA BIT(2) /* Ycbcr 420 Full-Planar Ability */
+#define LXCR_C1R_SCA		BIT(31)	/* SCaling Ability */
+
+#define LxPCR_YREN BIT(9) /* Y Rescale Enable for the color dynamic range */
+#define LxPCR_OF BIT(8) /* Odd pixel First */
+#define LxPCR_CBF BIT(7) /* CB component First */
+#define LxPCR_YF BIT(6) /* Y component First */
+#define LxPCR_YCM GENMASK(5, 4) /* Ycbcr Conversion Mode */
+#define YCM_I 0x0 /* Interleaved 422 */
+#define YCM_SP 0x1 /* Semi-Planar 420 */
+#define YCM_FP 0x2 /* Full-Planar 420 */
+#define LxPCR_YCEN BIT(3) /* YCbCr-to-RGB Conversion Enable */
+
+#define LXRCR_IMR BIT(0) /* IMmediate Reload */
+#define LXRCR_VBR BIT(1) /* Vertical Blanking Reload */
+#define LXRCR_GRMSK BIT(2) /* Global (centralized) Reload MaSKed */
+
#define CLUT_SIZE 256
#define CONSTA_MAX 0xFF /* CONSTant Alpha MAX= 1.0 */
@@ -201,8 +232,12 @@ enum ltdc_pix_fmt {
/* RGB formats */
PF_ARGB8888, /* ARGB [32 bits] */
PF_RGBA8888, /* RGBA [32 bits] */
+ PF_ABGR8888, /* ABGR [32 bits] */
+ PF_BGRA8888, /* BGRA [32 bits] */
PF_RGB888, /* RGB [24 bits] */
+ PF_BGR888, /* BGR [24 bits] */
PF_RGB565, /* RGB [16 bits] */
+ PF_BGR565, /* BGR [16 bits] */
PF_ARGB1555, /* ARGB A:1 bit RGB:15 bits [16 bits] */
PF_ARGB4444, /* ARGB A:4 bits R/G/B: 4 bits each [16 bits] */
/* Indexed formats */
@@ -234,36 +269,198 @@ static const enum ltdc_pix_fmt ltdc_pix_fmt_a1[NB_PF] = {
PF_ARGB4444 /* 0x07 */
};
-static const u64 ltdc_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const enum ltdc_pix_fmt ltdc_pix_fmt_a2[NB_PF] = {
+ PF_ARGB8888, /* 0x00 */
+ PF_ABGR8888, /* 0x01 */
+ PF_RGBA8888, /* 0x02 */
+ PF_BGRA8888, /* 0x03 */
+ PF_RGB565, /* 0x04 */
+ PF_BGR565, /* 0x05 */
+ PF_RGB888, /* 0x06 */
+ PF_NONE /* 0x07 */
};
-static inline u32 reg_read(void __iomem *base, u32 reg)
-{
- return readl_relaxed(base + reg);
-}
+static const u32 ltdc_drm_fmt_a0[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_C8
+};
-static inline void reg_write(void __iomem *base, u32 reg, u32 val)
-{
- writel_relaxed(val, base + reg);
-}
+static const u32 ltdc_drm_fmt_a1[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_C8
+};
-static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
-{
- reg_write(base, reg, reg_read(base, reg) | mask);
-}
+static const u32 ltdc_drm_fmt_a2[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_C8
+};
-static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
-{
- reg_write(base, reg, reg_read(base, reg) & ~mask);
-}
+static const u32 ltdc_drm_fmt_ycbcr_cp[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY
+};
-static inline void reg_update_bits(void __iomem *base, u32 reg, u32 mask,
- u32 val)
-{
- reg_write(base, reg, (reg_read(base, reg) & ~mask) | val);
-}
+static const u32 ltdc_drm_fmt_ycbcr_sp[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21
+};
+
+static const u32 ltdc_drm_fmt_ycbcr_fp[] = {
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420
+};
+
+/* Layer register offsets */
+static const u32 ltdc_layer_regs_a0[] = {
+ 0x80, /* L1 configuration 0 */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x84, /* L1 control register */
+ 0x88, /* L1 window horizontal position configuration */
+ 0x8c, /* L1 window vertical position configuration */
+ 0x90, /* L1 color keying configuration */
+ 0x94, /* L1 pixel format configuration */
+ 0x98, /* L1 constant alpha configuration */
+ 0x9c, /* L1 default color configuration */
+ 0xa0, /* L1 blending factors configuration */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0xac, /* L1 color frame buffer address */
+ 0xb0, /* L1 color frame buffer length */
+ 0xb4, /* L1 color frame buffer line number */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0xc4, /* L1 CLUT write */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00 /* not available */
+};
+
+static const u32 ltdc_layer_regs_a1[] = {
+ 0x80, /* L1 configuration 0 */
+ 0x84, /* L1 configuration 1 */
+ 0x00, /* L1 reload control */
+ 0x88, /* L1 control register */
+ 0x8c, /* L1 window horizontal position configuration */
+ 0x90, /* L1 window vertical position configuration */
+ 0x94, /* L1 color keying configuration */
+ 0x98, /* L1 pixel format configuration */
+ 0x9c, /* L1 constant alpha configuration */
+ 0xa0, /* L1 default color configuration */
+ 0xa4, /* L1 blending factors configuration */
+ 0xa8, /* L1 burst length configuration */
+ 0x00, /* not available */
+ 0xac, /* L1 color frame buffer address */
+ 0xb0, /* L1 color frame buffer length */
+ 0xb4, /* L1 color frame buffer line number */
+ 0xb8, /* L1 auxiliary frame buffer address 0 */
+ 0xbc, /* L1 auxiliary frame buffer address 1 */
+ 0xc0, /* L1 auxiliary frame buffer length */
+ 0xc4, /* L1 auxiliary frame buffer line number */
+ 0xc8, /* L1 CLUT write */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00, /* not available */
+ 0x00 /* not available */
+};
+
+static const u32 ltdc_layer_regs_a2[] = {
+ 0x100, /* L1 configuration 0 */
+ 0x104, /* L1 configuration 1 */
+ 0x108, /* L1 reload control */
+ 0x10c, /* L1 control register */
+ 0x110, /* L1 window horizontal position configuration */
+ 0x114, /* L1 window vertical position configuration */
+ 0x118, /* L1 color keying configuration */
+ 0x11c, /* L1 pixel format configuration */
+ 0x120, /* L1 constant alpha configuration */
+ 0x124, /* L1 default color configuration */
+ 0x128, /* L1 blending factors configuration */
+ 0x12c, /* L1 burst length configuration */
+ 0x130, /* L1 planar configuration */
+ 0x134, /* L1 color frame buffer address */
+ 0x138, /* L1 color frame buffer length */
+ 0x13c, /* L1 color frame buffer line number */
+ 0x140, /* L1 auxiliary frame buffer address 0 */
+ 0x144, /* L1 auxiliary frame buffer address 1 */
+ 0x148, /* L1 auxiliary frame buffer length */
+ 0x14c, /* L1 auxiliary frame buffer line number */
+ 0x150, /* L1 CLUT write */
+ 0x16c, /* L1 Conversion YCbCr RGB 0 */
+ 0x170, /* L1 Conversion YCbCr RGB 1 */
+ 0x174, /* L1 Flexible Pixel Format 0 */
+ 0x178 /* L1 Flexible Pixel Format 1 */
+};
+
+static const u64 ltdc_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
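/*
 * Annotation (assumption based on DRM core behaviour, not from this diff):
 * modifier arrays passed to drm_universal_plane_init() must be terminated
 * by DRM_FORMAT_MOD_INVALID, as above. With only LINEAR advertised, the
 * core's default handling — a missing .format_mod_supported is treated as
 * "every listed modifier is valid for every listed format" — makes the
 * trivial ltdc_plane_format_mod_supported() removed later in this patch
 * redundant.
 */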
+
+static const struct regmap_config stm32_ltdc_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x400,
+ .use_relaxed_mmio = true,
+ .cache_type = REGCACHE_NONE,
+};
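/*
 * Illustration (assumed equivalence, not part of this diff): the open-coded
 * reg_set()/reg_clear()/reg_update_bits() helpers removed above map
 * one-to-one onto the regmap MMIO API, e.g.
 *
 *	reg_set(ldev->regs, reg, mask);                  becomes
 *	regmap_set_bits(ldev->regmap, reg, mask);
 *
 *	reg_update_bits(ldev->regs, reg, mask, val);     becomes
 *	regmap_update_bits(ldev->regmap, reg, mask, val);
 *
 * With .use_relaxed_mmio = true and REGCACHE_NONE, each regmap call should
 * reduce to plain relaxed readl/writel accesses, so behaviour is expected
 * to be unchanged outside the IRQ fast path.
 */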
+
+static const u32 ltdc_ycbcr2rgb_coeffs[DRM_COLOR_ENCODING_MAX][DRM_COLOR_RANGE_MAX][2] = {
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 0x02040199, /* (b_cb = 516 / r_cr = 409) */
+ 0x006400D0 /* (g_cb = 100 / g_cr = 208) */
+ },
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 0x01C60167, /* (b_cb = 454 / r_cr = 359) */
+ 0x005800B7 /* (g_cb = 88 / g_cr = 183) */
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 0x021D01CB, /* (b_cb = 541 / r_cr = 459) */
+ 0x00370089 /* (g_cb = 55 / g_cr = 137) */
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 0x01DB0193, /* (b_cb = 475 / r_cr = 403) */
+ 0x00300078 /* (g_cb = 48 / g_cr = 120) */
+ }
+ /* BT2020 not supported */
+};
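/*
 * Worked example (assumed interpretation of the packing): each u32 above
 * appears to hold two coefficients in unsigned fixed point with 8
 * fractional bits, (coeff_hi << 16) | coeff_lo. For BT.601 limited range:
 *
 *	r_cr = 409 -> 409 / 256 = 1.598  (textbook Cr->R factor ~1.596)
 *	b_cb = 516 -> 516 / 256 = 2.016  (textbook Cb->B factor ~2.017)
 *	g_cb = 100 -> 0.391, g_cr = 208 -> 0.813
 *
 * and for BT.601 full range, r_cr = 359 -> ~1.402, matching the standard
 * full-range coefficient.
 */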
static inline struct ltdc_device *crtc_to_ltdc(struct drm_crtc *crtc)
{
@@ -289,16 +486,30 @@ static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
case DRM_FORMAT_XRGB8888:
pf = PF_ARGB8888;
break;
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ pf = PF_ABGR8888;
+ break;
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX8888:
pf = PF_RGBA8888;
break;
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_BGRX8888:
+ pf = PF_BGRA8888;
+ break;
case DRM_FORMAT_RGB888:
pf = PF_RGB888;
break;
+ case DRM_FORMAT_BGR888:
+ pf = PF_BGR888;
+ break;
case DRM_FORMAT_RGB565:
pf = PF_RGB565;
break;
+ case DRM_FORMAT_BGR565:
+ pf = PF_BGR565;
+ break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
pf = PF_ARGB1555;
@@ -319,49 +530,138 @@ static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
return pf;
}
-static inline u32 to_drm_pixelformat(enum ltdc_pix_fmt pf)
+static inline u32 ltdc_set_flexible_pixel_format(struct drm_plane *plane, enum ltdc_pix_fmt pix_fmt)
{
- switch (pf) {
- case PF_ARGB8888:
- return DRM_FORMAT_ARGB8888;
- case PF_RGBA8888:
- return DRM_FORMAT_RGBA8888;
- case PF_RGB888:
- return DRM_FORMAT_RGB888;
- case PF_RGB565:
- return DRM_FORMAT_RGB565;
+ struct ltdc_device *ldev = plane_to_ltdc(plane);
+ u32 lofs = plane->index * LAY_OFS, ret = PF_FLEXIBLE;
+ int psize, alen, apos, rlen, rpos, glen, gpos, blen, bpos;
+
+ switch (pix_fmt) {
+ case PF_BGR888:
+ psize = 3;
+ alen = 0; apos = 0; rlen = 8; rpos = 0;
+ glen = 8; gpos = 8; blen = 8; bpos = 16;
+ break;
case PF_ARGB1555:
- return DRM_FORMAT_ARGB1555;
+ psize = 2;
+ alen = 1; apos = 15; rlen = 5; rpos = 10;
+ glen = 5; gpos = 5; blen = 5; bpos = 0;
+ break;
case PF_ARGB4444:
- return DRM_FORMAT_ARGB4444;
+ psize = 2;
+ alen = 4; apos = 12; rlen = 4; rpos = 8;
+ glen = 4; gpos = 4; blen = 4; bpos = 0;
+ break;
case PF_L8:
- return DRM_FORMAT_C8;
- case PF_AL44: /* No DRM support */
- case PF_AL88: /* No DRM support */
- case PF_NONE:
+ psize = 1;
+ alen = 0; apos = 0; rlen = 8; rpos = 0;
+ glen = 8; gpos = 0; blen = 8; bpos = 0;
+ break;
+ case PF_AL44:
+ psize = 1;
+ alen = 4; apos = 4; rlen = 4; rpos = 0;
+ glen = 4; gpos = 0; blen = 4; bpos = 0;
+ break;
+ case PF_AL88:
+ psize = 2;
+ alen = 8; apos = 8; rlen = 8; rpos = 0;
+ glen = 8; gpos = 0; blen = 8; bpos = 0;
+ break;
default:
- return 0;
+ ret = NB_PF; /* error case, trace msg is handled by the caller */
+ break;
}
+
+ if (ret == PF_FLEXIBLE) {
+ regmap_write(ldev->regmap, LTDC_L1FPF0R + lofs,
+ (rlen << 14) + (rpos << 9) + (alen << 5) + apos);
+
+ regmap_write(ldev->regmap, LTDC_L1FPF1R + lofs,
+ (psize << 18) + (blen << 14) + (bpos << 9) + (glen << 5) + gpos);
+ }
+
+ return ret;
}
-static inline u32 get_pixelformat_without_alpha(u32 drm)
+/*
+ * All non-alpha color formats derived from native alpha color formats are
+ * characterized by an 'X' in their FourCC format code.
+ */
+static inline u32 is_xrgb(u32 drm)
{
- switch (drm) {
- case DRM_FORMAT_ARGB4444:
- return DRM_FORMAT_XRGB4444;
- case DRM_FORMAT_RGBA4444:
- return DRM_FORMAT_RGBX4444;
- case DRM_FORMAT_ARGB1555:
- return DRM_FORMAT_XRGB1555;
- case DRM_FORMAT_RGBA5551:
- return DRM_FORMAT_RGBX5551;
- case DRM_FORMAT_ARGB8888:
- return DRM_FORMAT_XRGB8888;
- case DRM_FORMAT_RGBA8888:
- return DRM_FORMAT_RGBX8888;
+ return ((drm & 0xFF) == 'X' || ((drm >> 8) & 0xFF) == 'X');
+}
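/*
 * Example (DRM core fact, not from this diff): FourCC codes are four packed
 * ASCII bytes, so the test above catches both 'X'-first and 'X'-second
 * layouts:
 *
 *	DRM_FORMAT_XRGB8888 == fourcc_code('X', 'R', '2', '4')  // byte 0
 *	DRM_FORMAT_RGBX8888 == fourcc_code('R', 'X', '2', '4')  // byte 1
 *
 * The alpha variants carry 'A' in those positions, so is_xrgb() singles
 * out the no-alpha formats.
 */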
+
+static inline void ltdc_set_ycbcr_config(struct drm_plane *plane, u32 drm_pix_fmt)
+{
+ struct ltdc_device *ldev = plane_to_ltdc(plane);
+ struct drm_plane_state *state = plane->state;
+ u32 lofs = plane->index * LAY_OFS;
+ u32 val;
+
+ switch (drm_pix_fmt) {
+ case DRM_FORMAT_YUYV:
+ val = (YCM_I << 4) | LxPCR_YF | LxPCR_CBF;
+ break;
+ case DRM_FORMAT_YVYU:
+ val = (YCM_I << 4) | LxPCR_YF;
+ break;
+ case DRM_FORMAT_UYVY:
+ val = (YCM_I << 4) | LxPCR_CBF;
+ break;
+ case DRM_FORMAT_VYUY:
+ val = (YCM_I << 4);
+ break;
+ case DRM_FORMAT_NV12:
+ val = (YCM_SP << 4) | LxPCR_CBF;
+ break;
+ case DRM_FORMAT_NV21:
+ val = (YCM_SP << 4);
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ val = (YCM_FP << 4);
+ break;
default:
- return 0;
+ /* RGB or not a YCbCr supported format */
+ break;
+ }
+
+ /* Enable limited range */
+ if (state->color_range == DRM_COLOR_YCBCR_LIMITED_RANGE)
+ val |= LxPCR_YREN;
+
+ /* enable ycbcr conversion */
+ val |= LxPCR_YCEN;
+
+ regmap_write(ldev->regmap, LTDC_L1PCR + lofs, val);
+}
+
+static inline void ltdc_set_ycbcr_coeffs(struct drm_plane *plane)
+{
+ struct ltdc_device *ldev = plane_to_ltdc(plane);
+ struct drm_plane_state *state = plane->state;
+ enum drm_color_encoding enc = state->color_encoding;
+ enum drm_color_range ran = state->color_range;
+ u32 lofs = plane->index * LAY_OFS;
+
+ if (enc != DRM_COLOR_YCBCR_BT601 && enc != DRM_COLOR_YCBCR_BT709) {
+ DRM_ERROR("color encoding %d not supported, using bt601 by default\n", enc);
+ /* fall back to DRM_COLOR_YCBCR_BT601 */
+ enc = DRM_COLOR_YCBCR_BT601;
+ }
+
+ if (ran != DRM_COLOR_YCBCR_LIMITED_RANGE && ran != DRM_COLOR_YCBCR_FULL_RANGE) {
+ DRM_ERROR("color range %d not supported, using limited range by default\n", ran);
+ /* fall back to DRM_COLOR_YCBCR_LIMITED_RANGE */
+ ran = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
+
+ DRM_DEBUG_DRIVER("Color encoding=%d, range=%d\n", enc, ran);
+ regmap_write(ldev->regmap, LTDC_L1CYR0R + lofs,
+ ltdc_ycbcr2rgb_coeffs[enc][ran][0]);
+ regmap_write(ldev->regmap, LTDC_L1CYR1R + lofs,
+ ltdc_ycbcr2rgb_coeffs[enc][ran][1]);
}
static irqreturn_t ltdc_irq_thread(int irq, void *arg)
@@ -390,9 +690,13 @@ static irqreturn_t ltdc_irq(int irq, void *arg)
struct drm_device *ddev = arg;
struct ltdc_device *ldev = ddev->dev_private;
- /* Read & Clear the interrupt status */
- ldev->irq_status = reg_read(ldev->regs, LTDC_ISR);
- reg_write(ldev->regs, LTDC_ICR, ldev->irq_status);
+ /*
+ * Read & clear the interrupt status.
+ * To keep this hard-IRQ critical section fast, the raw relaxed
+ * accessors are used here instead of the regmap functions.
+ */
+ ldev->irq_status = readl_relaxed(ldev->regs + LTDC_ISR);
+ writel_relaxed(ldev->irq_status, ldev->regs + LTDC_ICR);
return IRQ_WAKE_THREAD;
}
@@ -416,7 +720,7 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
for (i = 0; i < CLUT_SIZE; i++, lut++) {
val = ((lut->red << 8) & 0xff0000) | (lut->green & 0xff00) |
(lut->blue >> 8) | (i << 24);
- reg_write(ldev->regs, LTDC_L1CLUTWR, val);
+ regmap_write(ldev->regmap, LTDC_L1CLUTWR, val);
}
}
@@ -431,13 +735,14 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(ddev->dev);
/* Sets the background color value */
- reg_write(ldev->regs, LTDC_BCCR, BCCR_BCBLACK);
+ regmap_write(ldev->regmap, LTDC_BCCR, BCCR_BCBLACK);
/* Enable IRQ */
- reg_set(ldev->regs, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
+ regmap_set_bits(ldev->regmap, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
/* Commit shadow registers = update planes at next vblank */
- reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
+ if (!ldev->caps.plane_reg_shadow)
+ regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_VBR);
drm_crtc_vblank_on(crtc);
}
@@ -453,10 +758,11 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
/* disable IRQ */
- reg_clear(ldev->regs, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
/* immediately commit disable of layers before switching off LTDC */
- reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR);
+ if (!ldev->caps.plane_reg_shadow)
+ regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_IMR);
pm_runtime_put_sync(ddev->dev);
}
@@ -533,6 +839,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
+ u32 bus_formats = MEDIA_BUS_FMT_RGB888_1X24;
u32 bus_flags = 0;
u32 val;
int ret;
@@ -558,8 +865,11 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (bridge && bridge->timings)
bus_flags = bridge->timings->input_bus_flags;
- else if (connector)
+ else if (connector) {
bus_flags = connector->display_info.bus_flags;
+ if (connector->display_info.num_bus_formats)
+ bus_formats = connector->display_info.bus_formats[0];
+ }
if (!pm_runtime_active(ddev->dev)) {
ret = pm_runtime_get_sync(ddev->dev);
@@ -604,26 +914,56 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
val |= GCR_PCPOL;
- reg_update_bits(ldev->regs, LTDC_GCR,
- GCR_HSPOL | GCR_VSPOL | GCR_DEPOL | GCR_PCPOL, val);
+ regmap_update_bits(ldev->regmap, LTDC_GCR,
+ GCR_HSPOL | GCR_VSPOL | GCR_DEPOL | GCR_PCPOL, val);
/* Set Synchronization size */
val = (hsync << 16) | vsync;
- reg_update_bits(ldev->regs, LTDC_SSCR, SSCR_VSH | SSCR_HSW, val);
+ regmap_update_bits(ldev->regmap, LTDC_SSCR, SSCR_VSH | SSCR_HSW, val);
/* Set Accumulated Back porch */
val = (accum_hbp << 16) | accum_vbp;
- reg_update_bits(ldev->regs, LTDC_BPCR, BPCR_AVBP | BPCR_AHBP, val);
+ regmap_update_bits(ldev->regmap, LTDC_BPCR, BPCR_AVBP | BPCR_AHBP, val);
/* Set Accumulated Active Width */
val = (accum_act_w << 16) | accum_act_h;
- reg_update_bits(ldev->regs, LTDC_AWCR, AWCR_AAW | AWCR_AAH, val);
+ regmap_update_bits(ldev->regmap, LTDC_AWCR, AWCR_AAW | AWCR_AAH, val);
/* Set total width & height */
val = (total_width << 16) | total_height;
- reg_update_bits(ldev->regs, LTDC_TWCR, TWCR_TOTALH | TWCR_TOTALW, val);
+ regmap_update_bits(ldev->regmap, LTDC_TWCR, TWCR_TOTALH | TWCR_TOTALW, val);
- reg_write(ldev->regs, LTDC_LIPCR, (accum_act_h + 1));
+ regmap_write(ldev->regmap, LTDC_LIPCR, (accum_act_h + 1));
+
+ /* Configure the output format (hw version dependent) */
+ if (ldev->caps.ycbcr_output) {
+ /* Input video dynamic_range & colorimetry */
+ int vic = drm_match_cea_mode(mode);
+ u32 val;
+
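/*
 * Annotation (assumed from the CEA-861 VIC table): VICs 2/3 (480p),
 * 6/7 (480i), 17/18 (576p) and 21/22 (576i) are the SD modes, which use
 * BT.601 colorimetry; everything else defaults to BT.709 below.
 */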
+ if (vic == 6 || vic == 7 || vic == 21 || vic == 22 ||
+ vic == 2 || vic == 3 || vic == 17 || vic == 18)
+ /* ITU-R BT.601 */
+ val = 0;
+ else
+ /* ITU-R BT.709 */
+ val = EDCR_OCYSEL;
+
+ switch (bus_formats) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ /* enable ycbcr output converter */
+ regmap_write(ldev->regmap, LTDC_EDCR, EDCR_OCYEN | val);
+ break;
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ /* enable ycbcr output converter & invert chrominance order */
+ regmap_write(ldev->regmap, LTDC_EDCR, EDCR_OCYEN | EDCR_OCYCO | val);
+ break;
+ default:
+ /* disable ycbcr output converter */
+ regmap_write(ldev->regmap, LTDC_EDCR, 0);
+ break;
+ }
+ }
}
static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -638,7 +978,8 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
ltdc_crtc_update_clut(crtc);
/* Commit shadow registers = update planes at next vblank */
- reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
+ if (!ldev->caps.plane_reg_shadow)
+ regmap_set_bits(ldev->regmap, LTDC_SRCR, SRCR_VBR);
if (event) {
crtc->state->event = NULL;
@@ -680,10 +1021,14 @@ static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
* simplify the code and only test if line > vactive_end
*/
if (pm_runtime_active(ddev->dev)) {
- line = reg_read(ldev->regs, LTDC_CPSR) & CPSR_CYPOS;
- vactive_start = reg_read(ldev->regs, LTDC_BPCR) & BPCR_AVBP;
- vactive_end = reg_read(ldev->regs, LTDC_AWCR) & AWCR_AAH;
- vtotal = reg_read(ldev->regs, LTDC_TWCR) & TWCR_TOTALH;
+ regmap_read(ldev->regmap, LTDC_CPSR, &line);
+ line &= CPSR_CYPOS;
+ regmap_read(ldev->regmap, LTDC_BPCR, &vactive_start);
+ vactive_start &= BPCR_AVBP;
+ regmap_read(ldev->regmap, LTDC_AWCR, &vactive_end);
+ vactive_end &= AWCR_AAH;
+ regmap_read(ldev->regmap, LTDC_TWCR, &vtotal);
+ vtotal &= TWCR_TOTALH;
if (line > vactive_end)
*vpos = line - vtotal - vactive_start;
@@ -719,7 +1064,7 @@ static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
DRM_DEBUG_DRIVER("\n");
if (state->enable)
- reg_set(ldev->regs, LTDC_IER, IER_LIE);
+ regmap_set_bits(ldev->regmap, LTDC_IER, IER_LIE);
else
return -EPERM;
@@ -731,7 +1076,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
DRM_DEBUG_DRIVER("\n");
- reg_clear(ldev->regs, LTDC_IER, IER_LIE);
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE);
}
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
@@ -789,7 +1134,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
u32 y0 = newstate->crtc_y;
u32 y1 = newstate->crtc_y + newstate->crtc_h - 1;
u32 src_x, src_y, src_w, src_h;
- u32 val, pitch_in_bytes, line_length, paddr, ahbp, avbp, bpcr;
+ u32 val, pitch_in_bytes, line_length, line_number, paddr, ahbp, avbp, bpcr;
enum ltdc_pix_fmt pf;
if (!newstate->crtc || !fb) {
@@ -809,19 +1154,20 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
newstate->crtc_w, newstate->crtc_h,
newstate->crtc_x, newstate->crtc_y);
- bpcr = reg_read(ldev->regs, LTDC_BPCR);
+ regmap_read(ldev->regmap, LTDC_BPCR, &bpcr);
+
ahbp = (bpcr & BPCR_AHBP) >> 16;
avbp = bpcr & BPCR_AVBP;
/* Configures the horizontal start and stop position */
val = ((x1 + 1 + ahbp) << 16) + (x0 + 1 + ahbp);
- reg_update_bits(ldev->regs, LTDC_L1WHPCR + lofs,
- LXWHPCR_WHSTPOS | LXWHPCR_WHSPPOS, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1WHPCR + lofs,
+ LXWHPCR_WHSTPOS | LXWHPCR_WHSPPOS, val);
/* Configures the vertical start and stop position */
val = ((y1 + 1 + avbp) << 16) + (y0 + 1 + avbp);
- reg_update_bits(ldev->regs, LTDC_L1WVPCR + lofs,
- LXWVPCR_WVSTPOS | LXWVPCR_WVSPPOS, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1WVPCR + lofs,
+ LXWVPCR_WVSTPOS | LXWVPCR_WVSPPOS, val);
/* Specifies the pixel format */
pf = to_ltdc_pixelformat(fb->format->format);
@@ -829,24 +1175,27 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (ldev->caps.pix_fmt_hw[val] == pf)
break;
+ /* Use the flexible color format feature if necessary and available */
+ if (ldev->caps.pix_fmt_flex && val == NB_PF)
+ val = ltdc_set_flexible_pixel_format(plane, pf);
+
if (val == NB_PF) {
DRM_ERROR("Pixel format %.4s not supported\n",
(char *)&fb->format->format);
val = 0; /* default to 32-bit ARGB */
}
- reg_update_bits(ldev->regs, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
/* Configures the color frame buffer pitch in bytes & line length */
pitch_in_bytes = fb->pitches[0];
line_length = fb->format->cpp[0] *
(x1 - x0 + 1) + (ldev->caps.bus_width >> 3) - 1;
val = ((pitch_in_bytes << 16) | line_length);
- reg_update_bits(ldev->regs, LTDC_L1CFBLR + lofs,
- LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1CFBLR + lofs, LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
/* Specifies the constant alpha value */
val = newstate->alpha >> 8;
- reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
val = BF1_PAXCA | BF2_1PAXCA;
@@ -858,24 +1207,98 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
plane->type != DRM_PLANE_TYPE_PRIMARY)
val = BF1_PAXCA | BF2_1PAXCA;
- reg_update_bits(ldev->regs, LTDC_L1BFCR + lofs,
- LXBFCR_BF2 | LXBFCR_BF1, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1BFCR + lofs, LXBFCR_BF2 | LXBFCR_BF1, val);
/* Configures the frame buffer line number */
- val = y1 - y0 + 1;
- reg_update_bits(ldev->regs, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, val);
+ line_number = y1 - y0 + 1;
+ regmap_write_bits(ldev->regmap, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, line_number);
/* Sets the FB address */
paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 0);
DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
- reg_write(ldev->regs, LTDC_L1CFBAR + lofs, paddr);
+ regmap_write(ldev->regmap, LTDC_L1CFBAR + lofs, paddr);
+
+ if (ldev->caps.ycbcr_input) {
+ if (fb->format->is_yuv) {
+ switch (fb->format->format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ /* Configure the auxiliary frame buffer address 0 & 1 */
+ paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr);
+ regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr + 1);
+
+ /* Configure the buffer length */
+ val = ((pitch_in_bytes << 16) | line_length);
+ regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+
+ /* Configure the frame buffer line number */
+ val = (line_number >> 1);
+ regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ break;
+ case DRM_FORMAT_YUV420:
+ /* Configure the auxiliary frame buffer address 0 */
+ paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr);
+
+ /* Configure the auxiliary frame buffer address 1 */
+ paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
+ regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr);
+
+ line_length = ((fb->format->cpp[0] * (x1 - x0 + 1)) >> 1) +
+ (ldev->caps.bus_width >> 3) - 1;
+
+ /* Configure the buffer length */
+ val = (((pitch_in_bytes >> 1) << 16) | line_length);
+ regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+
+ /* Configure the frame buffer line number */
+ val = (line_number >> 1);
+ regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ break;
+ case DRM_FORMAT_YVU420:
+ /* Configure the auxiliary frame buffer address 0 */
+ paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
+ regmap_write(ldev->regmap, LTDC_L1AFBA0R + lofs, paddr);
+
+ /* Configure the auxiliary frame buffer address 1 */
+ paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ regmap_write(ldev->regmap, LTDC_L1AFBA1R + lofs, paddr);
+
+ line_length = ((fb->format->cpp[0] * (x1 - x0 + 1)) >> 1) +
+ (ldev->caps.bus_width >> 3) - 1;
+
+ /* Configure the buffer length */
+ val = (((pitch_in_bytes >> 1) << 16) | line_length);
+ regmap_write(ldev->regmap, LTDC_L1AFBLR + lofs, val);
+
+ /* Configure the frame buffer line number */
+ val = (line_number >> 1);
+ regmap_write(ldev->regmap, LTDC_L1AFBLNR + lofs, val);
+ break;
+ }
+
+ /* Configure YCbCr conversion coefficients */
+ ltdc_set_ycbcr_coeffs(plane);
+
+ /* Configure YCbCr format and enable/disable conversion */
+ ltdc_set_ycbcr_config(plane, fb->format->format);
+ } else {
+ /* disable ycbcr conversion */
+ regmap_write(ldev->regmap, LTDC_L1PCR + lofs, 0);
+ }
+ }
/* Enable layer and CLUT if needed */
val = fb->format->format == DRM_FORMAT_C8 ? LXCR_CLUTEN : 0;
val |= LXCR_LEN;
- reg_update_bits(ldev->regs, LTDC_L1CR + lofs,
- LXCR_LEN | LXCR_CLUTEN, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN, val);
+
+ /* Commit shadow registers = update plane at next vblank */
+ if (ldev->caps.plane_reg_shadow)
+ regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
+ LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
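/*
 * Annotation (assumed register semantics): on hardware with per-plane
 * shadow registers, LTDC_LxRCR replaces the global SRCR commit — VBR
 * reloads this layer's shadow registers at the next vertical blanking,
 * IMR would reload immediately, and GRMSK presumably masks the layer from
 * a global reload. Hence the SRCR writes elsewhere are now gated on
 * !ldev->caps.plane_reg_shadow.
 */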
ldev->plane_fpsi[plane->index].counter++;
@@ -900,7 +1323,12 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
u32 lofs = plane->index * LAY_OFS;
/* disable layer */
- reg_clear(ldev->regs, LTDC_L1CR + lofs, LXCR_LEN);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN, 0);
+
+ /* Commit shadow registers = update plane at next vblank */
+ if (ldev->caps.plane_reg_shadow)
+ regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
+ LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
DRM_DEBUG_DRIVER("CRTC:%d plane:%d\n",
oldstate->crtc->base.id, plane->base.id);
@@ -925,16 +1353,6 @@ static void ltdc_plane_atomic_print_state(struct drm_printer *p,
fpsi->counter = 0;
}
-static bool ltdc_plane_format_mod_supported(struct drm_plane *plane,
- u32 format,
- u64 modifier)
-{
- if (modifier == DRM_FORMAT_MOD_LINEAR)
- return true;
-
- return false;
-}
-
static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -943,7 +1361,6 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_print_state = ltdc_plane_atomic_print_state,
- .format_mod_supported = ltdc_plane_format_mod_supported,
};
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
@@ -953,36 +1370,58 @@ static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
};
static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
- enum drm_plane_type type)
+ enum drm_plane_type type,
+ int index)
{
unsigned long possible_crtcs = CRTC_MASK;
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct drm_plane *plane;
unsigned int i, nb_fmt = 0;
- u32 formats[NB_PF * 2];
- u32 drm_fmt, drm_fmt_no_alpha;
+ u32 *formats;
+ u32 drm_fmt;
const u64 *modifiers = ltdc_format_modifiers;
+ u32 lofs = index * LAY_OFS;
+ u32 val;
int ret;
- /* Get supported pixel formats */
- for (i = 0; i < NB_PF; i++) {
- drm_fmt = to_drm_pixelformat(ldev->caps.pix_fmt_hw[i]);
- if (!drm_fmt)
- continue;
- formats[nb_fmt++] = drm_fmt;
+ /* Allocate the largest array needed for the supported color formats */
+ formats = devm_kzalloc(dev, (ldev->caps.pix_fmt_nb +
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_cp) +
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
+ sizeof(*formats), GFP_KERNEL);
- /* Add the no-alpha related format if any & supported */
- drm_fmt_no_alpha = get_pixelformat_without_alpha(drm_fmt);
- if (!drm_fmt_no_alpha)
- continue;
+ for (i = 0; i < ldev->caps.pix_fmt_nb; i++) {
+ drm_fmt = ldev->caps.pix_fmt_drm[i];
/* Manage hw-specific capabilities */
- if (ldev->caps.non_alpha_only_l1 &&
- type != DRM_PLANE_TYPE_PRIMARY)
- continue;
+ if (ldev->caps.non_alpha_only_l1)
+ /* XR24- & RX24-like formats are supported only on the primary layer */
+ if (type != DRM_PLANE_TYPE_PRIMARY && is_xrgb(drm_fmt))
+ continue;
- formats[nb_fmt++] = drm_fmt_no_alpha;
+ formats[nb_fmt++] = drm_fmt;
+ }
+
+ /* Add YCbCr supported pixel formats */
+ if (ldev->caps.ycbcr_input) {
+ regmap_read(ldev->regmap, LTDC_L1C1R + lofs, &val);
+ if (val & LXCR_C1R_YIA) {
+ memcpy(&formats[nb_fmt], ltdc_drm_fmt_ycbcr_cp,
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_cp) * sizeof(*formats));
+ nb_fmt += ARRAY_SIZE(ltdc_drm_fmt_ycbcr_cp);
+ }
+ if (val & LXCR_C1R_YSPA) {
+ memcpy(&formats[nb_fmt], ltdc_drm_fmt_ycbcr_sp,
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) * sizeof(*formats));
+ nb_fmt += ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp);
+ }
+ if (val & LXCR_C1R_YFPA) {
+ memcpy(&formats[nb_fmt], ltdc_drm_fmt_ycbcr_fp,
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp) * sizeof(*formats));
+ nb_fmt += ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp);
+ }
}
plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
@@ -995,6 +1434,17 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
if (ret < 0)
return NULL;
+ if (ldev->caps.ycbcr_input) {
+ if (val & (LXCR_C1R_YIA | LXCR_C1R_YSPA | LXCR_C1R_YFPA))
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+ }
+
drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);
drm_plane_create_alpha_property(plane);
@@ -1020,7 +1470,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
unsigned int i;
int ret;
- primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY);
+ primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY, 0);
if (!primary) {
DRM_ERROR("Can not create primary plane\n");
return -EINVAL;
@@ -1044,7 +1494,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
/* Add planes. Note: the first layer is used by the primary plane */
for (i = 1; i < ldev->caps.nb_layers; i++) {
- overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY);
+ overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
ret = -ENOMEM;
DRM_ERROR("Can not create overlay plane %d\n", i);
@@ -1068,7 +1518,7 @@ static void ltdc_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("\n");
/* Disable LTDC */
- reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+ regmap_clear_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
/* Set to sleep state the pinctrl whatever type of encoder */
pinctrl_pm_select_sleep_state(ddev->dev);
@@ -1082,7 +1532,7 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("\n");
/* Enable LTDC */
- reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+ regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
}
static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
@@ -1145,21 +1595,25 @@ static int ltdc_get_caps(struct drm_device *ddev)
* at least 1 layer must be managed & the number of layers
* must not exceed LTDC_MAX_LAYER
*/
- lcr = reg_read(ldev->regs, LTDC_LCR);
+ regmap_read(ldev->regmap, LTDC_LCR, &lcr);
ldev->caps.nb_layers = clamp((int)lcr, 1, LTDC_MAX_LAYER);
/* set data bus width */
- gc2r = reg_read(ldev->regs, LTDC_GC2R);
+ regmap_read(ldev->regmap, LTDC_GC2R, &gc2r);
bus_width_log2 = (gc2r & GC2R_BW) >> 4;
ldev->caps.bus_width = 8 << bus_width_log2;
- ldev->caps.hw_version = reg_read(ldev->regs, LTDC_IDR);
+ regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version);
switch (ldev->caps.hw_version) {
case HWVER_10200:
case HWVER_10300:
- ldev->caps.reg_ofs = REG_OFS_NONE;
+ ldev->caps.layer_ofs = LAY_OFS_0;
+ ldev->caps.layer_regs = ltdc_layer_regs_a0;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a0;
+ ldev->caps.pix_fmt_drm = ltdc_drm_fmt_a0;
+ ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a0);
+ ldev->caps.pix_fmt_flex = false;
/*
* Hw older versions support non-alpha color formats derived
* from native alpha color formats only on the primary layer.
@@ -1172,13 +1626,37 @@ static int ltdc_get_caps(struct drm_device *ddev)
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
ldev->caps.nb_irq = 2;
+ ldev->caps.ycbcr_input = false;
+ ldev->caps.ycbcr_output = false;
+ ldev->caps.plane_reg_shadow = false;
break;
case HWVER_20101:
- ldev->caps.reg_ofs = REG_OFS_4;
+ ldev->caps.layer_ofs = LAY_OFS_0;
+ ldev->caps.layer_regs = ltdc_layer_regs_a1;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
+ ldev->caps.pix_fmt_drm = ltdc_drm_fmt_a1;
+ ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a1);
+ ldev->caps.pix_fmt_flex = false;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
ldev->caps.nb_irq = 4;
+ ldev->caps.ycbcr_input = false;
+ ldev->caps.ycbcr_output = false;
+ ldev->caps.plane_reg_shadow = false;
+ break;
+ case HWVER_40100:
+ ldev->caps.layer_ofs = LAY_OFS_1;
+ ldev->caps.layer_regs = ltdc_layer_regs_a2;
+ ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2;
+ ldev->caps.pix_fmt_drm = ltdc_drm_fmt_a2;
+ ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2);
+ ldev->caps.pix_fmt_flex = true;
+ ldev->caps.non_alpha_only_l1 = false;
+ ldev->caps.pad_max_freq_hz = 90000000;
+ ldev->caps.nb_irq = 2;
+ ldev->caps.ycbcr_input = true;
+ ldev->caps.ycbcr_output = true;
+ ldev->caps.plane_reg_shadow = true;
break;
default:
return -ENODEV;
@@ -1296,9 +1774,15 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
+ ldev->regmap = devm_regmap_init_mmio(&pdev->dev, ldev->regs, &stm32_ltdc_regmap_cfg);
+ if (IS_ERR(ldev->regmap)) {
+ DRM_ERROR("Unable to regmap ltdc registers\n");
+ ret = PTR_ERR(ldev->regmap);
+ goto err;
+ }
+
/* Disable interrupts */
- reg_clear(ldev->regs, LTDC_IER,
- IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
ret = ltdc_get_caps(ddev);
if (ret) {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index f153b908c70e..6968d1ca5149 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -14,12 +14,19 @@
struct ltdc_caps {
u32 hw_version; /* hardware version */
u32 nb_layers; /* number of supported layers */
- u32 reg_ofs; /* register offset for applicable regs */
+ u32 layer_ofs; /* layer offset for applicable regs */
+ const u32 *layer_regs; /* layer register offset */
u32 bus_width; /* bus width (32 or 64 bits) */
- const u32 *pix_fmt_hw; /* supported pixel formats */
+ const u32 *pix_fmt_hw; /* supported hw pixel formats */
+ const u32 *pix_fmt_drm; /* supported drm pixel formats */
+ int pix_fmt_nb; /* number of pixel formats */
+ bool pix_fmt_flex; /* pixel format flexibility supported */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
int pad_max_freq_hz; /* max frequency supported by pad */
int nb_irq; /* number of hardware interrupts */
+ bool ycbcr_input; /* ycbcr input converter supported */
+ bool ycbcr_output; /* ycbcr output converter supported */
+ bool plane_reg_shadow; /* plane shadow registers supported */
};
#define LTDC_MAX_LAYER 4
@@ -31,6 +38,7 @@ struct fps_info {
struct ltdc_device {
void __iomem *regs;
+ struct regmap *regmap;
struct clk *pixel_clk; /* lcd pixel clock */
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 8cf5aeb9db6c..18c319b804c0 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -5,6 +5,7 @@ config DRM_TEGRA
depends on COMMON_CLK
depends on DRM
depends on OF
+ select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 70dfb7d1dec5..e4369e5b2943 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -5,7 +5,7 @@
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_print.h>
#include "dp.h"
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 1f96e416fa08..8ca500977a46 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -18,7 +18,7 @@
#include <linux/reset.h>
#include <linux/workqueue.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_panel.h>
#include "dp.h"
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 0ea320c1092b..b125572feb84 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_scdc_helper.h>
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 3ddb7c710a3d..cc567c87057d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -60,8 +60,6 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
list_del(&mod->list);
}
-static struct of_device_id tilcdc_of_match[];
-
static int tilcdc_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -587,7 +585,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tilcdc_of_match[] = {
+static const struct of_device_id tilcdc_of_match[] = {
{ .compatible = "ti,am33xx-tilcdc", },
{ .compatible = "ti,da850-tilcdc", },
{ },
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index fc26a1ce11ee..ed971c8bb446 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -10,6 +10,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -716,24 +717,7 @@ static struct pci_driver bochs_pci_driver = {
/* ---------------------------------------------------------------------- */
/* module init/exit */
-static int __init bochs_init(void)
-{
- if (drm_firmware_drivers_only() && bochs_modeset == -1)
- return -EINVAL;
-
- if (bochs_modeset == 0)
- return -EINVAL;
-
- return pci_register_driver(&bochs_pci_driver);
-}
-
-static void __exit bochs_exit(void)
-{
- pci_unregister_driver(&bochs_pci_driver);
-}
-
-module_init(bochs_init);
-module_exit(bochs_exit);
+drm_module_pci_driver_if_modeset(bochs_pci_driver, bochs_modeset);
MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
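/*
 * Sketch (from memory of <drm/drm_module.h>, not guaranteed verbatim): the
 * helper macro expands to roughly the init/exit pair it replaces:
 *
 *	static int __init bochs_init(void)
 *	{
 *		if (drm_firmware_drivers_only() && bochs_modeset == -1)
 *			return -ENODEV;
 *		if (bochs_modeset == 0)
 *			return -ENODEV;
 *		return pci_register_driver(&bochs_pci_driver);
 *	}
 *	module_init(bochs_init);
 *	// ...plus a matching __exit calling pci_unregister_driver()
 *
 * the one behavioural delta being -ENODEV instead of the old -EINVAL.
 */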
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 2dc5ffecf191..c8e791840862 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -39,6 +39,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -635,21 +636,7 @@ static struct pci_driver cirrus_pci_driver = {
.remove = cirrus_pci_remove,
};
-static int __init cirrus_init(void)
-{
- if (drm_firmware_drivers_only())
- return -EINVAL;
-
- return pci_register_driver(&cirrus_pci_driver);
-}
-
-static void __exit cirrus_exit(void)
-{
- pci_unregister_driver(&cirrus_pci_driver);
-}
-
-module_init(cirrus_init);
-module_exit(cirrus_exit);
+drm_module_pci_driver(cirrus_pci_driver)
MODULE_DEVICE_TABLE(pci, pciidlist);
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 04146da2d1d8..d191e87a37b5 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -526,21 +526,33 @@ static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
struct platform_device *pdev = sdev->pdev;
- struct resource *mem;
+ struct resource *res, *mem;
void __iomem *screen_base;
int ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
return -EINVAL;
- ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem));
+ ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
if (ret) {
drm_err(dev, "could not acquire memory range %pr: error %d\n",
- mem, ret);
+ res, ret);
return ret;
}
+ mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ sdev->dev.driver->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
screen_base = devm_ioremap_wc(&pdev->dev, mem->start,
resource_size(mem));
if (!screen_base)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4bf72470abef..2b8caa1efaa3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -241,6 +241,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ }
+
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
@@ -509,7 +514,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
- bo->resource = NULL;
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
@@ -637,7 +641,6 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
bo->ttm = ttm;
- bo->resource = NULL;
ttm_bo_assign_mem(bo, sys_res);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 072e0baf2ab4..8cd4f3fb9f79 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -89,6 +89,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
spin_unlock(&rman->lock);
if (unlikely(ret)) {
+ ttm_resource_fini(man, *res);
kfree(node);
return ret;
}
@@ -108,6 +109,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
drm_mm_remove_node(&node->mm_nodes[0]);
spin_unlock(&rman->lock);
+ ttm_resource_fini(man, res);
kfree(node);
}
@@ -156,7 +158,7 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev,
man->func = &ttm_range_manager_func;
- ttm_resource_manager_init(man, p_size);
+ ttm_resource_manager_init(man, bdev, p_size);
drm_mm_init(&rman->mm, 0, p_size);
spin_lock_init(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index df9efafa0f2f..29ee2eda7b7d 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -29,6 +29,14 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>
+/**
+ * ttm_resource_init - resource object constructor
+ * @bo: buffer object this resource is allocated for
+ * @place: placement of the resource
+ * @res: the resource object to initialize
+ *
+ * Initialize a new resource object. Counterpart of &ttm_resource_fini.
+ */
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res)
@@ -41,9 +49,25 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
res->bus.offset = 0;
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
+ res->bo = bo;
}
EXPORT_SYMBOL(ttm_resource_init);
+/**
+ * ttm_resource_fini - resource destructor
+ * @man: the resource manager this resource belongs to
+ * @res: the resource to clean up
+ *
+ * Should be used by resource manager backends to clean up the TTM resource
+ * objects before freeing the underlying structure. Counterpart of
+ * &ttm_resource_init
+ */
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+}
+EXPORT_SYMBOL(ttm_resource_fini);
+
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res_ptr)
@@ -116,20 +140,31 @@ bool ttm_resource_compat(struct ttm_resource *res,
}
EXPORT_SYMBOL(ttm_resource_compat);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ res->bo = bo;
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
/**
* ttm_resource_manager_init
*
* @man: memory manager object to init
+ * @bdev: ttm device this manager belongs to
* @p_size: size managed area in pages.
*
* Initialise core parts of a manager object.
*/
void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
unsigned long p_size)
{
unsigned i;
spin_lock_init(&man->move_lock);
+ man->bdev = bdev;
man->size = p_size;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 63aca52f75e1..2ced169513cb 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
static void ttm_sys_man_free(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
+ ttm_resource_fini(man, res);
kfree(res);
}
@@ -42,7 +43,7 @@ void ttm_sys_man_init(struct ttm_device *bdev)
man->use_tt = true;
man->func = &ttm_sys_manager_func;
- ttm_resource_manager_init(man, 0);
+ ttm_resource_manager_init(man, bdev, 0);
ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
ttm_resource_manager_set_used(man, true);
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index bd46396a1ae0..1afcd54fbbd5 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -219,6 +219,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
int ret;
u32 mmu_debug;
u32 ident1;
+ u64 mask;
v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
if (IS_ERR(v3d))
@@ -237,8 +238,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
- dma_set_mask_and_coherent(dev,
- DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
+ mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+ ret = dma_set_mask_and_coherent(dev, mask);
+ if (ret)
+ return ret;
+
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
ident1 = V3D_READ(V3D_HUB_IDENT1);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 6d1281a343e9..e451cc5bcfac 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -355,8 +355,6 @@ static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
uint32_t page_index = bo_page_index(size);
struct vc4_bo *bo = NULL;
- size = roundup(size, PAGE_SIZE);
-
mutex_lock(&vc4->bo_lock);
if (page_index >= vc4->bo_cache.size_list_size)
goto out;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 16abc3a3d601..a03053c8e22c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -37,6 +37,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_vblank.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
@@ -215,6 +217,7 @@ static void vc4_match_add_drivers(struct device *dev,
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct rpi_firmware *firmware = NULL;
struct drm_device *drm;
struct vc4_dev *vc4;
struct device_node *node;
@@ -251,15 +254,34 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
return ret;
- ret = component_bind_all(dev, drm);
+ node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
+ if (node) {
+ firmware = rpi_firmware_get(node);
+ of_node_put(node);
+
+ if (!firmware)
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver);
if (ret)
return ret;
- ret = vc4_plane_create_additional_planes(drm);
+ if (firmware) {
+ ret = rpi_firmware_property(firmware,
+ RPI_FIRMWARE_NOTIFY_DISPLAY_DONE,
+ NULL, 0);
+ if (ret)
+ drm_warn(drm, "Couldn't stop firmware display driver: %d\n", ret);
+
+ rpi_firmware_put(firmware);
+ }
+
+ ret = component_bind_all(dev, drm);
if (ret)
- goto unbind_all;
+ return ret;
- ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver);
+ ret = vc4_plane_create_additional_planes(drm);
if (ret)
goto unbind_all;
@@ -357,6 +379,9 @@ static int __init vc4_drm_register(void)
{
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
ret = platform_register_drivers(component_drivers,
ARRAY_SIZE(component_drivers));
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 053fbaf765ca..e3121eb5f605 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -104,6 +104,15 @@ static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode)
return (mode->clock * 1000) > HDMI_14_MAX_TMDS_CLK;
}
+static bool vc4_hdmi_is_full_range_rgb(struct vc4_hdmi *vc4_hdmi,
+ const struct drm_display_mode *mode)
+{
+ struct vc4_hdmi_encoder *vc4_encoder = &vc4_hdmi->encoder;
+
+ return !vc4_encoder->hdmi_monitor ||
+ drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_FULL;
+}
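/*
 * Annotation: per drm_default_rgb_quant_range(), only CEA modes other than
 * VIC 1 default to limited range, so this helper reports full range for
 * DVI sinks (no hdmi_monitor) and for VIC 1 / non-CEA modes.
 */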
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -481,7 +490,6 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
@@ -499,10 +507,10 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
connector, mode,
- vc4_encoder->limited_rgb_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
- drm_hdmi_avi_infoframe_colorspace(&frame.avi, cstate);
+ vc4_hdmi_is_full_range_rgb(vc4_hdmi, mode) ?
+ HDMI_QUANTIZATION_RANGE_FULL :
+ HDMI_QUANTIZATION_RANGE_LIMITED);
+ drm_hdmi_avi_infoframe_colorimetry(&frame.avi, cstate);
drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
@@ -726,7 +734,9 @@ static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
mutex_unlock(&vc4_hdmi->mutex);
}
-static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
+static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
+ const struct drm_display_mode *mode)
{
unsigned long flags;
u32 csc_ctl;
@@ -736,7 +746,7 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
VC4_HD_CSC_CTL_ORDER);
- if (enable) {
+ if (!vc4_hdmi_is_full_range_rgb(vc4_hdmi, mode)) {
/* CEA VICs other than #1 require limited range RGB
* output unless overridden by an AVI infoframe.
* Apply a colorspace conversion to squash 0-255 down
@@ -766,44 +776,67 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
-static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
+/*
+ * If we need to output Full Range RGB, then use the unity matrix
+ *
+ * [ 1 0 0 0]
+ * [ 0 1 0 0]
+ * [ 0 0 1 0]
+ *
+ * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
+ */
+static const u16 vc5_hdmi_csc_full_rgb_unity[3][4] = {
+ { 0x2000, 0x0000, 0x0000, 0x0000 },
+ { 0x0000, 0x2000, 0x0000, 0x0000 },
+ { 0x0000, 0x0000, 0x2000, 0x0000 },
+};
+
+/*
+ * CEA VICs other than #1 require limited range RGB output unless
+ * overridden by an AVI infoframe. Apply a colorspace conversion to
+ * squash 0-255 down to 16-235. The matrix here is:
+ *
+ * [ 0.8594 0 0 16]
+ * [ 0 0.8594 0 16]
+ * [ 0 0 0.8594 16]
+ *
+ * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
+ */
+static const u16 vc5_hdmi_csc_full_rgb_to_limited_rgb[3][4] = {
+ { 0x1b80, 0x0000, 0x0000, 0x0400 },
+ { 0x0000, 0x1b80, 0x0000, 0x0400 },
+ { 0x0000, 0x0000, 0x1b80, 0x0400 },
+};
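/*
 * Worked example (assumed fixed-point layout): in signed 2p13, 1.0 is
 * 1 << 13 = 0x2000, and 0x1b80 = 7040 / 8192 = 0.859375 ~= 0.8594; the
 * offset column is signed 9p6, so 0x0400 = 1024 / 64 = 16, the
 * limited-range black level.
 */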
+
+static void vc5_hdmi_set_csc_coeffs(struct vc4_hdmi *vc4_hdmi,
+ const u16 coeffs[3][4])
{
- unsigned long flags;
- u32 csc_ctl;
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
+ HDMI_WRITE(HDMI_CSC_12_11, (coeffs[0][1] << 16) | coeffs[0][0]);
+ HDMI_WRITE(HDMI_CSC_14_13, (coeffs[0][3] << 16) | coeffs[0][2]);
+ HDMI_WRITE(HDMI_CSC_22_21, (coeffs[1][1] << 16) | coeffs[1][0]);
+ HDMI_WRITE(HDMI_CSC_24_23, (coeffs[1][3] << 16) | coeffs[1][2]);
+ HDMI_WRITE(HDMI_CSC_32_31, (coeffs[2][1] << 16) | coeffs[2][0]);
+ HDMI_WRITE(HDMI_CSC_34_33, (coeffs[2][3] << 16) | coeffs[2][2]);
+}
- csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, || USE_RGB_TO_YCBCR */
+static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
+ const struct drm_display_mode *mode)
+{
+ unsigned long flags;
+ u32 csc_ctl = VC5_MT_CP_CSC_CTL_ENABLE | VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
+ VC5_MT_CP_CSC_CTL_MODE);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- if (enable) {
- /* CEA VICs other than #1 requre limited range RGB
- * output unless overridden by an AVI infoframe.
- * Apply a colorspace conversion to squash 0-255 down
- * to 16-235. The matrix here is:
- *
- * [ 0.8594 0 0 16]
- * [ 0 0.8594 0 16]
- * [ 0 0 0.8594 16]
- * [ 0 0 0 1]
- * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
- */
- HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x1b80);
- HDMI_WRITE(HDMI_CSC_14_13, (0x0400 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_22_21, (0x1b80 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_24_23, (0x0400 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_34_33, (0x0400 << 16) | 0x1b80);
- } else {
- /* Still use the matrix for full range, but make it unity.
- * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
- */
- HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x2000);
- HDMI_WRITE(HDMI_CSC_14_13, (0x0000 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_22_21, (0x2000 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_24_23, (0x0000 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000);
- HDMI_WRITE(HDMI_CSC_34_33, (0x0000 << 16) | 0x2000);
- }
+ HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
+
+ if (!vc4_hdmi_is_full_range_rgb(vc4_hdmi, mode))
+ vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_to_limited_rgb);
+ else
+ vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_unity);
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
@@ -889,7 +922,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
HDMI_WRITE(HDMI_HORZA,
(vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) |
(hsync_pos ? VC5_HDMI_HORZA_HPOS : 0) |
@@ -1113,24 +1145,16 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
- struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
unsigned long flags;
mutex_lock(&vc4_hdmi->mutex);
- if (vc4_encoder->hdmi_monitor &&
- drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) {
- if (vc4_hdmi->variant->csc_setup)
- vc4_hdmi->variant->csc_setup(vc4_hdmi, true);
-
- vc4_encoder->limited_rgb_range = true;
- } else {
- if (vc4_hdmi->variant->csc_setup)
- vc4_hdmi->variant->csc_setup(vc4_hdmi, false);
-
- vc4_encoder->limited_rgb_range = false;
- }
+ if (vc4_hdmi->variant->csc_setup)
+ vc4_hdmi->variant->csc_setup(vc4_hdmi, conn_state, mode);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 36c0b082a43b..2b6aaafc020a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -12,7 +12,6 @@
struct vc4_hdmi_encoder {
struct vc4_encoder base;
bool hdmi_monitor;
- bool limited_rgb_range;
};
static inline struct vc4_hdmi_encoder *
@@ -77,7 +76,9 @@ struct vc4_hdmi_variant {
void (*reset)(struct vc4_hdmi *vc4_hdmi);
/* Callback to enable / disable the CSC */
- void (*csc_setup)(struct vc4_hdmi *vc4_hdmi, bool enable);
+ void (*csc_setup)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
+ const struct drm_display_mode *mode);
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 7538b84a6dca..33410718089e 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -774,6 +774,9 @@ enum {
# define VC4_HD_CSC_CTL_RGB2YCC BIT(1)
# define VC4_HD_CSC_CTL_ENABLE BIT(0)
+# define VC5_MT_CP_CSC_CTL_ENABLE BIT(2)
+# define VC5_MT_CP_CSC_CTL_MODE_MASK VC4_MASK(1, 0)
+
# define VC4_DVP_HT_CLOCK_STOP_PIXEL BIT(1)
/* HVS display list information. */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 2de61b63ef91..48d3c9955f0d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -248,6 +248,9 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
u32 i;
+ if (!objs)
+ return;
+
for (i = 0; i < objs->nents; i++)
drm_gem_object_put(objs->objs[i]);
virtio_gpu_array_free(objs);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 18d944c57883..91e63b12f60f 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -20,6 +20,8 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
+#define NUM_OVERLAY_PLANES 8
+
struct vkms_writeback_job {
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 04406bd3ff02..ba0e82ae549a 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -32,6 +32,21 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
.get_modes = vkms_conn_get_modes,
};
+static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index,
+ struct drm_crtc *crtc)
+{
+ struct vkms_plane *overlay;
+
+ overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
+ if (IS_ERR(overlay))
+ return PTR_ERR(overlay);
+
+ if (!overlay->base.possible_crtcs)
+ overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+
+ return 0;
+}
+
int vkms_output_init(struct vkms_device *vkmsdev, int index)
{
struct vkms_output *output = &vkmsdev->output;
@@ -39,21 +54,21 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
struct drm_crtc *crtc = &output->crtc;
- struct vkms_plane *primary, *cursor = NULL, *overlay = NULL;
+ struct vkms_plane *primary, *cursor = NULL;
int ret;
int writeback;
+ unsigned int n;
primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
if (vkmsdev->config->overlay) {
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
- if (IS_ERR(overlay))
- return PTR_ERR(overlay);
-
- if (!overlay->base.possible_crtcs)
- overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ ret = vkms_add_overlay_plane(vkmsdev, index, crtc);
+ if (ret)
+ return ret;
+ }
}
if (vkmsdev->config->cursor) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ebb4505a31a3..60e3cc537f36 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -117,6 +117,7 @@ nospace:
gman->used_gmr_pages -= (*res)->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
+ ttm_resource_fini(man, *res);
kfree(*res);
return -ENOSPC;
}
@@ -130,6 +131,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
gman->used_gmr_pages -= res->num_pages;
spin_unlock(&gman->lock);
+ ttm_resource_fini(man, res);
kfree(res);
}
@@ -160,7 +162,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
man->func = &vmw_gmrid_manager_func;
man->use_tt = true;
- ttm_resource_manager_init(man, 0);
+ ttm_resource_manager_init(man, &dev_priv->bdev, 0);
spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
index b0005b03a617..d3007bf1b8f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
@@ -49,6 +49,7 @@ static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
static void vmw_sys_man_free(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
+ ttm_resource_fini(man, res);
kfree(res);
}
@@ -69,7 +70,7 @@ int vmw_sys_man_init(struct vmw_private *dev_priv)
man->use_tt = true;
man->func = &vmw_sys_manager_func;
- ttm_resource_manager_init(man, 0);
+ ttm_resource_manager_init(man, bdev, 0);
ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
ttm_resource_manager_set_used(man, true);
return 0;
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index d8d38d86d5c6..06cf477dbcdd 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -6,6 +6,7 @@ config DRM_ZYNQMP_DPSUB
depends on PHY_XILINX_ZYNQMP
depends on XILINX_ZYNQMP_DPDMA
select DMA_ENGINE
+ select DRM_DP_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 6f588dc09ba6..b1bbbb1d0a54 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -13,7 +13,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index ccc23d8686e8..75e93efd669f 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -243,6 +243,17 @@ config CROS_USBPD_NOTIFY
To compile this driver as a module, choose M here: the
module will be called cros_usbpd_notify.
+config CHROMEOS_PRIVACY_SCREEN
+ tristate "ChromeOS Privacy Screen support"
+ depends on ACPI
+ depends on DRM
+ select DRM_PRIVACY_SCREEN
+ help
+ This driver provides support for the built-in electronic privacy
+ screen present on some ChromeOS devices. When enabled, this driver
+ should almost always be built into the kernel to avoid or minimize
+ drm probe deferral.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index f901d2e43166..5d4be9735d9d 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -4,6 +4,7 @@
CFLAGS_cros_ec_trace.o:= -I$(src)
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PRIVACY_SCREEN) += chromeos_privacy_screen.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
obj-$(CONFIG_CROS_EC) += cros_ec.o
diff --git a/drivers/platform/chrome/chromeos_privacy_screen.c b/drivers/platform/chrome/chromeos_privacy_screen.c
new file mode 100644
index 000000000000..77e9f5ee8e33
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_privacy_screen.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * ChromeOS Privacy Screen support
+ *
+ * Copyright (C) 2022 Google LLC
+ *
+ * This is the ChromeOS privacy-screen provider, present on certain
+ * Chromebooks and represented by a GOOG0010 device in ACPI. If this
+ * ACPI device is present, the i915 drm driver will defer probing until
+ * this driver registers the privacy-screen.
+ */
+
+#include <linux/acpi.h>
+#include <drm/drm_privacy_screen_driver.h>
+
+/*
+ * The DSM (Device Specific Method) constants below are the agreed API with
+ * the firmware team, on how to control privacy screen using ACPI methods.
+ */
+#define PRIV_SCRN_DSM_REVID 1 /* DSM version */
+#define PRIV_SCRN_DSM_FN_GET_STATUS 1 /* Get privacy screen status */
+#define PRIV_SCRN_DSM_FN_ENABLE 2 /* Enable privacy screen */
+#define PRIV_SCRN_DSM_FN_DISABLE 3 /* Disable privacy screen */
+
+static const guid_t chromeos_privacy_screen_dsm_guid =
+ GUID_INIT(0xc7033113, 0x8720, 0x4ceb,
+ 0x90, 0x90, 0x9d, 0x52, 0xb3, 0xe5, 0x2d, 0x73);
+
+static void
+chromeos_privacy_screen_get_hw_state(struct drm_privacy_screen
+ *drm_privacy_screen)
+{
+ union acpi_object *obj;
+ acpi_handle handle;
+ struct device *privacy_screen =
+ drm_privacy_screen_get_drvdata(drm_privacy_screen);
+
+ handle = acpi_device_handle(to_acpi_device(privacy_screen));
+ obj = acpi_evaluate_dsm(handle, &chromeos_privacy_screen_dsm_guid,
+ PRIV_SCRN_DSM_REVID,
+ PRIV_SCRN_DSM_FN_GET_STATUS, NULL);
+ if (!obj) {
+ dev_err(privacy_screen,
+ "_DSM failed to get privacy-screen state\n");
+ return;
+ }
+
+ if (obj->type != ACPI_TYPE_INTEGER)
+ dev_err(privacy_screen,
+ "Bad _DSM to get privacy-screen state\n");
+ else if (obj->integer.value == 1)
+ drm_privacy_screen->hw_state = drm_privacy_screen->sw_state =
+ PRIVACY_SCREEN_ENABLED;
+ else
+ drm_privacy_screen->hw_state = drm_privacy_screen->sw_state =
+ PRIVACY_SCREEN_DISABLED;
+
+ ACPI_FREE(obj);
+}
+
+static int
+chromeos_privacy_screen_set_sw_state(struct drm_privacy_screen
+ *drm_privacy_screen,
+ enum drm_privacy_screen_status state)
+{
+ union acpi_object *obj = NULL;
+ acpi_handle handle;
+ struct device *privacy_screen =
+ drm_privacy_screen_get_drvdata(drm_privacy_screen);
+
+ handle = acpi_device_handle(to_acpi_device(privacy_screen));
+
+ if (state == PRIVACY_SCREEN_DISABLED) {
+ obj = acpi_evaluate_dsm(handle,
+ &chromeos_privacy_screen_dsm_guid,
+ PRIV_SCRN_DSM_REVID,
+ PRIV_SCRN_DSM_FN_DISABLE, NULL);
+ } else if (state == PRIVACY_SCREEN_ENABLED) {
+ obj = acpi_evaluate_dsm(handle,
+ &chromeos_privacy_screen_dsm_guid,
+ PRIV_SCRN_DSM_REVID,
+ PRIV_SCRN_DSM_FN_ENABLE, NULL);
+ } else {
+ dev_err(privacy_screen,
+ "Bad attempt to set privacy-screen status to %u\n",
+ state);
+ return -EINVAL;
+ }
+
+ if (!obj) {
+ dev_err(privacy_screen,
+ "_DSM failed to set privacy-screen state\n");
+ return -EIO;
+ }
+
+ drm_privacy_screen->hw_state = drm_privacy_screen->sw_state = state;
+ ACPI_FREE(obj);
+ return 0;
+}
+
+static const struct drm_privacy_screen_ops chromeos_privacy_screen_ops = {
+ .get_hw_state = chromeos_privacy_screen_get_hw_state,
+ .set_sw_state = chromeos_privacy_screen_set_sw_state,
+};
+
+static int chromeos_privacy_screen_add(struct acpi_device *adev)
+{
+ struct drm_privacy_screen *drm_privacy_screen =
+ drm_privacy_screen_register(&adev->dev,
+ &chromeos_privacy_screen_ops,
+ &adev->dev);
+
+ if (IS_ERR(drm_privacy_screen)) {
+ dev_err(&adev->dev, "Error registering privacy-screen\n");
+ return PTR_ERR(drm_privacy_screen);
+ }
+
+ adev->driver_data = drm_privacy_screen;
+ dev_info(&adev->dev, "registered privacy-screen '%s'\n",
+ dev_name(&drm_privacy_screen->dev));
+
+ return 0;
+}
+
+static int chromeos_privacy_screen_remove(struct acpi_device *adev)
+{
+ struct drm_privacy_screen *drm_privacy_screen = acpi_driver_data(adev);
+
+ drm_privacy_screen_unregister(drm_privacy_screen);
+ return 0;
+}
+
+static const struct acpi_device_id chromeos_privacy_screen_device_ids[] = {
+ {"GOOG0010", 0}, /* Google's electronic privacy screen for eDP-1 */
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, chromeos_privacy_screen_device_ids);
+
+static struct acpi_driver chromeos_privacy_screen_driver = {
+ .name = "chromeos_privacy_screen_driver",
+ .class = "ChromeOS",
+ .ids = chromeos_privacy_screen_device_ids,
+ .ops = {
+ .add = chromeos_privacy_screen_add,
+ .remove = chromeos_privacy_screen_remove,
+ },
+};
+
+module_acpi_driver(chromeos_privacy_screen_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS ACPI Privacy Screen driver");
+MODULE_AUTHOR("Rajat Jain <rajatja@google.com>");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 098180fb1cfc..99fbe90c926e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -9869,7 +9869,7 @@ static int tpacpi_lcdshadow_init(struct ibm_init_struct *iibm)
return 0;
lcdshadow_dev = drm_privacy_screen_register(&tpacpi_pdev->dev,
- &lcdshadow_ops);
+ &lcdshadow_ops, NULL);
if (IS_ERR(lcdshadow_dev))
return PTR_ERR(lcdshadow_dev);
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index 84c56f525889..f8ef62542f7f 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -110,7 +110,7 @@ static const struct fb_ops asiliantfb_ops = {
static void asiliant_calc_dclk2(u32 *ppixclock, u8 *dclk2_m, u8 *dclk2_n, u8 *dclk2_div)
{
unsigned pixclock = *ppixclock;
- unsigned Ftarget = 1000000 * (1000000 / pixclock);
+ unsigned Ftarget;
unsigned n;
unsigned best_error = 0xffffffff;
unsigned best_m = 0xffffffff,
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 0fa7ede94fa6..b585339509b0 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/linux_logo.h>
#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/kmod.h>
@@ -1557,18 +1558,36 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
/* check all firmware fbs and kick off if the base addr overlaps */
for_each_registered_fb(i) {
struct apertures_struct *gen_aper;
+ struct device *device;
if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
continue;
gen_aper = registered_fb[i]->apertures;
+ device = registered_fb[i]->device;
if (fb_do_apertures_overlap(gen_aper, a) ||
(primary && gen_aper && gen_aper->count &&
gen_aper->ranges[0].base == VGA_FB_PHYS)) {
printk(KERN_INFO "fb%d: switching to %s from %s\n",
i, name, registered_fb[i]->fix.id);
- do_unregister_framebuffer(registered_fb[i]);
+
+ /*
+ * If we kick-out a firmware driver, we also want to remove
+ * the underlying platform device, such as simple-framebuffer,
+ * VESA, EFI, etc. A native driver will then be able to
+ * allocate the memory range.
+ *
+ * If it's not a platform device, at least print a warning. A
+ * fix would add code to remove the device from the system.
+ */
+ if (dev_is_platform(device)) {
+ registered_fb[i]->forced_out = true;
+ platform_device_unregister(to_platform_device(device));
+ } else {
+ pr_warn("fb%d: cannot remove device\n", i);
+ do_unregister_framebuffer(registered_fb[i]);
+ }
}
}
}
@@ -1898,9 +1917,13 @@ EXPORT_SYMBOL(register_framebuffer);
void
unregister_framebuffer(struct fb_info *fb_info)
{
- mutex_lock(&registration_lock);
+ bool forced_out = fb_info->forced_out;
+
+ if (!forced_out)
+ mutex_lock(&registration_lock);
do_unregister_framebuffer(fb_info);
- mutex_unlock(&registration_lock);
+ if (!forced_out)
+ mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(unregister_framebuffer);
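The forced_out flag exists to break a lock recursion: kicking out the platform device runs the firmware driver's remove callback, which itself calls unregister_framebuffer() while do_remove_conflicting_framebuffers() already holds registration_lock. A sketch of the call chain, with simplefb as the example driver:

/*
 * do_remove_conflicting_framebuffers()   takes registration_lock
 *   platform_device_unregister()
 *     simplefb_remove()                  firmware driver's remove()
 *       unregister_framebuffer()         sees forced_out, skips the lock
 */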
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 3b134e1bbc38..68408c499627 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -489,7 +489,7 @@ static int s3c_fb_set_par(struct fb_info *info)
struct s3c_fb_win *win = info->par;
struct s3c_fb *sfb = win->parent;
void __iomem *regs = sfb->regs;
- void __iomem *buf = regs;
+ void __iomem *buf;
int win_no = win->index;
u32 alpha = 0;
u32 data;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 57541887188b..94fc9c6d0411 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -66,16 +66,36 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
}
-struct simplefb_par;
+struct simplefb_par {
+ u32 palette[PSEUDO_PALETTE_SIZE];
+ struct resource *mem;
+#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
+ bool clks_enabled;
+ unsigned int clk_count;
+ struct clk **clks;
+#endif
+#if defined CONFIG_OF && defined CONFIG_REGULATOR
+ bool regulators_enabled;
+ u32 regulator_count;
+ struct regulator **regulators;
+#endif
+};
+
static void simplefb_clocks_destroy(struct simplefb_par *par);
static void simplefb_regulators_destroy(struct simplefb_par *par);
static void simplefb_destroy(struct fb_info *info)
{
+ struct simplefb_par *par = info->par;
+ struct resource *mem = par->mem;
+
simplefb_regulators_destroy(info->par);
simplefb_clocks_destroy(info->par);
if (info->screen_base)
iounmap(info->screen_base);
+
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
}
static const struct fb_ops simplefb_ops = {
@@ -169,20 +189,6 @@ static int simplefb_parse_pd(struct platform_device *pdev,
return 0;
}
-struct simplefb_par {
- u32 palette[PSEUDO_PALETTE_SIZE];
-#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
- bool clks_enabled;
- unsigned int clk_count;
- struct clk **clks;
-#endif
-#if defined CONFIG_OF && defined CONFIG_REGULATOR
- bool regulators_enabled;
- u32 regulator_count;
- struct regulator **regulators;
-#endif
-};
-
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
/*
* Clock handling code.
@@ -405,7 +411,7 @@ static int simplefb_probe(struct platform_device *pdev)
struct simplefb_params params;
struct fb_info *info;
struct simplefb_par *par;
- struct resource *mem;
+ struct resource *res, *mem;
/*
* Generic drivers must not be registered if a framebuffer exists.
@@ -430,15 +436,28 @@ static int simplefb_probe(struct platform_device *pdev)
if (ret)
return ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
dev_err(&pdev->dev, "No memory resource\n");
return -EINVAL;
}
+ mem = request_mem_region(res->start, resource_size(res), "simplefb");
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ dev_warn(&pdev->dev, "simplefb: cannot reserve video memory at %pR\n", res);
+ mem = res;
+ }
+
info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev);
- if (!info)
- return -ENOMEM;
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_release_mem_region;
+ }
platform_set_drvdata(pdev, info);
par = info->par;
@@ -495,6 +514,9 @@ static int simplefb_probe(struct platform_device *pdev)
info->var.xres, info->var.yres,
info->var.bits_per_pixel, info->fix.line_length);
+ if (mem != res)
+ par->mem = mem; /* release in clean-up handler */
+
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
@@ -513,6 +535,9 @@ error_unmap:
iounmap(info->screen_base);
error_fb_release:
framebuffer_release(info);
+error_release_mem_region:
+ if (mem != res)
+ release_mem_region(mem->start, resource_size(mem));
return ret;
}
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 96e312a3eac7..d21f68f3ee44 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1351,7 +1351,12 @@ static int vga16fb_probe(struct platform_device *dev)
printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
par = info->par;
+#if defined(CONFIG_X86)
+ par->isVGA = screen_info.orig_video_isVGA == VIDEO_TYPE_VGAC;
+#else
+ /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
par->isVGA = screen_info.orig_video_isVGA;
+#endif
par->palette_blanked = 0;
par->vesa_blanked = 0;
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index bda8aa7c2280..5286a53a1875 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -51,7 +51,9 @@ struct dw_mipi_dsi_plat_data {
unsigned int max_data_lanes;
enum drm_mode_status (*mode_valid)(void *priv_data,
- const struct drm_display_mode *mode);
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
const struct dw_mipi_dsi_phy_ops *phy_ops;
const struct dw_mipi_dsi_host_ops *host_ops;
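With the extra parameters, a glue driver can validate modes against the negotiated lane count and pixel format without reaching into private state. A sketch of a callback using the new signature; my_dsi_mode_valid() and the 1 Gbps limit are made up for illustration:

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>

static enum drm_mode_status
my_dsi_mode_valid(void *priv_data, const struct drm_display_mode *mode,
		  unsigned long mode_flags, u32 lanes, u32 format)
{
	int bpp = mipi_dsi_pixel_format_to_bpp(format);

	if (bpp < 0)
		return MODE_BAD;

	/* reject modes whose per-lane bit rate exceeds the made-up PHY limit */
	if (mode->clock * bpp / lanes > 1000000)	/* kbps per lane, ~1 Gbps */
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}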
diff --git a/include/drm/drm_dp_aux_bus.h b/include/drm/dp/drm_dp_aux_bus.h
index 4f19b20b1dd6..4f19b20b1dd6 100644
--- a/include/drm/drm_dp_aux_bus.h
+++ b/include/drm/dp/drm_dp_aux_bus.h
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/dp/drm_dp_dual_mode_helper.h
index 7ee482265087..7ee482265087 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/dp/drm_dp_dual_mode_helper.h
diff --git a/include/drm/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h
index c48bf958a967..69487bd8ed56 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/dp/drm_dp_helper.h
@@ -1041,11 +1041,8 @@ struct drm_panel;
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
/* DPRX Event Status Indicator */
-#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
-/* 0-5 sink count */
-# define DP_SINK_COUNT_CP_READY (1 << 6)
-
-#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
+#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0)
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/dp/drm_dp_mst_helper.h
index 78044ac5b59b..08276eb8c187 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/dp/drm_dp_mst_helper.h
@@ -23,7 +23,7 @@
#define _DRM_DP_MST_HELPER_H_
#include <linux/types.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
new file mode 100644
index 000000000000..f524db152413
--- /dev/null
+++ b/include/drm/drm_buddy.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __DRM_BUDDY_H__
+#define __DRM_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <drm/drm_print.h>
+
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ >= max__ || size__ > max__ - start__; \
+})
+
+struct drm_buddy_block {
+#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define DRM_BUDDY_ALLOCATED (1 << 10)
+#define DRM_BUDDY_FREE (2 << 10)
+#define DRM_BUDDY_SPLIT (3 << 10)
+/* Free to be used, if needed in the future */
+#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
+ u64 header;
+
+ struct drm_buddy_block *left;
+ struct drm_buddy_block *right;
+ struct drm_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through drm_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * drm_buddy_free* ownership is given back to the mm.
+ */
+ struct list_head link;
+ struct list_head tmp_link;
+};
+
+/* Order-zero must be at least PAGE_SIZE */
+#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * drm_buddy_alloc* and drm_buddy_free* should suffice.
+ */
+struct drm_buddy {
+ /* Maintain a free list for each order. */
+ struct list_head *free_list;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free, in which case they will
+ * also exist on the respective free list.
+ */
+ struct drm_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least PAGE_SIZE */
+ u64 chunk_size;
+ u64 size;
+ u64 avail;
+};
+
+static inline u64
+drm_buddy_block_offset(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+drm_buddy_block_order(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+drm_buddy_block_state(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+drm_buddy_block_is_allocated(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED;
+}
+
+static inline bool
+drm_buddy_block_is_free(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
+}
+
+static inline bool
+drm_buddy_block_is_split(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT;
+}
+
+static inline u64
+drm_buddy_block_size(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ return mm->chunk_size << drm_buddy_block_order(block);
+}
+
+int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
+
+void drm_buddy_fini(struct drm_buddy *mm);
+
+struct drm_buddy_block *
+drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order);
+
+int drm_buddy_alloc_range(struct drm_buddy *mm,
+ struct list_head *blocks,
+ u64 start, u64 size);
+
+void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
+
+void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects);
+
+void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
+void drm_buddy_block_print(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ struct drm_printer *p);
+
+#endif
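A minimal usage sketch based on the declarations above, assuming a 1 GiB space managed in PAGE_SIZE chunks and external locking as the struct comment requires; my_buddy_demo() is hypothetical:

#include <linux/sizes.h>
#include <drm/drm_buddy.h>

static int my_buddy_demo(void)
{
	struct drm_buddy mm;
	struct drm_buddy_block *block;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, SZ_1G, PAGE_SIZE);
	if (err)
		return err;

	/* an order-4 block covers 16 chunks, i.e. 64 KiB with 4 KiB pages */
	block = drm_buddy_alloc_blocks(&mm, 4);
	if (!IS_ERR(block))
		drm_buddy_free_block(&mm, block);

	/* or carve out an explicit range and free it as a list */
	err = drm_buddy_alloc_range(&mm, &blocks, 0, SZ_16M);
	if (!err)
		drm_buddy_free_list(&mm, &blocks);

	drm_buddy_fini(&mm);
	return 0;
}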
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index b501d0badaea..64cf5f88c05b 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -522,9 +522,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
#define DRM_COLOR_FORMAT_RGB444 (1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
-#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
+#define DRM_COLOR_FORMAT_YCBCR444 (1<<1)
+#define DRM_COLOR_FORMAT_YCBCR422 (1<<2)
+#define DRM_COLOR_FORMAT_YCBCR420 (1<<3)
/**
* @panel_orientation: Read only connector property for built-in panels,
@@ -592,10 +592,16 @@ struct drm_display_info {
bool rgb_quant_range_selectable;
/**
- * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
- * more stuff redundant with @bus_formats.
+ * @edid_hdmi_dc_rgb444_modes: Mask of supported hdmi deep color modes
+ * in RGB 4:4:4. Even more stuff redundant with @bus_formats.
*/
- u8 edid_hdmi_dc_modes;
+ u8 edid_hdmi_rgb444_dc_modes;
+
+ /**
+ * @edid_hdmi_dc_ycbcr444_modes: Mask of supported hdmi deep color
+ * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats.
+ */
+ u8 edid_hdmi_ycbcr444_dc_modes;
/**
* @cea_rev: CEA revision of the HDMI sink.
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 4d01b4d89775..a70baea0636c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -285,6 +285,10 @@ struct drm_crtc_state {
* Lookup table for converting pixel data after the color conversion
* matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not
* NULL) is an array of &struct drm_color_lut.
+ *
+ * Note that for mostly historical reasons stemming from Xorg heritage,
+ * this is also used to store the color map (also sometimes color lut,
+ * CLUT or color palette) for indexed formats like DRM_FORMAT_C8.
*/
struct drm_property_blob *gamma_lut;
@@ -1075,12 +1079,18 @@ struct drm_crtc {
/**
* @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
* by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint32_t gamma_size;
/**
* @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
* GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint16_t *gamma_store;
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
index cf43561e60fa..ca022e960dcc 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/drm_dsc.h
@@ -8,7 +8,7 @@
#ifndef DRM_DSC_H_
#define DRM_DSC_H_
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
/* VESA Display Stream Compression DSC 1.2 constants */
#define DSC_NUM_BUF_RANGES 15
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 18f6c700f6d0..144c495b99c4 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -401,8 +401,8 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
void
-drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
+drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
void
drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 05e194958265..6fe13cce2670 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -194,7 +194,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
#ifdef CONFIG_DEBUG_FS
void mipi_dbi_debugfs_init(struct drm_minor *minor);
#else
-#define mipi_dbi_debugfs_init NULL
+static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
#endif /* __LINUX_MIPI_DBI_H */
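Replacing the NULL define with a static inline stub keeps the call type-checked and lets callers drop their #ifdef CONFIG_DEBUG_FS guards. A tiny sketch (my_debugfs_init() is hypothetical):

#include <drm/drm_mipi_dbi.h>

static void my_debugfs_init(struct drm_minor *minor)
{
	mipi_dbi_debugfs_init(minor);	/* no-op without CONFIG_DEBUG_FS */
}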
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index b84693fbd2b5..ec4f543c3d95 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -34,6 +34,7 @@ struct drm_modeset_lock;
* struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
+ * @stack_depot: used internally for contention debugging
* @locked: list of held locks
* @trylock_only: trylock mode used in atomic contexts/panic notifiers
* @interruptible: whether interruptible locking should be used.
diff --git a/include/drm/drm_module.h b/include/drm/drm_module.h
new file mode 100644
index 000000000000..4db1ae03d9a5
--- /dev/null
+++ b/include/drm/drm_module.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_MODULE_H
+#define DRM_MODULE_H
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for registering DRM drivers during module
+ * initialization and shutdown. The provided helpers act like bus-specific
+ * module helpers, such as module_pci_driver(), but respect additional
+ * parameters that control DRM driver registration.
+ *
+ * Below is an example of initializing a DRM driver for a device on the
+ * PCI bus.
+ *
+ * .. code-block:: c
+ *
+ * struct pci_driver my_pci_drv = {
+ * };
+ *
+ * drm_module_pci_driver(my_pci_drv);
+ *
+ * The generated code will test if DRM drivers are enabled and register
+ * the PCI driver my_pci_drv. For more complex module initialization, you
+ * can still use module_init() and module_exit() in your driver.
+ */
+
+/*
+ * PCI drivers
+ */
+
+static inline int __init drm_pci_register_driver(struct pci_driver *pci_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper
+ * macro behaves like module_pci_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_pci_driver(__pci_drv) \
+ module_driver(__pci_drv, drm_pci_register_driver, pci_unregister_driver)
+
+static inline int __init
+drm_pci_register_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ if (drm_firmware_drivers_only() && modeset == -1)
+ return -ENODEV;
+ if (modeset == 0)
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+static inline void __exit
+drm_pci_unregister_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ pci_unregister_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver_if_modeset - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ * @__modeset: an additional parameter that disables the driver
+ *
+ * This macro is deprecated and only provided for existing drivers. For
+ * new drivers, use drm_module_pci_driver().
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper macro
+ * behaves like drm_module_pci_driver() with an additional driver-specific
+ * flag. If __modeset is 0, the driver has been disabled, if __modeset is
+ * -1 the driver state depends on the global DRM state. For all other
+ * values, the PCI driver has been enabled. The default should be -1.
+ */
+#define drm_module_pci_driver_if_modeset(__pci_drv, __modeset) \
+ module_driver(__pci_drv, drm_pci_register_driver_if_modeset, \
+ drm_pci_unregister_driver_if_modeset, __modeset)
+
+/*
+ * Platform drivers
+ */
+
+static inline int __init
+drm_platform_driver_register(struct platform_driver *platform_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return platform_driver_register(platform_drv);
+}
+
+/**
+ * drm_module_platform_driver - Register a DRM driver for platform devices
+ * @__platform_drv: the platform driver structure
+ *
+ * Registers a DRM driver for devices on the platform bus. The helper
+ * macro behaves like module_platform_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_platform_driver(__platform_drv) \
+ module_driver(__platform_drv, drm_platform_driver_register, \
+ platform_driver_unregister)
+
+#endif
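By analogy with the PCI example in the DOC comment, a platform driver registers like this; my_platform_drv and its name are hypothetical:

#include <linux/platform_device.h>
#include <drm/drm_module.h>

static struct platform_driver my_platform_drv = {
	.driver = {
		.name = "my-drm-device",
	},
	/* probe/remove callbacks omitted for brevity */
};

drm_module_platform_driver(my_platform_drv);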
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 0c1102dc4d88..06759badf99f 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -516,7 +516,7 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier), and to valdiate modifiers at atomic_check time.
+ * which modifier), and to validate modifiers at atomic_check time.
*
* If not present, then any modifier in the plane's modifier
* list is allowed with any of the plane's formats.
diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h
index 24591b607675..4ef246d5706f 100644
--- a/include/drm/drm_privacy_screen_driver.h
+++ b/include/drm/drm_privacy_screen_driver.h
@@ -73,10 +73,21 @@ struct drm_privacy_screen {
* for more info.
*/
enum drm_privacy_screen_status hw_state;
+ /**
+ * @drvdata: Private data owned by the privacy screen provider
+ */
+ void *drvdata;
};
+static inline
+void *drm_privacy_screen_get_drvdata(struct drm_privacy_screen *priv)
+{
+ return priv->drvdata;
+}
+
struct drm_privacy_screen *drm_privacy_screen_register(
- struct device *parent, const struct drm_privacy_screen_ops *ops);
+ struct device *parent, const struct drm_privacy_screen_ops *ops,
+ void *data);
void drm_privacy_screen_unregister(struct drm_privacy_screen *priv);
void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv);
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 6eae09e382d2..4fd727b52da1 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -105,11 +105,11 @@ struct ttm_resource_manager_func {
* @use_type: The memory type is enabled.
* @use_tt: If a TT object should be used for the backing store.
* @size: Size of the managed region.
+ * @bdev: ttm device this manager belongs to
* @func: structure pointer implementing the range manager. See above
* @move_lock: lock for move fence
- * static information. bdev::driver::io_mem_free is never used.
- * @lru: The lru list for this memory type.
* @move: The fence of the last pipelined move operation.
+ * @lru: The lru list for this memory type.
*
* This structure is used to identify and manage memory types for a device.
*/
@@ -119,20 +119,21 @@ struct ttm_resource_manager {
*/
bool use_type;
bool use_tt;
+ struct ttm_device *bdev;
uint64_t size;
const struct ttm_resource_manager_func *func;
spinlock_t move_lock;
/*
- * Protected by the global->lru_lock.
+ * Protected by @move_lock.
*/
-
- struct list_head lru[TTM_MAX_BO_PRIORITY];
+ struct dma_fence *move;
/*
- * Protected by @move_lock.
+ * Protected by the global->lru_lock.
*/
- struct dma_fence *move;
+
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
};
/**
@@ -160,6 +161,7 @@ struct ttm_bus_placement {
* @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
*
* Structure indicating the placement and space resources used by a
* buffer object.
@@ -170,6 +172,7 @@ struct ttm_resource {
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
+ struct ttm_buffer_object *bo;
};
/**
@@ -261,14 +264,20 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
bool ttm_resource_compat(struct ttm_resource *res,
struct ttm_placement *placement);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
unsigned long p_size);
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
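On the init side, managers now hand their ttm_device to ttm_resource_manager_init(), matching the vmwgfx conversions earlier in this series and complementing the alloc/free pairing sketched above. A sketch with a hypothetical driver-private manager (my_device, my_man and my_manager_func are assumptions):

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

struct my_device {
	struct ttm_device bdev;
	struct ttm_resource_manager my_man;
};

static const struct ttm_resource_manager_func my_manager_func;

static int my_man_init(struct my_device *mdev, unsigned long p_size)
{
	struct ttm_resource_manager *man = &mdev->my_man;

	man->use_tt = true;
	man->func = &my_manager_func;

	ttm_resource_manager_init(man, &mdev->bdev, p_size);
	ttm_set_driver_manager(&mdev->bdev, TTM_PL_PRIV, man);
	ttm_resource_manager_set_used(man, true);
	return 0;
}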
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
new file mode 100644
index 000000000000..19fa0b5ae5ec
--- /dev/null
+++ b/include/linux/dma-buf-map.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer to dma-buf-mapped memory, plus helpers.
+ */
+
+#ifndef __DMA_BUF_MAP_H__
+#define __DMA_BUF_MAP_H__
+
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
+ * Depending on the location of the buffer, users may have to access it with
+ * I/O operations or memory load/store operations. For example, copying to
+ * system memory could be done with memcpy(), copying to I/O memory would be
+ * done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * When using dma-buf's vmap operation, the returned pointer is encoded as
+ * :c:type:`struct dma_buf_map <dma_buf_map>`.
+ * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
+ * system or I/O memory and a flag that signals the required method of
+ * accessing the buffer. Use the returned instance and the helper functions
+ * to access the buffer's memory in the correct way.
+ *
+ * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
+ * actually independent from the dma-buf infrastructure. When sharing buffers
+ * among devices, drivers have to know the location of the memory to access
+ * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
+ * solves this problem for dma-buf and its users. If other drivers or
+ * sub-systems require similar functionality, the type could be generalized
+ * and moved to a more prominent header file.
+ *
+ * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
+ * considered bad style. Rather then accessing its fields directly, use one
+ * of the provided helper functions, or implement your own. For example,
+ * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
+ * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
+ * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ * dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
+ *
+ * Instances of struct dma_buf_map do not have to be cleaned up, but
+ * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * dma_buf_map_clear(&map);
+ *
+ * Test if a mapping is valid with either dma_buf_map_is_set() or
+ * dma_buf_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
+ * for equality with dma_buf_map_is_equal(). Mappings that point to different
+ * memory spaces, system or I/O, are never equal. That's even true if both
+ * spaces are located in the same address space, both mappings contain the
+ * same address value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map sys_map; // refers to system memory
+ * struct dma_buf_map io_map; // refers to I/O memory
+ *
+ * if (dma_buf_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct dma_buf_map can be used to access or manipulate
+ * the buffer memory. Depending on the location of the memory, the provided
+ * helpers will pick the correct operations. Data can be copied into the memory
+ * with dma_buf_map_memcpy_to(). The address can be manipulated with
+ * dma_buf_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * dma_buf_map_memcpy_to(&map, src, len);
+ * dma_buf_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the dma-buf memory is located in I/O
+ * memory, or false otherwise.
+ */
+struct dma_buf_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
+ * @vaddr_: A system-memory address
+ */
+#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
+ * @map: The dma-buf mapping structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
+ * @map: The dma-buf mapping structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
+ * @lhs: The dma-buf mapping structure
+ * @rhs: A dma-buf mapping structure to compare with
+ *
+ * Two dma-buf mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
+ const struct dma_buf_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
+{
+ return !dma_buf_map_is_null(map);
+}
+
+/**
+ * dma_buf_map_clear - Clears a dma-buf mapping structure
+ * @map: The dma-buf mapping structure
+ *
+ * Clears all fields to zero, including struct dma_buf_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset to
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void dma_buf_map_clear(struct dma_buf_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+/**
+ * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
+ * @dst: The dma-buf mapping structure
+ * @src: The source buffer
+ * @len: The number of bytes in src
+ *
+ * Copies data into a dma-buf mapping. The source buffer is in system
+ * memory. Depending on the buffer's location, the helper picks the correct
+ * method of accessing the memory.
+ */
+static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem, src, len);
+ else
+ memcpy(dst->vaddr, src, len);
+}
+
+/**
+ * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
+ * @map: The dma-buf mapping structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in a dma-buf mapping. Depending on the
+ * buffer's location, the correct value will be updated.
+ */
+static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+#endif /* __DMA_BUF_MAP_H__ */
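A short consumer sketch tying the helpers above together; my_fill() is hypothetical:

#include <linux/dma-buf-map.h>

static void my_fill(struct dma_buf_map *map, const void *data, size_t len)
{
	struct dma_buf_map dst = *map;	/* keep the caller's map intact */

	if (dma_buf_map_is_null(&dst))
		return;

	dma_buf_map_memcpy_to(&dst, data, len);	/* system or I/O memory */
	dma_buf_map_incr(&dst, len);	/* now points past the copy */
}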
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index eebf04325b34..a715df97b31a 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -458,8 +458,8 @@ void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
- unsigned *pshared_count, struct dma_fence ***pshared);
+int dma_resv_get_fences(struct dma_resv *obj, bool write,
+ unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
unsigned long timeout);
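The write flag replaces the separate exclusive/shared out-parameters; passing true asks for the full set of fences a write access would have to wait for. A caller-side sketch (my_snapshot_fences() is hypothetical), noting that the caller owns the returned references:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

static int my_snapshot_fences(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int i, num_fences;
	int ret;

	ret = dma_resv_get_fences(obj, true, &num_fences, &fences);
	if (ret)
		return ret;

	/* ... inspect or wait on the snapshot here ... */

	for (i = 0; i < num_fences; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return 0;
}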
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3da95842b207..9a14f3f8a329 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -502,6 +502,7 @@ struct fb_info {
} *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool forced_out; /* set when being removed by another driver */
};
static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f9348769e558..efa5c324369a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -230,7 +230,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
-} while (0);
+} while (0)
/*
* Take/release a lock when not the owner will release it.
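The dropped semicolon matters because the macro otherwise expanded to two statements, breaking un-braced if/else users. A minimal sketch of the construct that failed to compile before this fix (my_take_lock() is illustrative); with the stray semicolon, the else was reported as "else without a previous if":

#include <linux/rwsem.h>

static void my_take_lock(struct rw_semaphore *sem,
			 struct rw_semaphore *outer, bool nested)
{
	if (nested)
		down_write_nest_lock(sem, outer);
	else
		down_write(sem);
}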
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 73ad784fca96..811ea668c4a1 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -91,6 +91,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049,
RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050,
RPI_FIRMWARE_NOTIFY_XHCI_RESET = 0x00030058,
+ RPI_FIRMWARE_NOTIFY_DISPLAY_DONE = 0x00030066,
/* Dispmanx TAGS */
RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 061e700dd06c..9e40277d8185 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -84,14 +84,14 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns; /* absolute */
};
+/* Valid flags to pass to drm_panfrost_create_bo */
#define PANFROST_BO_NOEXEC 1
#define PANFROST_BO_HEAP 2
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
*
- * There are currently no values for the flags argument, but it may be
- * used in a future extension.
+ * The flags argument is a bit mask of PANFROST_BO_* flags.
*/
struct drm_panfrost_create_bo {
__u32 size;