Diffstat (limited to 'drivers/gpu')
-rw-r--r-- drivers/gpu/drm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atom.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/logger.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/vector.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/stats/stats.c | 12
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.c | 23
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.h | 3
-rw-r--r-- drivers/gpu/drm/armada/armada_gem.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 1
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 19
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 4
-rw-r--r-- drivers/gpu/drm/bridge/cdns-dsi.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 15
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 10
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_fbdev.c | 20
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_main.c | 43
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 12
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 391
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 40
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 121
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 35
-rw-r--r-- drivers/gpu/drm/drm_crtc_internal.h | 3
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/drm_dumb_buffers.c | 7
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 282
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 9
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 5
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_hashtab.c | 2
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 7
-rw-r--r-- drivers/gpu/drm/drm_memory.c | 2
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 91
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c | 5
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 2
-rw-r--r-- drivers/gpu/drm/drm_panel.c | 16
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 41
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 4
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 34
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 10
-rw-r--r-- drivers/gpu/drm/drm_writeback.c | 350
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/accel_2d.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 62
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 10
-rw-r--r-- drivers/gpu/drm/gma500/gtt.h | 2
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/mid_bios.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 11
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 13
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 179
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 96
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 83
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 21
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 321
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 157
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.h | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 464
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 41
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 136
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 67
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_pvinfo.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 150
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 45
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 131
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic_plane.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 121
-rw-r--r-- drivers/gpu/drm/i915/intel_breadcrumbs.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 40
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 84
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 571
-rw-r--r-- drivers/gpu/drm/i915/intel_display.h | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 219
-rw-r--r-- drivers/gpu/drm/i915/intel_dpll_mgr.c | 70
-rw-r--r-- drivers/gpu/drm/i915/intel_dpll_mgr.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_engine_cs.c | 176
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_submission.c | 76
-rw-r--r-- drivers/gpu/drm/i915/intel_hdcp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 42
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 389
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 117
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 381
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 172
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 22
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 38
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 207
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_uc.c | 129
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_vbt_defs.h | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_workarounds.c | 94
-rw-r--r-- drivers/gpu/drm/i915/selftests/huge_pages.c | 50
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 150
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 34
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 110
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 6
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 5
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c | 31
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 53
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_lrc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_context.c | 7
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 14
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_engine.c | 47
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 17
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.c | 66
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_fb.c | 76
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_fb.h | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_plane.c | 7
-rw-r--r-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 24
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 54
-rw-r--r-- drivers/gpu/drm/nouveau/nv84_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/fifo.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/mmu.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/object.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/vmm.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/event.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/sdi.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c | 109
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.h | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-p079zca.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-lvds.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-orisetech-otm8009a.c | 58
-rw-r--r-- drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 65
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_kms.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/kv_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 39
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs780_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/rv6xx_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/sumo_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/rcar-du/Makefile | 7
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 156
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 15
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 12
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.c | 3
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-reg.c | 16
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 86
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 3
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 73
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.c | 6
-rw-r--r-- drivers/gpu/drm/savage/savage_bci.c | 5
-rw-r--r-- drivers/gpu/drm/selftests/drm_mm_selftests.h | 2
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_mm.c | 95
-rw-r--r-- drivers/gpu/drm/shmobile/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 42
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_crtc.h | 1
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_kms.c | 11
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_kms.h | 1
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_plane.c | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 6
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/tegra/gem.c | 14
-rw-r--r-- drivers/gpu/drm/tinydrm/repaper.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 8
-rw-r--r-- drivers/gpu/drm/udl/udl_dmabuf.c | 18
-rw-r--r-- drivers/gpu/drm/udl/udl_drv.h | 3
-rw-r--r-- drivers/gpu/drm/udl/udl_gem.c | 15
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 98
-rw-r--r-- drivers/gpu/drm/vc4/vc4_regs.h | 6
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 5
-rw-r--r-- drivers/gpu/drm/via/via_dmablit.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_display.c | 32
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.h | 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_fb.c | 8
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_plane.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 47
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 20
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 48
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 14
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 5
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 5
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.h | 4
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_shbuf.c | 2
-rw-r--r-- drivers/gpu/host1x/bus.c | 9
-rw-r--r-- drivers/gpu/vga/Kconfig | 2
-rw-r--r-- drivers/gpu/vga/vgaarb.c | 2
281 files changed, 5575 insertions(+), 3706 deletions(-)
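Most of the allocation changes in this series follow a single pattern: open-coded size multiplications fed to kzalloc()/kmalloc()/vzalloc() are replaced with the overflow-checking helpers kcalloc(), kmalloc_array() and array_size(). A minimal sketch of the difference, not taken from the patch itself (alloc_table is a hypothetical name):

#include <linux/slab.h>

/* If n * entry_size overflows size_t, kzalloc(n * entry_size, ...)
 * silently allocates a short buffer that is later overrun; kcalloc()
 * detects the overflow and returns NULL instead.
 */
static void *alloc_table(size_t n, size_t entry_size)
{
	return kcalloc(n, entry_size, GFP_KERNEL);	/* NULL on overflow */
}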
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ef9f3dab287f..69c13517ea3a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o drm_lease.o
+ drm_syncobj.o drm_lease.o drm_writeback.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
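The Makefile hunk above adds drm_writeback.o to the always-built core objects; the drm_atomic.c, drm_connector.c and new drm_writeback.c changes in this series build on it. A hedged sketch of how a driver might register a writeback connector (see include/drm/drm_writeback.h for the exact API; the example_* names are hypothetical):

#include <drm/drm_writeback.h>

static const u32 example_wb_formats[] = { DRM_FORMAT_XRGB8888 };

/* Registers a DRM_MODE_CONNECTOR_WRITEBACK connector together with its
 * WRITEBACK_FB_ID, WRITEBACK_PIXEL_FORMATS and WRITEBACK_OUT_FENCE_PTR
 * properties. */
static int example_writeback_init(struct drm_device *dev,
				  struct drm_writeback_connector *wb,
				  const struct drm_connector_funcs *con_funcs,
				  const struct drm_encoder_helper_funcs *enc_funcs)
{
	return drm_writeback_connector_init(dev, wb, con_funcs, enc_funcs,
					    example_wb_formats,
					    ARRAY_SIZE(example_wb_formats));
}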
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 03ee36739efe..f4c474a95875 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -310,20 +310,20 @@ static int acp_hw_init(void *handle)
pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
}
- adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
+ adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
if (adev->acp.acp_cell == NULL)
return -ENOMEM;
- adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);
+ adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
- i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
+ i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_cell);
@@ -512,7 +512,7 @@ static int acp_hw_fini(void *handle)
if (adev->acp.acp_genpd) {
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+ ret = pm_genpd_remove_device(dev);
/* If removal fails, dont giveup and try rest */
if (ret)
dev_err(dev, "remove dev from genpd failed\n");
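Besides the kcalloc() conversions, the hunk above tracks a genpd API change: pm_genpd_remove_device() no longer takes the domain, since the core can derive it from the device itself. A minimal sketch of the new call:

/* The PM domain argument is gone; the core looks it up from dev. */
ret = pm_genpd_remove_device(dev);	/* was: pm_genpd_remove_device(&gpd, dev) */
if (ret)
	dev_err(dev, "remove dev from genpd failed\n");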
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0ff36d45a597..ea79908dac4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -407,7 +407,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -504,7 +504,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
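The register-dump buffers here use kmalloc_array() rather than kcalloc(): the same overflow check on the multiply, but no zeroing, which is fine because the dump loop writes every slot before the buffer is read. The remaining "* 2" inside the first argument is a small compile-time constant and cannot overflow. A sketch:

/* Overflow-checked, non-zeroed array allocation. */
buf = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (!buf)
	return -ENOMEM;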
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6ef9762b4b00..19dd665e7307 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -395,7 +395,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -491,7 +491,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index f0c0d3953f69..1db60aa5b7f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -504,7 +504,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -606,7 +606,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index def1010ac05e..77ad59ade85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -452,7 +452,7 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
- kzalloc(psl->ucNumEntries *
+ kcalloc(psl->ucNumEntries,
sizeof(struct amdgpu_phase_shedding_limits_entry),
GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 17d6b9fb6d77..dd11b7313ca0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -369,7 +369,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
- adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
+ adev->gart.pages = vzalloc(array_size(sizeof(void *),
+ adev->gart.num_cpu_pages));
if (adev->gart.pages == NULL)
return -ENOMEM;
#endif
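vzalloc() has no calloc-style counterpart, so the multiplication above is wrapped in array_size() from <linux/overflow.h>, which saturates to SIZE_MAX on overflow and turns a wrapped size into an allocation failure rather than a short buffer. A sketch:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

/* SIZE_MAX on overflow, so vzalloc() fails instead of under-allocating. */
pages = vzalloc(array_size(sizeof(void *), num_cpu_pages));
if (!pages)
	return -ENOMEM;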
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d1f05489595b..b2286bc41aec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -202,7 +202,6 @@ error:
* 0 on success or negative error code.
*/
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -210,7 +209,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- r = drm_gem_map_attach(dma_buf, target_dev, attach);
+ r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;
@@ -341,9 +340,7 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
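Two dma-buf interface changes show up in this hunk: the attach callback loses its separate target_dev parameter (the importing device is reachable as attach->dev), and the atomic kmap hooks (.map_atomic/.unmap_atomic) are dropped from struct dma_buf_ops. A minimal sketch of an attach hook against the new signature (example_map_attach is hypothetical):

static int example_map_attach(struct dma_buf *dma_buf,
			      struct dma_buf_attachment *attach)
{
	struct device *importer = attach->dev;	/* formerly the target_dev argument */

	dev_dbg(importer, "attaching dma-buf\n");
	return drm_gem_map_attach(dma_buf, attach);
}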
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index d167e8ab76d3..e3878256743a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -53,7 +53,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
n -= adev->irq.ih.ring_size;
n /= size;
- gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+ gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
DRM_ERROR("Failed to allocate %d pointers\n", n);
r = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 6cd518f6f7a4..b18c31a701e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1221,7 +1221,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.abort = false;
ectx.last_jump = 0;
if (ws)
- ectx.ws = kzalloc(4 * ws, GFP_ATOMIC);
+ ectx.ws = kcalloc(4, ws, GFP_ATOMIC);
else
ectx.ws = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index b6248c0578a1..c9d45cffca56 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5679,8 +5679,9 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct amdgpu_ps),
+ GFP_KERNEL);
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
@@ -5927,7 +5928,9 @@ static int ci_dpm_init(struct amdgpu_device *adev)
ci_set_private_data_variables_based_on_pptable(adev);
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+ kcalloc(4,
+ sizeof(struct amdgpu_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
ci_dpm_fini(adev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d79e6f5234da..46de1fd18a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2727,8 +2727,9 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct amdgpu_ps),
+ GFP_KERNEL);
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 9567dd0a01bc..d51318c695e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7242,8 +7242,9 @@ static int si_parse_power_table(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct amdgpu_ps),
+ GFP_KERNEL);
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
@@ -7346,7 +7347,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
return ret;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+ kcalloc(4,
+ sizeof(struct amdgpu_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a1c912bc959a..655950102827 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3915,8 +3915,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Flip */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* update crtc fb */
- crtc->primary->fb = fb;
WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
WARN_ON(!acrtc_state->stream);
@@ -4628,8 +4626,8 @@ static int dm_update_crtcs_state(struct dc *dc,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_dm_connector *aconnector = NULL;
- struct drm_connector_state *new_con_state = NULL;
- struct dm_connector_state *dm_conn_state = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
@@ -4650,19 +4648,22 @@ static int dm_update_crtcs_state(struct dc *dc,
/* TODO This hack should go away */
if (aconnector && enable) {
// Make sure fake sink is created in plug-in scenario
- new_con_state = drm_atomic_get_connector_state(state,
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ &aconnector->base);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
&aconnector->base);
- if (IS_ERR(new_con_state)) {
- ret = PTR_ERR_OR_ZERO(new_con_state);
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
break;
}
- dm_conn_state = to_dm_connector_state(new_con_state);
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
new_stream = create_stream_for_sink(aconnector,
&new_crtc_state->mode,
- dm_conn_state);
+ dm_new_conn_state);
/*
* we can have no stream on ACTION_SET if a display
@@ -4781,8 +4782,17 @@ next_crtc:
*/
BUG_ON(dm_new_crtc_state->stream == NULL);
- /* Color managment settings */
- if (dm_new_crtc_state->base.color_mgmt_changed) {
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
if (ret)
goto fail;
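The amdgpu_dm change switches from drm_atomic_get_connector_state(), which acquires locks and adds the connector to the transaction, to the drm_atomic_get_old/new_connector_state() accessors, which only look up state already captured in the transaction (and may return NULL). A sketch of the usual iteration pattern over both variants:

struct drm_connector *connector;
struct drm_connector_state *old_s, *new_s;
int i;

/* Walks connector states already part of the atomic transaction. */
for_each_oldnew_connector_in_state(state, connector, old_s, new_s, i) {
	/* compare old_s vs. new_s, e.g. for scaling/underscan changes */
}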
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index dea49dc9b518..b19dc4cfc030 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -435,7 +435,7 @@ bool dm_helpers_submit_i2c(
return false;
}
- msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
+ msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
if (!msgs)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 733bc5bd760b..a3c56cd8b396 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -365,7 +365,7 @@ void dm_logger_open(
entry->type = log_type;
entry->logger = logger;
- entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+ entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
GFP_KERNEL);
entry->buf_offset = 0;
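The logger.c hunk is a case where the multiplication simply disappears rather than moving into a helper: sizeof(char) is 1 by definition, so the product was already the plain buffer size and no array allocator is needed:

entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE, GFP_KERNEL);	/* sizeof(char) == 1 */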
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 217b8f1f7bf6..d28e9cf0e961 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -40,7 +40,7 @@ bool dal_vector_construct(
return false;
}
- vector->container = kzalloc(struct_size * capacity, GFP_KERNEL);
+ vector->container = kcalloc(capacity, struct_size, GFP_KERNEL);
if (vector->container == NULL)
return false;
vector->capacity = capacity;
@@ -67,7 +67,7 @@ bool dal_vector_presized_costruct(
return false;
}
- vector->container = kzalloc(struct_size * count, GFP_KERNEL);
+ vector->container = kcalloc(count, struct_size, GFP_KERNEL);
if (vector->container == NULL)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 599c7ab6befe..88b09dd758ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1079,13 +1079,15 @@ static void get_ss_info_from_atombios(
if (*ss_entries_num == 0)
return;
- ss_info = kzalloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num),
+ ss_info = kcalloc(*ss_entries_num,
+ sizeof(struct spread_spectrum_info),
GFP_KERNEL);
ss_info_cur = ss_info;
if (ss_info == NULL)
return;
- ss_data = kzalloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num),
+ ss_data = kcalloc(*ss_entries_num,
+ sizeof(struct spread_spectrum_data),
GFP_KERNEL);
if (ss_data == NULL)
goto out_free_info;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 80038e0e610f..ab5483c0c502 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -98,7 +98,8 @@ struct gpio_service *dal_gpio_service_create(
if (number_of_bits) {
uint32_t index_of_uint = 0;
- slot = kzalloc(number_of_uints * sizeof(uint32_t),
+ slot = kcalloc(number_of_uints,
+ sizeof(uint32_t),
GFP_KERNEL);
if (!slot) {
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index b422fa2c6a79..98edaefa2b47 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1373,19 +1373,22 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
- axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+ axix_x = kvcalloc(ramp->num_entries + 3, sizeof(*axix_x),
GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+ GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
@@ -1512,13 +1515,15 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_user = kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS,
+ sizeof(*rgb_user),
+ GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
@@ -1579,19 +1584,21 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve),
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+ axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+ GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
@@ -1668,8 +1675,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
@@ -1693,8 +1700,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
kvfree(rgb_regamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
@@ -1756,8 +1763,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
@@ -1776,8 +1783,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
kvfree(rgb_degamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
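The gamma tables above use the kvmalloc family: kvcalloc() is the overflow-checked, zeroed variant that tries kmalloc() first and falls back to vmalloc() for large allocations, which suits tables sized by MAX_HW_POINTS. Buffers from this family must be released with kvfree(). A sketch of the pairing:

rgb = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
	return false;
/* ... fill and use rgb ... */
kvfree(rgb);	/* handles both kmalloc- and vmalloc-backed memory */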
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 27d4003aa2c7..fa344ceafc17 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -155,7 +155,8 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
if (core_freesync == NULL)
goto fail_alloc_context;
- core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+ core_freesync->map = kcalloc(MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+ sizeof(struct freesync_entity),
GFP_KERNEL);
if (core_freesync->map == NULL)
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 480eb2cdd55d..3d4c1b1ab8c4 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -141,19 +141,17 @@ struct mod_stats *mod_stats_create(struct dc *dc)
else
core_stats->entries = reg_data;
}
- core_stats->time = kzalloc(
- sizeof(struct stats_time_cache) *
- core_stats->entries,
+ core_stats->time = kcalloc(core_stats->entries,
+ sizeof(struct stats_time_cache),
GFP_KERNEL);
if (core_stats->time == NULL)
goto fail_construct_time;
core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
- core_stats->events = kzalloc(
- sizeof(struct stats_event_cache) *
- core_stats->event_entries,
- GFP_KERNEL);
+ core_stats->events = kcalloc(core_stats->event_entries,
+ sizeof(struct stats_event_cache),
+ GFP_KERNEL);
if (core_stats->events == NULL)
goto fail_construct_events;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 323990b77ead..91ffb7bc4ee7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -50,7 +50,7 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
return 0;
}
- hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
+ hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
if (hwmgr->ps == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 16903dc7fe0d..c3349b8fb58b 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- if (!crtc->primary->fb)
- return;
-
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index ac92bce07ecd..edd15126bde9 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -7,30 +7,15 @@
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-static void armada_fb_destroy(struct drm_framebuffer *fb)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-
- drm_framebuffer_cleanup(&dfb->fb);
- drm_gem_object_put_unlocked(&dfb->obj->obj);
- kfree(dfb);
-}
-
-static int armada_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *dfile, unsigned int *handle)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
- return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
-}
-
static const struct drm_framebuffer_funcs armada_fb_funcs = {
- .destroy = armada_fb_destroy,
- .create_handle = armada_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->fmt = format;
dfb->mod = config;
- dfb->obj = obj;
+ dfb->fb.obj[0] = &obj->obj;
drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
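The armada conversion is one of several in this series (cirrus, mediatek, msm, omapdrm, rockchip) that drop a driver-private framebuffer wrapper in favour of the GEM object array embedded in struct drm_framebuffer. Once the object lives in fb->obj[0], the generic helpers replace the hand-rolled callbacks, as in this sketch (example_fb_funcs is hypothetical):

#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_framebuffer_funcs example_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,		/* cleanup + put the obj[] refs */
	.create_handle	= drm_gem_fb_create_handle,	/* handle from fb->obj[0] */
};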
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index 48073c4f54d8..5c130ff5da77 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -10,13 +10,12 @@
struct armada_framebuffer {
struct drm_framebuffer fb;
- struct armada_gem_object *obj;
uint8_t fmt;
uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
container_of(dfb, struct armada_framebuffer, fb)
-#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a97f509743a5..3fb37c75c065 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -490,8 +490,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map_atomic = armada_gem_dmabuf_no_kmap,
- .unmap_atomic = armada_gem_dmabuf_no_kunmap,
.map = armada_gem_dmabuf_no_kmap,
.unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1ea5c36b006..843cac222e60 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
pm_runtime_get_sync(dev->dev);
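Adding drm_atomic_helper_shutdown() to the unload path commits a state that disables all CRTCs and planes before the mode objects are destroyed, so no framebuffer references survive teardown; this is what lets the next hunk delete the plane .destroy hook that dropped those references by hand. A sketch of the teardown ordering (example_unload is hypothetical):

static void example_unload(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
	drm_atomic_helper_shutdown(dev);	/* disable everything, drop fb refs */
	drm_mode_config_cleanup(dev);
}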
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 73c875db45f4..1aecc74cc463 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
-static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_state *state)
{
- struct drm_crtc *crtc = plane->base.crtc;
+ struct drm_crtc *crtc = state->base.crtc;
struct drm_color_lut *lut;
int idx;
@@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
- atmel_hlcdc_plane_update_clut(plane);
+ atmel_hlcdc_plane_update_clut(plane, state);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
-{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
- if (plane->base.fb)
- drm_framebuffer_put(plane->base.fb);
-
- drm_plane_cleanup(p);
-}
-
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = atmel_hlcdc_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index fa2c7997e2fd..bf6cad6c9178 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
- depends on OF && RC_CORE
+ depends on OF
select DRM_KMS_HELPER
imply EXTCON
+ select INPUT
+ select RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index c255fc3e1be5..f2d43f24acfb 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
.transfer = cdns_dsi_transfer,
};
-static int cdns_dsi_resume(struct device *dev)
+static int __maybe_unused cdns_dsi_resume(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
@@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
return 0;
}
-static int cdns_dsi_suspend(struct device *dev)
+static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
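Marking the suspend/resume callbacks __maybe_unused silences -Wunused-function in configurations where nothing references them (for instance with CONFIG_PM disabled), without wrapping them in #ifdefs. A sketch of the common pattern, assuming the callbacks are wired up through SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing when sleep support is off:

static int __maybe_unused example_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};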
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index ec8d0006ef7c..3c136f2b954f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2077,7 +2077,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
return ret;
}
-void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
+void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
{
mutex_lock(&hdmi->mutex);
@@ -2103,13 +2103,6 @@ void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
}
mutex_unlock(&hdmi->mutex);
}
-
-void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
-{
- struct dw_hdmi *hdmi = dev_get_drvdata(dev);
-
- __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
-}
EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -2145,9 +2138,9 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
*/
if (intr_stat &
(HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
- __dw_hdmi_setup_rx_sense(hdmi,
- phy_stat & HDMI_PHY_HPD,
- phy_stat & HDMI_PHY_RX_SENSE);
+ dw_hdmi_setup_rx_sense(hdmi,
+ phy_stat & HDMI_PHY_HPD,
+ phy_stat & HDMI_PHY_RX_SENSE);
if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
cec_notifier_set_phys_addr(hdmi->cec_notifier,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index be2d7e488062..ce9db7aab225 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -92,7 +92,6 @@
#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
struct cirrus_crtc {
struct drm_crtc base;
@@ -117,11 +116,6 @@ struct cirrus_connector {
struct drm_connector base;
};
-struct cirrus_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct cirrus_mc {
resource_size_t vram_size;
resource_size_t vram_base;
@@ -152,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
- struct cirrus_framebuffer gfb;
+ struct drm_framebuffer gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
@@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args);
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32fbfba2c623..b643ac92801c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = afbdev->gfb.base.format->cpp[0];
+ int bpp = afbdev->gfb.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->gfb.obj;
+ obj = afbdev->gfb.obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
gfbdev->sysram = sysram;
gfbdev->size = size;
- fb = &gfbdev->gfb.base;
+ fb = &gfbdev->gfb;
if (!fb) {
DRM_INFO("fb is NULL\n");
return -EINVAL;
@@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+ struct drm_framebuffer *gfb = &gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
- if (gfb->obj) {
- drm_gem_object_put_unlocked(gfb->obj);
- gfb->obj = NULL;
+ if (gfb->obj[0]) {
+ drm_gem_object_put_unlocked(gfb->obj[0]);
+ gfb->obj[0] = NULL;
}
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
- drm_framebuffer_unregister_private(&gfb->base);
- drm_framebuffer_cleanup(&gfb->base);
+ drm_framebuffer_unregister_private(gfb);
+ drm_framebuffer_cleanup(gfb);
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 26df1e8cd490..60d54e10a34d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -10,42 +10,25 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "cirrus_drv.h"
-static int cirrus_create_handle(struct drm_framebuffer *fb,
- struct drm_file* file_priv,
- unsigned int* handle)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
-}
-
-static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- drm_gem_object_put_unlocked(cirrus_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = cirrus_create_handle,
- .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
+ gfb->obj[0] = obj;
+ ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
@@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
{
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
+ struct drm_framebuffer *fb;
u32 bpp;
int ret;
@@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL)
return ERR_PTR(-ENOENT);
- cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
- if (!cirrus_fb) {
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
- ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
drm_gem_object_put_unlocked(obj);
- kfree(cirrus_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return &cirrus_fb->base;
+ return fb;
}
static const struct drm_mode_config_funcs cirrus_mode_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c91b9b054e3f..b529f8c8e2a6 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
int x, int y, int atomic)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
struct cirrus_bo *bo;
int ret;
u64 gpu_addr;
/* push the previous fb to system ram */
if (!atomic && fb) {
- cirrus_fb = to_cirrus_framebuffer(fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
return ret;
@@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
cirrus_bo_unreserve(bo);
}
- cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
@@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
- if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
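The drm_atomic.c changes that follow add per-connector out-fences for writeback: userspace passes a pointer via the WRITEBACK_OUT_FENCE_PTR property, the kernel pre-writes -1 with put_user() as a failure sentinel, and a sync_file fd is stored there once the commit is accepted. A hedged userspace sketch using libdrm (the connector and property ids are assumed to have been looked up already):

int32_t out_fence_fd = -1;

drmModeAtomicAddProperty(req, wb_connector_id, writeback_out_fence_ptr_prop,
			 (uint64_t)(uintptr_t)&out_fence_fd);
if (drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL) == 0 &&
    out_fence_fd >= 0) {
	/* wait on the sync_file fd, then close(out_fence_fd) */
}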
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 895741e9cd7d..178842380f75 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
return fence_ptr;
}
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ s32 __user *fence_ptr)
+{
+ unsigned int index = drm_connector_index(connector);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ state->connectors[index].out_fence_ptr = fence_ptr;
+
+ return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ unsigned int index = drm_connector_index(connector);
+ s32 __user *fence_ptr;
+
+ fence_ptr = state->connectors[index].out_fence_ptr;
+ state->connectors[index].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_crtc *crtc = state->crtc;
struct drm_mode_modeinfo umode;
/* Early return for no change. */
@@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
drm_mode_copy(&state->mode, mode);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- mode->name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ mode->name, crtc->base.id, crtc->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
+ struct drm_crtc *crtc = state->crtc;
+
if (blob == state->mode_blob)
return 0;
@@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
memset(&state->mode, 0, sizeof(state->mode));
if (blob) {
- if (blob->length != sizeof(struct drm_mode_modeinfo) ||
- drm_mode_convert_umode(state->crtc->dev, &state->mode,
- blob->data))
+ int ret;
+
+ if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+ crtc->base.id, crtc->name,
+ blob->length);
return -EINVAL;
+ }
+
+ ret = drm_mode_convert_umode(crtc->dev,
+ &state->mode, blob->data);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ crtc->base.id, crtc->name,
+ ret, drm_get_mode_status_name(state->mode.status));
+ drm_mode_debug_printmodeline(&state->mode);
+ return -EINVAL;
+ }
state->mode_blob = drm_property_blob_get(blob);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- state->mode.name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ state->mode.name, crtc->base.id, crtc->name,
+ state);
} else {
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
return -EFAULT;
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
- } else if (crtc->funcs->atomic_set_property)
+ } else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- else
+ } else {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
return -EINVAL;
+ }
return 0;
}
@@ -677,6 +729,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_connector_check - check connector state
+ * @connector: connector to check
+ * @state: connector state to check
+ *
+ * Provides core sanity checks for connector state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_connector_check(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_writeback_job *writeback_job = state->writeback_job;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
+ return 0;
+
+ if (writeback_job->fb && !state->crtc) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ if (state->crtc)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+
+ if (writeback_job->fb && !crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+ connector->base.id, connector->name,
+ state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ if (writeback_job->out_fence && !writeback_job->fb) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
@@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
WARN_ON(!state->acquire_ctx);
+ /* the legacy pointers should never be set */
+ WARN_ON(plane->fb);
+ WARN_ON(plane->old_fb);
+ WARN_ON(plane->crtc);
+
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->alpha_property) {
state->alpha = val;
} else if (property == plane->rotation_property) {
- if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
+ if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+ plane->base.id, plane->name, val);
return -EINVAL;
+ }
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* either *both* CRTC and FB must be set, or neither */
if (state->crtc && !state->fb) {
- DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
+ plane->base.id, plane->name);
return -EINVAL;
} else if (state->fb && !state->crtc) {
- DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
- DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
+ DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+ state->crtc->base.id, state->crtc->name,
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+ plane->base.id, plane->name,
drm_get_format_name(state->fb->format->format,
&format_name),
state->fb->modifier);
@@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
- DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane->base.id, plane->name,
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
@@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
- DRM_DEBUG_ATOMIC("Invalid source coordinates "
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ plane->base.id, plane->name,
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@@ -1120,6 +1235,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
+ obj_state->state = state;
state->num_private_objs = num_objs;
@@ -1278,6 +1394,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->link_status = val;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
+ } else if (property == config->content_type_property) {
+ state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == connector->content_protection_property) {
@@ -1286,10 +1404,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->writeback_fb_id_property) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ if (fb)
+ drm_framebuffer_put(fb);
+ return ret;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+ return set_out_fence_for_connector(state->state, connector,
+ fence_ptr);
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -1363,10 +1495,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->link_status;
} else if (property == config->aspect_ratio_property) {
*val = state->picture_aspect_ratio;
+ } else if (property == config->content_type_property) {
+ *val = state->content_type;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->writeback_fb_id_property) {
+ /* Writeback framebuffer is one-shot, write and forget */
+ *val = 0;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ *val = 0;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1456,11 +1595,12 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
}
if (crtc)
- DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
- plane_state, crtc->base.id, crtc->name);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+ plane->base.id, plane->name, plane_state,
+ crtc->base.id, crtc->name);
else
- DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
- plane_state);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+ plane->base.id, plane->name, plane_state);
return 0;
}
@@ -1480,12 +1620,15 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
+ struct drm_plane *plane = plane_state->plane;
+
if (fb)
- DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
- fb->base.id, plane_state);
- else
- DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+ fb->base.id, plane->base.id, plane->name,
plane_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
drm_framebuffer_assign(&plane_state->fb, fb);
}
@@ -1546,6 +1689,7 @@ int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc)
{
+ struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
if (conn_state->crtc == crtc)
@@ -1573,10 +1717,12 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
drm_connector_get(conn_state->connector);
conn_state->crtc = crtc;
- DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
conn_state, crtc->base.id, crtc->name);
} else {
- DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+ connector->base.id, connector->name,
conn_state);
}
@@ -1584,6 +1730,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+/*
+ * drm_atomic_get_writeback_job - return or allocate a writeback job
+ * @conn_state: Connector state to get the job for
+ *
+ * Writeback jobs have a different lifetime to the atomic state they are
+ * associated with. This convenience function takes care of allocating a job
+ * if there isn't yet one associated with the connector state, otherwise
+ * it just returns the existing job.
+ *
+ * Returns: The writeback job for the given connector state
+ */
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job)
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+ return conn_state->writeback_job;
+}
+
+/**
+ * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
+ * @conn_state: atomic state object for the connector
+ * @fb: fb to use for the connector
+ *
+ * This is used to set the framebuffer for a writeback connector, which outputs
+ * to a buffer instead of an actual physical connector.
+ * Changing the assigned framebuffer requires us to grab a reference to the new
+ * fb and drop the reference to the old fb, if there is one. This function
+ * takes care of all these details besides updating the pointer in the
+ * state object itself.
+ *
+ * Note: The only way conn_state can already have an fb set is if the commit
+ * sets the property more than once.
+ *
+ * See also: drm_writeback_connector_init()
+ *
+ * Returns: 0 on success
+ */
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ struct drm_writeback_job *job =
+ drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ drm_framebuffer_assign(&job->fb, fb);
+
+ if (fb)
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+ fb->base.id, conn_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+ conn_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
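
For reference, a minimal userspace sketch of exercising the two new connector properties through the atomic ioctl. This is an illustration, not part of the patch: the object and property IDs (conn_id, prop_wb_fb_id, prop_wb_out_fence_ptr, fb_id) are assumed to have been looked up beforehand via drmModeObjectGetProperties(), matching the WRITEBACK_FB_ID and WRITEBACK_OUT_FENCE_PTR properties created by drm_writeback_connector_init(); the libdrm atomic calls themselves are standard.

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int queue_writeback(int fd, uint32_t conn_id, uint32_t prop_wb_fb_id,
                               uint32_t prop_wb_out_fence_ptr, uint32_t fb_id)
    {
            int32_t out_fence = -1; /* the kernel writes a sync_file fd here */
            drmModeAtomicReq *req = drmModeAtomicAlloc();
            int ret;

            if (!req)
                    return -ENOMEM;

            drmModeAtomicAddProperty(req, conn_id, prop_wb_fb_id, fb_id);
            drmModeAtomicAddProperty(req, conn_id, prop_wb_out_fence_ptr,
                                     (uint64_t)(uintptr_t)&out_fence);

            ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
            drmModeAtomicFree(req);
            if (ret)
                    return ret;

            /* Wait or poll on out_fence to learn when the write-out finished,
             * then close() it. The fb property itself is one-shot. */
            return out_fence;
    }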
+
/**
* drm_atomic_add_affected_connectors - add connectors for crtc
* @state: atomic state
@@ -1672,6 +1882,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+ DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
+
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -1702,6 +1915,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
@@ -1724,6 +1939,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ ret = drm_atomic_connector_check(conn, conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
+ conn->base.id, conn->name);
+ return ret;
+ }
+ }
+
if (config->funcs->atomic_check) {
ret = config->funcs->atomic_check(state->dev, state);
@@ -2048,45 +2272,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
/**
- * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
- *
- * @dev: drm device to check.
- * @plane_mask: plane mask for planes that were updated.
- * @ret: return value, can be -EDEADLK for a retry.
- *
- * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
- * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
- * is a common operation for each atomic update, so this call is split off as a
- * helper.
- */
-void drm_atomic_clean_old_fb(struct drm_device *dev,
- unsigned plane_mask,
- int ret)
-{
- struct drm_plane *plane;
-
- /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
- * locks (ie. while it is still safe to deref plane->state). We
- * need to do this here because the driver entry points cannot
- * distinguish between legacy and atomic ioctls.
- */
- drm_for_each_plane_mask(plane, dev, plane_mask) {
- if (ret == 0) {
- struct drm_framebuffer *new_fb = plane->state->fb;
- if (new_fb)
- drm_framebuffer_get(new_fb);
- plane->fb = new_fb;
- plane->crtc = plane->state->crtc;
-
- if (plane->old_fb)
- drm_framebuffer_put(plane->old_fb);
- }
- plane->old_fb = NULL;
- }
-}
-EXPORT_SYMBOL(drm_atomic_clean_old_fb);
-
-/**
* DOC: explicit fencing properties
*
* Explicit fencing allows userspace to control the buffer synchronization
@@ -2161,7 +2346,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
return 0;
}
-static int prepare_crtc_signaling(struct drm_device *dev,
+static int prepare_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_mode_atomic *arg,
struct drm_file *file_priv,
@@ -2170,6 +2355,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@@ -2235,6 +2422,43 @@ static int prepare_crtc_signaling(struct drm_device *dev,
c++;
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ struct drm_writeback_job *job;
+ struct drm_out_fence_state *f;
+ struct dma_fence *fence;
+ s32 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_connector(state, conn);
+ if (!fence_ptr)
+ continue;
+
+ job = drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ fence = drm_writeback_get_out_fence((struct drm_writeback_connector *)conn);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ job->out_fence = fence;
+ }
+
/*
 * Having this flag means user mode is waiting on an event that will never
 * be delivered, due to the lack of at least one CRTC for signaling
@@ -2245,11 +2469,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
}
-static void complete_crtc_signaling(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_out_fence_state *fence_state,
- unsigned int num_fences,
- bool install_fds)
+static void complete_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -2306,9 +2530,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- struct drm_plane *plane;
struct drm_out_fence_state *fence_state;
- unsigned plane_mask;
int ret = 0;
unsigned int i, j, num_fences;
@@ -2348,7 +2570,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
- plane_mask = 0;
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
@@ -2419,17 +2640,11 @@ retry:
copied_props++;
}
- if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
- !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
- plane = obj_to_plane(obj);
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
- }
drm_mode_object_put(obj);
}
- ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
- &num_fences);
+ ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
if (ret)
goto out;
@@ -2445,9 +2660,7 @@ retry:
}
out:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
- complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
+ complete_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 130da5195f3b..17baf5057132 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_writeback.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -1172,6 +1173,25 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
+static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ const struct drm_connector_helper_funcs *funcs;
+
+ funcs = connector->helper_private;
+
+ if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+ WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+ funcs->atomic_commit(connector, new_conn_state->writeback_job);
+ }
+ }
+}
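
On the driver side, the helper above hands each queued job to the connector's &drm_connector_helper_funcs.atomic_commit hook. A minimal sketch of such a hook, under the assumption that the drm_writeback helpers added elsewhere in this series behave as their names suggest (drm_writeback_queue_job() and drm_writeback_signal_completion() are inferred from the new drivers/gpu/drm/drm_writeback.c; my_wb_atomic_commit(), my_wb_write_complete_irq() and my_hw_enqueue() are hypothetical driver code):

    static void my_wb_atomic_commit(struct drm_connector *conn,
                                    struct drm_writeback_job *job)
    {
            struct drm_writeback_connector *wb_conn =
                    container_of(conn, struct drm_writeback_connector, base);

            /* Hand ownership of the job to the writeback machinery... */
            drm_writeback_queue_job(wb_conn, job);
            /* ...and kick off the hardware write-out of job->fb. */
            my_hw_enqueue(wb_conn, job->fb);
    }

    /* Later, from the write-out completion interrupt: */
    static void my_wb_write_complete_irq(struct drm_writeback_connector *wb_conn)
    {
            /* Signals the out-fence and sends the completion event. */
            drm_writeback_signal_completion(wb_conn, 0);
    }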
+
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
@@ -1251,6 +1271,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
+
+ drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -2914,7 +2936,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@@ -2957,17 +2978,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
-
- if (clean_old_fbs) {
- plane->old_fb = plane->fb;
- plane_mask |= BIT(drm_plane_index(plane));
- }
}
ret = drm_atomic_commit(state);
free:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@@ -3129,13 +3143,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
state->acquire_ctx = ctx;
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- WARN_ON(plane->crtc != new_plane_state->crtc);
- WARN_ON(plane->fb != new_plane_state->fb);
- WARN_ON(plane->old_fb);
-
+ for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
- }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@@ -3660,6 +3669,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->crtc)
drm_connector_get(connector);
state->commit = NULL;
+
+ /* Don't copy over a writeback job; jobs are only used once */
+ state->writeback_job = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9b9ba5d5ec0c..2f9ebddd178e 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
+ { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
};
void drm_connector_ida_init(void)
@@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = __drm_mode_object_add(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
@@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
- if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
+ connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_object_attach_property(&connector->base,
config->edid_property,
0);
@@ -720,6 +726,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
+static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
+ { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
+ { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
+ { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
+ { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
+ { DRM_MODE_CONTENT_TYPE_GAME, "Game" },
+};
+
static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
{ DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
{ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
@@ -997,6 +1011,84 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
+ * DOC: HDMI connector properties
+ *
+ * content type (HDMI specific):
+ * Indicates the content type setting to be used in HDMI infoframes, so that
+ * the external device can adjust its display settings accordingly.
+ *
+ * The value of this property can be one of the following:
+ *
+ * No Data:
+ * Content type is unknown
+ * Graphics:
+ * Content type is graphics
+ * Photo:
+ * Content type is photo
+ * Cinema:
+ * Content type is cinema
+ * Game:
+ * Content type is game
+ *
+ * Drivers can set up this property by calling
+ * drm_connector_attach_content_type_property(). Decoding to infoframe
+ * values is done through drm_hdmi_avi_infoframe_content_type().
+ */
+
+/**
+ * drm_connector_attach_content_type_property - attach content-type property
+ * @connector: connector to attach content type property on.
+ *
+ * Called by a driver the first time an HDMI connector is created.
+ */
+int drm_connector_attach_content_type_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_content_type_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.content_type_property,
+ DRM_MODE_CONTENT_TYPE_NO_DATA);
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_type_property);
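
A driver would typically attach this while registering its HDMI connector; a short sketch (my_hdmi_connector_init() is hypothetical):

    static int my_hdmi_connector_init(struct drm_connector *connector)
    {
            /* Creates the enum property on first use and attaches it with
             * DRM_MODE_CONTENT_TYPE_NO_DATA as the initial value. */
            return drm_connector_attach_content_type_property(connector);
    }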
+
+/**
+ * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
+ * content type information, based on the corresponding DRM property.
+ * @frame: HDMI AVI infoframe
+ * @conn_state: DRM display connector state
+ *
+ */
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ switch (conn_state->content_type) {
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ break;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
+ break;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ frame->content_type = HDMI_CONTENT_TYPE_GAME;
+ break;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
+ break;
+ default:
+ /* Graphics is the default (0) */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ }
+
+ frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
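
An HDMI encoder's infoframe setup could then apply the property like so (sketch only; hdmi_avi_infoframe_init() is the standard helper from <linux/hdmi.h>, my_setup_avi_infoframe() is hypothetical):

    static void my_setup_avi_infoframe(const struct drm_connector_state *conn_state)
    {
            struct hdmi_avi_infoframe frame;

            hdmi_avi_infoframe_init(&frame);
            /* Translate the "content type" property into the CN bits and
             * set ITC for anything other than "No Data". */
            drm_hdmi_avi_infoframe_content_type(&frame, conn_state);
            /* ... fill the remaining fields, pack and upload the frame ... */
    }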
+
+/**
* drm_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
@@ -1261,6 +1353,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed; the property must then be
+ * attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+ if (dev->mode_config.content_type_property)
+ return 0;
+
+ dev->mode_config.content_type_property =
+ drm_property_create_enum(dev, 0, "content type",
+ drm_content_type_enum_list,
+ ARRAY_SIZE(drm_content_type_enum_list));
+
+ if (dev->mode_config.content_type_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
+/**
 * drm_mode_create_suggested_offset_properties - create suggested offset properties
* @dev: DRM device
*
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 98a36e6c69ad..f45e7a8d4acd 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -286,6 +286,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (WARN_ON(config->num_crtc >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
crtc->dev = dev;
crtc->funcs = funcs;
@@ -469,23 +473,32 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
- drm_for_each_crtc(tmp, crtc->dev)
- tmp->primary->old_fb = tmp->primary->fb;
+ drm_for_each_crtc(tmp, crtc->dev) {
+ struct drm_plane *plane = tmp->primary;
+
+ plane->old_fb = plane->fb;
+ }
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
if (ret == 0) {
- crtc->primary->crtc = fb ? crtc : NULL;
- crtc->primary->fb = fb;
+ struct drm_plane *plane = crtc->primary;
+
+ if (!plane->state) {
+ plane->crtc = fb ? crtc : NULL;
+ plane->fb = fb;
+ }
}
drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
+ struct drm_plane *plane = tmp->primary;
+
+ if (plane->fb)
+ drm_framebuffer_get(plane->fb);
+ if (plane->old_fb)
+ drm_framebuffer_put(plane->old_fb);
+ plane->old_fb = NULL;
}
return ret;
@@ -640,7 +653,9 @@ retry:
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
- DRM_DEBUG_KMS("Invalid mode\n");
+ DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
+ ret, drm_get_mode_status_name(mode->status));
+ drm_mode_debug_printmodeline(mode);
goto out;
}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 5d307b23a4e6..34499800932a 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -56,6 +56,9 @@ int drm_mode_setcrtc(struct drm_device *dev,
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+/* drm_modes.c */
+const char *drm_get_mode_status_name(enum drm_mode_status status);
+
/* IOCTLs */
int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 36c7609a4bd5..a7ba602a43a8 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1159,6 +1159,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
static const u16 psr_setup_time_us[] = {
PSR_SETUP_TIME(330),
PSR_SETUP_TIME(275),
+ PSR_SETUP_TIME(220),
PSR_SETUP_TIME(165),
PSR_SETUP_TIME(110),
PSR_SETUP_TIME(55),
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f6910ebe4d0e..b553a6f2ff0e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -756,7 +756,7 @@ static void remove_compat_control_link(struct drm_device *dev)
if (!minor)
return;
- name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
if (!name)
return;
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 39ac15ce4702..9e2ae02f31e0 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return -EINVAL;
/* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
+ if (args->bpp > U32_MAX - 8)
+ return -EINVAL;
cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
+ if (cpp > U32_MAX / args->width)
return -EINVAL;
stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
+ if (args->height > U32_MAX / stride)
return -EINVAL;
/* test for wrap-around */
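
A worked example makes the reordered checks easier to see: for args->bpp in the range 0xfffffff9..0xffffffff, the old DIV_ROUND_UP(args->bpp, 8) computed (bpp + 7) / 8 with the addition wrapping in 32 bits, so the result happened to collapse to zero and only the !cpp test caught it. Rejecting bpp > U32_MAX - 8 up front makes the rejection explicit instead of relying on that wraparound. A standalone sketch of the same guards (illustration only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define U32_MAX 0xffffffffU
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int dumb_size_ok(uint32_t bpp, uint32_t width, uint32_t height)
    {
            uint32_t cpp, stride;

            if (!width || !height || !bpp)
                    return 0;
            if (bpp > U32_MAX - 8)          /* (bpp + 7) would wrap */
                    return 0;
            cpp = DIV_ROUND_UP(bpp, 8);
            if (cpp > U32_MAX / width)      /* cpp * width would wrap */
                    return 0;
            stride = cpp * width;
            if (height > U32_MAX / stride)  /* stride * height would wrap */
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d %d\n", dumb_size_ok(32, 1024, 768),  /* prints 1 */
                   dumb_size_ok(0xfffffff9, 1, 1));         /* prints 0 */
            return 0;
    }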
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 40e1e24f2ff0..5dc742b27ca0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -163,8 +163,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
- /* HTC Vive VR Headset */
+ /* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+ { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@@ -687,562 +688,562 @@ static const struct minimode extra_modes[] = {
static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
- /* 1 - 640x480@60Hz */
+ /* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 2 - 720x480@60Hz */
+ /* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 3 - 720x480@60Hz */
+ /* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 4 - 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 5 - 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 6 - 720(1440)x480i@60Hz */
+ /* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 7 - 720(1440)x480i@60Hz */
+ /* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 8 - 720(1440)x240@60Hz */
+ /* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 9 - 720(1440)x240@60Hz */
+ /* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 10 - 2880x480i@60Hz */
+ /* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 11 - 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 12 - 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 13 - 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 14 - 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 15 - 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 16 - 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 17 - 720x576@50Hz */
+ /* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 18 - 720x576@50Hz */
+ /* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 19 - 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 20 - 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 21 - 720(1440)x576i@50Hz */
+ /* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 22 - 720(1440)x576i@50Hz */
+ /* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 23 - 720(1440)x288@50Hz */
+ /* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 24 - 720(1440)x288@50Hz */
+ /* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 25 - 2880x576i@50Hz */
+ /* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 26 - 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 27 - 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 28 - 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 29 - 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 30 - 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 31 - 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 32 - 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 33 - 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 34 - 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 35 - 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 36 - 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 37 - 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 38 - 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 39 - 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 40 - 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 41 - 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 42 - 720x576@100Hz */
+ /* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 43 - 720x576@100Hz */
+ /* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 44 - 720(1440)x576i@100Hz */
+ /* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 45 - 720(1440)x576i@100Hz */
+ /* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 46 - 1920x1080i@120Hz */
+ /* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 47 - 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 48 - 720x480@120Hz */
+ /* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 49 - 720x480@120Hz */
+ /* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 50 - 720(1440)x480i@120Hz */
+ /* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 51 - 720(1440)x480i@120Hz */
+ /* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 52 - 720x576@200Hz */
+ /* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 53 - 720x576@200Hz */
+ /* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 54 - 720(1440)x576i@200Hz */
+ /* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 55 - 720(1440)x576i@200Hz */
+ /* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 56 - 720x480@240Hz */
+ /* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 57 - 720x480@240Hz */
+ /* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240Hz */
+ /* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240Hz */
+ /* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 60 - 1280x720@24Hz */
+ /* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 61 - 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 62 - 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 63 - 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 64 - 1920x1080@100Hz */
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 65 - 1280x720@24Hz */
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 66 - 1280x720@25Hz */
+ /* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 67 - 1280x720@30Hz */
+ /* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 68 - 1280x720@50Hz */
+ /* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 69 - 1280x720@60Hz */
+ /* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 70 - 1280x720@100Hz */
+ /* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 71 - 1280x720@120Hz */
+ /* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 72 - 1920x1080@24Hz */
+ /* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 73 - 1920x1080@25Hz */
+ /* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 74 - 1920x1080@30Hz */
+ /* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 75 - 1920x1080@50Hz */
+ /* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 76 - 1920x1080@60Hz */
+ /* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 77 - 1920x1080@100Hz */
+ /* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 78 - 1920x1080@120Hz */
+ /* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 79 - 1680x720@24Hz */
+ /* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 80 - 1680x720@25Hz */
+ /* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 81 - 1680x720@30Hz */
+ /* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 82 - 1680x720@50Hz */
+ /* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 83 - 1680x720@60Hz */
+ /* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 84 - 1680x720@100Hz */
+ /* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 85 - 1680x720@120Hz */
+ /* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 86 - 2560x1080@24Hz */
+ /* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 87 - 2560x1080@25Hz */
+ /* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 88 - 2560x1080@30Hz */
+ /* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 89 - 2560x1080@50Hz */
+ /* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 90 - 2560x1080@60Hz */
+ /* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 91 - 2560x1080@100Hz */
+ /* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 92 - 2560x1080@120Hz */
+ /* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 93 - 3840x2160p@24Hz 16:9 */
+ /* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 94 - 3840x2160p@25Hz 16:9 */
+ /* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 95 - 3840x2160p@30Hz 16:9 */
+ /* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 96 - 3840x2160p@50Hz 16:9 */
+ /* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 97 - 3840x2160p@60Hz 16:9 */
+ /* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 98 - 4096x2160p@24Hz 256:135 */
+ /* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 99 - 4096x2160p@25Hz 256:135 */
+ /* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 100 - 4096x2160p@30Hz 256:135 */
+ /* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 101 - 4096x2160p@50Hz 256:135 */
+ /* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 102 - 4096x2160p@60Hz 256:135 */
+ /* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 103 - 3840x2160p@24Hz 64:27 */
+ /* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 104 - 3840x2160p@25Hz 64:27 */
+ /* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 105 - 3840x2160p@30Hz 64:27 */
+ /* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 106 - 3840x2160p@50Hz 64:27 */
+ /* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 107 - 3840x2160p@60Hz 64:27 */
+ /* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
@@ -1633,7 +1634,8 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
edid[0x7e] = valid_extensions;
- new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
+ GFP_KERNEL);
if (!new)
goto out;
@@ -4873,6 +4875,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
+ * Since some drivers don't support atomic, we can't rely on connector
+ * state. Instead, initialize the frame with default values, the same
+ * way the other properties are handled here.
+ */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ frame->itc = 0;
+
+ /*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA mode list.
*/
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2ee1eaa66188..cab14f253384 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -368,7 +368,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
- unsigned int plane_mask;
struct drm_modeset_acquire_ctx ctx;
drm_modeset_acquire_init(&ctx, 0);
@@ -381,7 +380,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
state->acquire_ctx = &ctx;
retry:
- plane_mask = 0;
drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -391,9 +389,6 @@ retry:
plane_state->rotation = DRM_MODE_ROTATE_0;
- plane->old_fb = plane->fb;
- plane_mask |= 1 << drm_plane_index(plane);
-
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
@@ -430,8 +425,6 @@ retry:
ret = drm_atomic_commit(state);
out_state:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK)
goto backoff;
@@ -1164,7 +1157,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
* @info: fbdev registered by the helper
* @rect: info about rectangle to fill
*
- * A wrapper around cfb_imageblit implemented by fbdev core
+ * A wrapper around cfb_fillrect implemented by fbdev core
*/
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index bfedceff87bb..46b11e46edbd 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -836,8 +836,6 @@ retry:
goto unlock;
plane_mask |= BIT(drm_plane_index(plane));
-
- plane->old_fb = plane->fb;
}
/* This list is only filled when disable_crtcs is set. */
@@ -852,9 +850,6 @@ retry:
ret = drm_atomic_commit(state);
unlock:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index acfbc0641a06..2810d4131411 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
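With the early return for an unchanged framebuffer gone, the helper now looks up the implicit fence unconditionally. It is typically wired in as a plane's prepare_fb hook; a minimal sketch:

	#include <drm/drm_gem_framebuffer_helper.h>
	#include <drm/drm_modeset_helper_vtables.h>

	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
		/* attaches the dma-buf's exclusive fence to the plane state
		 * so the atomic commit waits for rendering to finish */
		.prepare_fb = drm_gem_fb_prepare_fb,
	};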
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index dae18e58e79b..c92b00d42ece 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -47,7 +47,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
if (size <= PAGE_SIZE / sizeof(*ht->table))
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
else
- ht->table = vzalloc(size*sizeof(*ht->table));
+ ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
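array_size() comes from <linux/overflow.h> and saturates to SIZE_MAX on multiplication overflow, so the allocator fails cleanly instead of returning an undersized buffer. A minimal sketch of the pattern used here and in the other conversions below:

	#include <linux/overflow.h>
	#include <linux/vmalloc.h>

	static void *alloc_table(size_t nmemb, size_t elem_size)
	{
		/* on overflow, array_size() yields SIZE_MAX and vzalloc()
		 * simply returns NULL */
		return vzalloc(array_size(nmemb, elem_size));
	}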
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0d4cfb232576..fe49fb0356b5 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->aspect_ratio_allowed = req->value;
break;
+ case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->writeback_connectors = req->value;
+ break;
default:
return -EINVAL;
}
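From userspace, the new cap is opted into with drmSetClientCap(); a hedged sketch (assuming libdrm headers new enough to define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS):

	#include <fcntl.h>
	#include <stdio.h>
	#include <xf86drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

		if (fd < 0)
			return 1;

		/* The kernel rejects the writeback cap unless the client has
		 * already opted into atomic, mirroring the check above. */
		if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1) ||
		    drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1))
			fprintf(stderr, "writeback connectors not available\n");

		return 0;
	}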
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 3c54044214db..d69e4fc1ee77 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -80,7 +80,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(num_pages * sizeof(struct page *));
+ page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
if (!page_map)
return NULL;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3166026a1874..3cc5fbd78ee2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+ return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+ struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+ u64 x = node->hole_size;
+ bool first = true;
+
+ while (*link) {
+ rb = *link;
+ if (x > rb_to_hole_size(rb)) {
+ link = &rb->rb_left;
+ } else {
+ link = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb_hole_size, rb, link);
+ rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
- RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+ insert_hole_size(&mm->holes_size, node);
RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
list_del(&node->hole_stack);
- rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+ rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
node->hole_size = 0;
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
- struct rb_node *best = NULL;
- struct rb_node **link = &mm->holes_size.rb_node;
+ struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+ struct drm_mm_node *best = NULL;
- while (*link) {
- struct rb_node *rb = *link;
+ do {
+ struct drm_mm_node *node =
+ rb_entry(rb, struct drm_mm_node, rb_hole_size);
- if (size <= rb_hole_size(rb)) {
- link = &rb->rb_left;
- best = rb;
+ if (size <= node->hole_size) {
+ best = node;
+ rb = rb->rb_right;
} else {
- link = &rb->rb_right;
+ rb = rb->rb_left;
}
- }
+ } while (rb);
- return rb_hole_size_to_node(best);
+ return best;
}
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
+ struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
- struct rb_node **link = &mm->holes_addr.rb_node;
- while (*link) {
+ while (rb) {
u64 hole_start;
- node = rb_hole_addr_to_node(*link);
+ node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
if (addr < hole_start)
- link = &node->rb_hole_addr.rb_left;
+ rb = node->rb_hole_addr.rb_left;
else if (addr > hole_start + node->hole_size)
- link = &node->rb_hole_addr.rb_right;
+ rb = node->rb_hole_addr.rb_right;
else
break;
}
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
- if (RB_EMPTY_ROOT(&mm->holes_size))
- return NULL;
-
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
- return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+ return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_reserve_node);
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+ return rb ? rb_to_hole_size(rb) : 0;
+}
+
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
{
struct drm_mm_node *hole;
u64 remainder_mask;
+ bool once;
DRM_MM_BUG_ON(range_start >= range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
+ if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+ return -ENOSPC;
+
if (alignment <= 1)
alignment = 0;
+ once = mode & DRM_MM_INSERT_ONCE;
+ mode &= ~DRM_MM_INSERT_ONCE;
+
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
- for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
- hole = next_hole(mm, hole, mode)) {
+ for (hole = first_hole(mm, range_start, range_end, size, mode);
+ hole;
+ hole = once ? NULL : next_hole(mm, hole, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
- rb_replace_node(&old->rb_hole_size,
- &new->rb_hole_size,
- &mm->holes_size);
+ rb_replace_node_cached(&old->rb_hole_size,
+ &new->rb_hole_size,
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
&mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
INIT_LIST_HEAD(&mm->hole_stack);
mm->interval_tree = RB_ROOT_CACHED;
- mm->holes_size = RB_ROOT;
+ mm->holes_size = RB_ROOT_CACHED;
mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
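A hedged sketch of a driver exercising the reworked allocator (DRM_MM_INSERT_ONCE is the new flag handled above; with the cached size tree, the largest hole sits at rb_first_cached(), which is what makes the early -ENOSPC check cheap):

	#include <drm/drm_mm.h>

	static int my_alloc_range(struct drm_mm *mm, struct drm_mm_node *node)
	{
		/* BEST picks the smallest hole that fits; ONCE stops after
		 * that first candidate instead of walking the whole tree */
		return drm_mm_insert_node_in_range(mm, node, 0x10000, 0, 0,
						   0, 1ull << 32,
						   DRM_MM_INSERT_BEST |
						   DRM_MM_INSERT_ONCE);
	}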
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index e5c653357024..21e353bd3948 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
+ /* only expose writeback connectors if userspace understands them */
+ if (!file_priv->writeback_connectors &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
+ continue;
+
if (drm_lease_held(file_priv, connector->base.id)) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c78ca0e84ffd..7f552d5fa88e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1257,7 +1257,7 @@ static const char * const drm_mode_status_names[] = {
#undef MODE_STATUS
-static const char *drm_get_mode_status_name(enum drm_mode_status status)
+const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 308d442a531b..965530a6f4cd 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* An error is returned if the panel is already attached to another connector.
*
+ * When unloading, the driver should detach from the panel by calling
+ * drm_panel_detach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
+ panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
+ if (!panel->link) {
+ dev_err(panel->dev, "failed to link panel to %s\n",
+ dev_name(connector->dev->dev));
+ return -EINVAL;
+ }
+
panel->connector = connector;
panel->drm = connector->dev;
@@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
* Detaches a panel from the connector it is attached to. If a panel is not
* attached to any connector this is effectively a no-op.
*
+ * This function should not be called by the panel device itself; it is
+ * meant only for the DRM device that called drm_panel_attach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_detach(struct drm_panel *panel)
{
+ device_link_del(panel->link);
+
panel->connector = NULL;
panel->drm = NULL;
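A sketch of the expected pairing in a connector driver (hypothetical context struct and function names; note that drm_panel_attach() can now also fail with -EINVAL when the device link cannot be created):

	#include <drm/drm_panel.h>

	struct my_ctx {				/* hypothetical driver context */
		struct drm_panel *panel;
		struct drm_connector connector;
	};

	static int my_bind(struct my_ctx *ctx)
	{
		return drm_panel_attach(ctx->panel, &ctx->connector);
	}

	static void my_unbind(struct my_ctx *ctx)
	{
		drm_panel_detach(ctx->panel);	/* drops the device link, too */
	}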
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 035054455301..df0b4ebbedbf 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
@@ -561,19 +565,20 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
if (i == plane->format_count)
return -EINVAL;
- if (!plane->modifier_count)
- return 0;
+ if (plane->funcs->format_mod_supported) {
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return -EINVAL;
+ } else {
+ if (!plane->modifier_count)
+ return 0;
- for (i = 0; i < plane->modifier_count; i++) {
- if (modifier == plane->modifiers[i])
- break;
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return -EINVAL;
}
- if (i == plane->modifier_count)
- return -EINVAL;
-
- if (plane->funcs->format_mod_supported &&
- !plane->funcs->format_mod_supported(plane, format, modifier))
- return -EINVAL;
return 0;
}
@@ -650,9 +655,11 @@ static int __setplane_internal(struct drm_plane *plane,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
if (!ret) {
- plane->crtc = crtc;
- plane->fb = fb;
- drm_framebuffer_get(plane->fb);
+ if (!plane->state) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ drm_framebuffer_get(plane->fb);
+ }
} else {
plane->old_fb = NULL;
}
@@ -1092,8 +1099,10 @@ retry:
/* Keep the old fb, don't unref it. */
plane->old_fb = NULL;
} else {
- plane->fb = fb;
- drm_framebuffer_get(fb);
+ if (!plane->state) {
+ plane->fb = fb;
+ drm_framebuffer_get(fb);
+ }
}
out:
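The reordered check means that when a driver provides the hook, it is now authoritative and the static modifier list is not consulted at all. A sketch of such a hook for hypothetical linear-only hardware:

	#include <drm/drm_fourcc.h>
	#include <drm/drm_plane.h>

	static bool my_format_mod_supported(struct drm_plane *plane,
					    u32 format, u64 modifier)
	{
		/* hardware scans out linear XRGB/ARGB buffers only */
		if (modifier != DRM_FORMAT_MOD_LINEAR)
			return false;

		return format == DRM_FORMAT_XRGB8888 ||
		       format == DRM_FORMAT_ARGB8888;
	}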
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index f88f68161519..2010794943bc 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -502,6 +502,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
int drm_plane_helper_disable(struct drm_plane *plane)
{
struct drm_plane_state *plane_state;
+ struct drm_framebuffer *old_fb;
/* crtc helpers love to call disable functions for already disabled hw
* functions. So cope with that. */
@@ -521,8 +522,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state->plane = plane;
plane_state->crtc = NULL;
+ old_fb = plane_state->fb;
drm_atomic_set_fb_for_plane(plane_state, NULL);
- return drm_plane_helper_commit(plane, plane_state, plane->fb);
+ return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_plane_helper_disable);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 397b46b33739..186db2e4c57a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
- * @target_dev: not used
* @attach: buffer attachment data
*
* Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
*
* Returns 0 on success, negative error code on failure.
*/
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
@@ -435,35 +434,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
-/**
* drm_gem_dmabuf_kmap - map implementation for GEM
* @dma_buf: buffer to be mapped
* @page_num: page number within the buffer
@@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2660543ad86a..c3301046dfaa 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -173,7 +173,7 @@ vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
@@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
-static int drm_vm_shm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
@@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
-static int drm_vm_dma_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
-static int drm_vm_sg_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
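vm_fault_t is a dedicated return type for fault handlers, so VM_FAULT_* codes cannot be silently mixed up with negative errnos. A hedged sketch of the converted handler shape, with hypothetical names throughout:

	#include <linux/mm.h>
	#include <linux/mm_types.h>

	struct my_buf {				/* hypothetical backing object */
		struct page *page;
	};

	static vm_fault_t my_vm_fault(struct vm_fault *vmf)
	{
		struct my_buf *buf = vmf->vma->vm_private_data;

		if (!buf || !buf->page)
			return VM_FAULT_SIGBUS;	/* typed code, not -EFAULT */

		get_page(buf->page);
		vmf->page = buf->page;
		return 0;			/* fault handled */
	}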
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
new file mode 100644
index 000000000000..827395071f0b
--- /dev/null
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_property.h>
+#include <drm/drm_writeback.h>
+#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: overview
+ *
+ * Writeback connectors are used to expose hardware which can write the output
+ * from a CRTC to a memory buffer. They are used and act similarly to other
+ * types of connectors, with some important differences:
+ * - Writeback connectors don't provide a way to output visually to the user.
+ * - Writeback connectors should always report as "disconnected" (so that
+ * clients which don't understand them will ignore them).
+ * - Writeback connectors don't have EDID.
+ *
+ * A framebuffer may only be attached to a writeback connector when the
+ * connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
+ * framebuffer applies only to a single commit (see below). A framebuffer may
+ * not be attached while the CRTC is off.
+ *
+ * Unlike with planes, when a writeback framebuffer is removed by userspace DRM
+ * makes no attempt to remove it from active use by the connector. This is
+ * because no method is provided to abort a writeback operation, and in any
+ * case making a new commit whilst a writeback is ongoing is undefined (see
+ * WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
+ * the framebuffer will automatically no longer be in active use. As it will
+ * also have already been removed from the framebuffer list, there will be no
+ * way for any userspace application to retrieve a reference to it in the
+ * intervening period.
+ *
+ * Writeback connectors have some additional properties, which userspace
+ * can use to query and control them:
+ *
+ * "WRITEBACK_FB_ID":
+ * Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
+ * framebuffer to be written by the writeback connector. This property is
+ * similar to the FB_ID property on planes, but will always read as zero
+ * and is not preserved across commits.
+ * Userspace must set this property to an output buffer every time it
+ * wishes the buffer to get filled.
+ *
+ * "WRITEBACK_PIXEL_FORMATS":
+ * Immutable blob property to store the supported pixel formats table. The
+ * data is an array of u32 DRM_FORMAT_* fourcc values.
+ * Userspace can use this blob to find out what pixel formats are supported
+ * by the connector's writeback engine.
+ *
+ * "WRITEBACK_OUT_FENCE_PTR":
+ * Userspace can use this property to provide a pointer for the kernel to
+ * fill with a sync_file file descriptor, which will signal once the
+ * writeback is finished. The value should be the address of a 32-bit
+ * signed integer, cast to a u64.
+ * Userspace should wait for this fence to signal before making another
+ * commit affecting any of the same CRTCs, Planes or Connectors.
+ * **Failure to do so will result in undefined behaviour.**
+ * For this reason it is strongly recommended that all userspace
+ * applications making use of writeback connectors *always* retrieve an
+ * out-fence for the commit and use it appropriately.
+ * From userspace, this property will always read as zero.
+ */
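To make the property flow concrete, a hedged userspace sketch (the object and property IDs below are placeholders; a real client discovers them via drmModeObjectGetProperties()):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	#define WB_CONN_ID	42	/* placeholder writeback connector id */
	#define PROP_WB_FB_ID	10	/* placeholder "WRITEBACK_FB_ID" id */
	#define PROP_OUT_FENCE	11	/* placeholder "WRITEBACK_OUT_FENCE_PTR" */

	static int queue_writeback(int fd, uint32_t fb_id)
	{
		int32_t out_fence = -1;
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int err;

		if (!req)
			return -1;

		drmModeAtomicAddProperty(req, WB_CONN_ID, PROP_WB_FB_ID, fb_id);
		/* the kernel writes a sync_file fd into out_fence on success */
		drmModeAtomicAddProperty(req, WB_CONN_ID, PROP_OUT_FENCE,
					 (uint64_t)(uintptr_t)&out_fence);

		err = drmModeAtomicCommit(fd, req,
					  DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
		drmModeAtomicFree(req);

		/* wait on out_fence before committing to these CRTCs again */
		return err ? err : out_fence;
	}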
+
+#define fence_to_wb_connector(x) container_of(x->lock, \
+ struct drm_writeback_connector, \
+ fence_lock)
+
+static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->base.dev->driver->name;
+}
+
+static const char *
+drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->timeline_name;
+}
+
+static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops drm_writeback_fence_ops = {
+ .get_driver_name = drm_writeback_fence_get_driver_name,
+ .get_timeline_name = drm_writeback_fence_get_timeline_name,
+ .enable_signaling = drm_writeback_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
+static int create_writeback_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+
+ if (!dev->mode_config.writeback_fb_id_property) {
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_FB_ID",
+ DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_fb_id_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_pixel_formats_property) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_ATOMIC |
+ DRM_MODE_PROP_IMMUTABLE,
+ "WRITEBACK_PIXEL_FORMATS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_pixel_formats_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_out_fence_ptr_property) {
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_OUT_FENCE_PTR", 0,
+ U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_out_fence_ptr_property = prop;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * drm_writeback_connector_init - Initialize a writeback connector and its properties
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not already been created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values. It will also create an internal encoder associated with the
+ * drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
+ * the encoder helper.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_property_blob *blob;
+ struct drm_connector *connector = &wb_connector->base;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret = create_writeback_properties(dev);
+
+ if (ret != 0)
+ return ret;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+ ret = drm_encoder_init(dev, &wb_connector->encoder,
+ &drm_writeback_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ goto fail;
+
+ connector->interlace_allowed = 0;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ goto connector_fail;
+
+ ret = drm_mode_connector_attach_encoder(connector,
+ &wb_connector->encoder);
+ if (ret)
+ goto attach_fail;
+
+ INIT_LIST_HEAD(&wb_connector->job_queue);
+ spin_lock_init(&wb_connector->job_lock);
+
+ wb_connector->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&wb_connector->fence_lock);
+ snprintf(wb_connector->timeline_name,
+ sizeof(wb_connector->timeline_name),
+ "CONNECTOR:%d-%s", connector->base.id, connector->name);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_out_fence_ptr_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_fb_id_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_pixel_formats_property,
+ blob->base.id);
+ wb_connector->pixel_formats_blob_ptr = blob;
+
+ return 0;
+
+attach_fail:
+ drm_connector_cleanup(connector);
+connector_fail:
+ drm_encoder_cleanup(&wb_connector->encoder);
+fail:
+ drm_property_blob_put(blob);
+ return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
+
+/**
+ * drm_writeback_queue_job - Queue a writeback job for later signalling
+ * @wb_connector: The writeback connector to queue a job on
+ * @job: The job to queue
+ *
+ * This function adds a job to the job_queue of a writeback connector. It
+ * takes ownership of the writeback job, so any other references to the job
+ * must be cleared after calling this function.
+ *
+ * Drivers must ensure that for a given writeback connector, jobs are queued in
+ * exactly the same order as they will be completed by the hardware (and
+ * signaled via drm_writeback_signal_completion).
+ *
+ * For every call to drm_writeback_queue_job() there must be exactly one call to
+ * drm_writeback_signal_completion().
+ *
+ * See also: drm_writeback_signal_completion()
+ */
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_add_tail(&job->list_entry, &wb_connector->job_queue);
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+EXPORT_SYMBOL(drm_writeback_queue_job);
+
+/*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+ * The job cannot be cleaned up directly in drm_writeback_signal_completion,
+ * because it may be called in interrupt context. Dropping the framebuffer
+ * reference can sleep, and so the cleanup is deferred to a workqueue.
+ */
+static void cleanup_work(struct work_struct *work)
+{
+ struct drm_writeback_job *job = container_of(work,
+ struct drm_writeback_job,
+ cleanup_work);
+ drm_framebuffer_put(job->fb);
+ kfree(job);
+}
+
+
+/**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+ * @wb_connector: The writeback connector whose job is complete
+ * @status: Status code to set in the writeback out_fence (0 for success)
+ *
+ * Drivers should call this to signal the completion of a previously queued
+ * writeback job. It should be called as soon as possible after the hardware
+ * has finished writing, and may be called from interrupt context.
+ * It is the driver's responsibility to ensure that for a given connector, the
+ * hardware completes writeback jobs in the same order as they are queued.
+ *
+ * Unless the driver is holding its own reference to the framebuffer, it must
+ * not be accessed after calling this function.
+ *
+ * See also: drm_writeback_queue_job()
+ */
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status)
+{
+ unsigned long flags;
+ struct drm_writeback_job *job;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ job = list_first_entry_or_null(&wb_connector->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ if (job) {
+ list_del(&job->list_entry);
+ if (job->out_fence) {
+ if (status)
+ dma_fence_set_error(job->out_fence, status);
+ dma_fence_signal(job->out_fence);
+ dma_fence_put(job->out_fence);
+ }
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+ if (WARN_ON(!job))
+ return;
+
+ INIT_WORK(&job->cleanup_work, cleanup_work);
+ queue_work(system_long_wq, &job->cleanup_work);
+}
+EXPORT_SYMBOL(drm_writeback_signal_completion);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
+{
+ struct dma_fence *fence;
+
+ if (WARN_ON(wb_connector->base.connector_type !=
+ DRM_MODE_CONNECTOR_WRITEBACK))
+ return NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_writeback_fence_ops,
+ &wb_connector->fence_lock, wb_connector->fence_context,
+ ++wb_connector->fence_seqno);
+
+ return fence;
+}
+EXPORT_SYMBOL(drm_writeback_get_out_fence);
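And the matching driver side, as a hedged sketch with hypothetical names: queue the job when the hardware is programmed, then signal it exactly once from the writeback-done interrupt.

	#include <linux/interrupt.h>
	#include <drm/drm_writeback.h>

	static void my_start_writeback(struct drm_writeback_connector *wb,
				       struct drm_writeback_job *job)
	{
		drm_writeback_queue_job(wb, job);	/* wb now owns the job */
		/* ...program the hardware to write into job->fb here... */
	}

	static irqreturn_t my_wb_done_irq(int irq, void *data)
	{
		struct drm_writeback_connector *wb = data;

		/* completes the oldest queued job: signals and releases its
		 * out-fence, then frees the job from a workqueue */
		drm_writeback_signal_completion(wb, 0);
		return IRQ_HANDLED;
	}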
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 7c3030b7e586..6d29777884f9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1723,8 +1723,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- dsi->clks = devm_kzalloc(dev,
- sizeof(*dsi->clks) * dsi->driver_data->num_clks,
+ dsi->clks = devm_kcalloc(dev,
+ dsi->driver_data->num_clks, sizeof(*dsi->clks),
GFP_KERNEL);
if (!dsi->clks)
return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 5ce84025d1cb..6127ef25acd6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1271,7 +1271,8 @@ static int fimc_probe(struct platform_device *pdev)
/* construct formats/limits array */
num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats);
- formats = devm_kzalloc(dev, sizeof(*formats) * num_formats, GFP_KERNEL);
+ formats = devm_kcalloc(dev, num_formats, sizeof(*formats),
+ GFP_KERNEL);
if (!formats)
return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index e99dd1e4ba65..35ac66730563 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1202,8 +1202,9 @@ static int gsc_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- formats = devm_kzalloc(dev, sizeof(*formats) *
- (ARRAY_SIZE(gsc_formats)), GFP_KERNEL);
+ formats = devm_kcalloc(dev,
+ ARRAY_SIZE(gsc_formats), sizeof(*formats),
+ GFP_KERNEL);
if (!formats)
return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 38a2a7f1204b..eb9915da7dec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
if (!state->crtc)
return;
- plane->crtc = state->crtc;
-
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 09c4bc0b1859..db91932550cf 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1692,7 +1692,7 @@ static int hdmi_clk_init(struct hdmi_context *hdata)
if (!count)
return 0;
- clks = devm_kzalloc(dev, sizeof(*clks) * count, GFP_KERNEL);
+ clks = devm_kcalloc(dev, count, sizeof(*clks), GFP_KERNEL);
if (!clks)
return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index c51d9259c7a7..204c8e452eb7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
- offset = psbfb->gtt->offset;
+ offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
switch (fb->format->depth) {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index cb0a2ae916e0..8fa4ef192c1e 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -33,6 +33,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -40,14 +41,9 @@
#include "framebuffer.h"
#include "gtt.h"
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle);
-
static const struct drm_framebuffer_funcs psb_fb_funcs = {
- .destroy = psb_user_framebuffer_destroy,
- .create_handle = psb_user_framebuffer_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@@ -96,17 +92,18 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
- if (psbfb->gtt->npage) {
+ if (gtt->npage) {
/* GTT roll shifts in 4K pages, we need to shift the right
number of pages */
int pages = info->fix.line_length >> 12;
- psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ psb_gtt_roll(dev, gtt, var->yoffset * pages);
}
return 0;
}
@@ -117,13 +114,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
int page_num;
int i;
unsigned long address;
int ret;
unsigned long pfn;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
- psbfb->gtt->offset;
+ gtt->offset;
page_num = vma_pages(vma);
address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -246,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
return -EINVAL;
drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->gtt = gt;
+ fb->base.obj[0] = &gt->gem;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@@ -518,8 +516,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
- if (psbfb->gtt)
- drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
+ if (psbfb->base.obj[0])
+ drm_gem_object_unreference_unlocked(psbfb->base.obj[0]);
return 0;
}
@@ -576,44 +574,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
-/**
- * psb_user_framebuffer_create_handle - add hamdle to a framebuffer
- * @fb: framebuffer
- * @file_priv: our DRM file
- * @handle: returned handle
- *
- * Our framebuffer object is a GTT range which also contains a GEM
- * object. We need to turn it into a handle for userspace. GEM will do
- * the work for us
- */
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
- return drm_gem_handle_create(file_priv, &r->gem, handle);
-}
-
-/**
- * psb_user_framebuffer_destroy - destruct user created fb
- * @fb: framebuffer
- *
- * User framebuffers are backed by GEM objects so all we have to do is
- * clean up a bit and drop the reference, GEM will handle the fallout
- */
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
-
- /* Let DRM do its clean up */
- drm_framebuffer_cleanup(fb);
- /* We are no longer using the resource in GEM */
- drm_gem_object_unreference_unlocked(&r->gem);
- kfree(fb);
-}
-
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 395f20b07aab..23dc3c5f8f0d 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -31,7 +31,6 @@ struct psb_framebuffer {
struct drm_framebuffer base;
struct address_space *addr_space;
struct fb_info *fbdev;
- struct gtt_range *gtt;
};
struct psb_fbdev {
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index f3c48a2be71b..c8f071c47daf 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -78,10 +78,10 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
+ ret = psb_gtt_pin(gtt);
if (ret < 0)
goto gma_pipe_set_base_exit;
- start = psbfb->gtt->offset;
+ start = gtt->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
@@ -129,7 +129,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+ psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
gma_pipe_set_base_exit:
gma_power_end(dev);
@@ -491,7 +491,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- gt = to_psb_fb(crtc->primary->fb)->gtt;
+ gt = to_gtt_range(crtc->primary->fb->obj[0]);
psb_gtt_unpin(gt);
}
}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index cdbb350c9d5d..cb0c3a2a1fd4 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,6 +53,8 @@ struct gtt_range {
int roll; /* Roll applied to the GTT entries */
};
+#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed,
u32 align);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 5c066448be5b..881d613cc2e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -196,7 +196,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7171b7475f58..237041a37532 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -239,7 +239,7 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
if (read_vbt_r10(addr, &vbt))
return -1;
- gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+ gct = kmalloc_array(vbt.panel_count, sizeof(*gct), GFP_KERNEL);
if (!gct)
return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 0fff269d3fe6..1b7fd6a9d8a5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index f2ee6aa10afa..1d40746ab625 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
+#define MAX_ARG_LEN 32
+
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
+ u8 buf[MAX_ARG_LEN*2 + 2], status;
+ struct i2c_msg msgs[MAX_ARG_LEN + 3];
int i, ret;
+ if (args_len > MAX_ARG_LEN) {
+ DRM_ERROR("Need to increase arg length\n");
+ return false;
+ }
+
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 6ebd8842dbcc..0038c976536a 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -589,13 +589,22 @@ out:
return ret;
}
+#define MAX_WRITE_RANGE_BUF 32
+
static void
reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
{
struct i2c_client *client = priv->hdmi;
- u8 buf[cnt+1];
+ /* This is the maximum size of the buffer passed in */
+ u8 buf[MAX_WRITE_RANGE_BUF + 1];
int ret;
+ if (cnt > MAX_WRITE_RANGE_BUF) {
+ dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
+ MAX_WRITE_RANGE_BUF);
+ return;
+ }
+
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
@@ -805,7 +814,7 @@ static void
tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
union hdmi_infoframe *frame)
{
- u8 buf[32];
+ u8 buf[MAX_WRITE_RANGE_BUF];
ssize_t len;
len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7c9ec4f4f36c..380eeb2a0e83 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8180e8d1e2..120e24c3fc62 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -273,8 +273,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 1c120683e958..00b788cf8b13 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -300,16 +300,16 @@ static int cursor_mode_to_drm(int mode)
int cursor_pixel_formats_index = 4;
switch (mode) {
- case CURSOR_MODE_128_ARGB_AX:
+ case MCURSOR_MODE_128_ARGB_AX:
cursor_pixel_formats_index = 0;
break;
- case CURSOR_MODE_256_ARGB_AX:
+ case MCURSOR_MODE_256_ARGB_AX:
cursor_pixel_formats_index = 1;
break;
- case CURSOR_MODE_64_ARGB_AX:
+ case MCURSOR_MODE_64_ARGB_AX:
cursor_pixel_formats_index = 2;
break;
- case CURSOR_MODE_64_32B_AX:
+ case MCURSOR_MODE_64_32B_AX:
cursor_pixel_formats_index = 3;
break;
@@ -342,8 +342,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -ENODEV;
val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
- mode = val & CURSOR_MODE;
- plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ mode = val & MCURSOR_MODE;
+ plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 78e55aafc8bc..23296547da95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1585,8 +1585,9 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
mm->type = INTEL_GVT_MM_GGTT;
nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
- mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
- vgpu->gvt->device_info.gtt_entry_size);
+ mm->ggtt_mm.virtual_ggtt =
+ vzalloc(array_size(nr_entries,
+ vgpu->gvt->device_info.gtt_entry_size));
if (!mm->ggtt_mm.virtual_ggtt) {
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 05d15a095310..2ff0d40281a9 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -361,9 +361,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+ ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index e4960aff68bd..b31eb36fc102 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -267,7 +267,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
if (!vgpu->mmio.vreg)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 0f949554d118..708170e61625 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
{
- u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+ const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow_ctx, ring_id))
+ !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
continue;
if (mmio->mask)
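is_inhibit_context() relies on the i915 masked-register convention: a write carries the bits being changed in the upper 16 bits and their new values in the lower 16, so _MASKED_BIT_ENABLE(bit) expands to (bit << 16) | bit and the saved context image records both halves. A standalone sketch of the check, with the macro expanded by hand and an illustrative bit position:

#include <stdbool.h>
#include <stdint.h>

/* Masked register: upper half selects bits, lower half supplies values. */
#define MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))

#define CTX_RESTORE_INHIBIT	(1u << 0)	/* illustrative position */

static bool ctx_restore_inhibited(uint32_t ctx_control)
{
	uint32_t inhibit = MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT);

	/* Match only if both the mask and the value halves are set. */
	return (ctx_control & inhibit) == inhibit;
}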
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 0439eb8057a8..5c3b9ff9f96a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c2d183b91500..7f5e01df95ee 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,7 +201,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static inline bool is_gvt_request(struct i915_request *req)
{
- return i915_gem_context_force_single_submission(req->ctx);
+ return i915_gem_context_force_single_submission(req->gem_context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;
desc = ce->lrc_desc;
@@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
- desc |= ctx->desc_template & ((1ULL << 12) - 1);
+ desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
ce->lrc_desc = desc;
}
@@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
- struct i915_request *req = workload->req;
- if (IS_KABYLAKE(req->i915) &&
- is_inhibit_context(req->ctx, req->engine->id))
+ if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_ring *ring;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct intel_context *ce;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->shadowed)
+ if (workload->req)
return 0;
+ /* Pin the shadow context by GVT even though the shadow context will
+ * be pinned when i915 allocates a request. That is because GVT will
+ * update the guest context from the shadow context when the workload
+ * is completed, and at that moment i915 may have already unpinned the
+ * shadow context, leaving the shadow_ctx pages invalid. So GVT needs
+ * to pin it itself. After updating the guest context, GVT can unpin
+ * the shadow_ctx safely.
+ */
+ ce = intel_context_pin(shadow_ctx, engine);
+ if (IS_ERR(ce)) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ return PTR_ERR(ce);
+ }
+
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(shadow_ctx,
- dev_priv->engine[ring_id]);
+ if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(ce);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_scan;
+ goto err_unpin;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err_scan;
+ goto err_shadow;
}
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
goto err_shadow;
}
+ workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
- goto err_unpin;
- workload->shadowed = true;
- return 0;
+ goto err_req;
-err_unpin:
- intel_context_unpin(shadow_ctx, engine);
+ return 0;
+err_req:
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
- return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
- int ring_id = workload->ring_id;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct i915_request *rq;
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ret;
-
- rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_unpin;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_request_get(rq);
- ret = copy_workload_to_ring_buffer(workload);
- if (ret)
- goto err_unpin;
- return 0;
-
err_unpin:
- intel_context_unpin(shadow_ctx, engine);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ intel_context_unpin(ce);
return ret;
}
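The rewritten function keeps the usual kernel unwind idiom: each error label undoes exactly the state established before the failing step, in reverse order, and fetch_and_zero() clears workload->req so no caller can observe a half-built request. A generic sketch of the shape, with hypothetical steps standing in for pin/shadow/request:

struct foo { int dummy; };

static int step_pin(struct foo *f) { (void)f; return 0; }	/* cf. intel_context_pin() */
static int step_shadow(struct foo *f) { (void)f; return 0; }	/* cf. scan-and-shadow */
static int step_request(struct foo *f) { (void)f; return 0; }	/* cf. i915_request_alloc() */
static void undo_shadow(struct foo *f) { (void)f; }
static void undo_pin(struct foo *f) { (void)f; }

static int build(struct foo *f)
{
	int ret;

	ret = step_pin(f);
	if (ret)
		return ret;

	ret = step_shadow(f);
	if (ret)
		goto err_pin;

	ret = step_request(f);
	if (ret)
		goto err_shadow;

	return 0;

err_shadow:
	undo_shadow(f);
err_pin:
	undo_pin(f);		/* reverse order of construction */
	return ret;
}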
@@ -517,21 +485,13 @@ err:
return ret;
}
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct intel_vgpu_submission *s = &workload->vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ struct intel_vgpu_workload *workload =
+ container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+ struct i915_request *rq = workload->req;
+ struct execlist_ring_context *shadow_ring_context =
+ (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +499,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
}
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
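Two things make the simplification above work: rq->hw_context->lrc_reg_state is a long-lived mapping of the pinned context image, so the kmap_atomic()/kunmap_atomic() pair (and with it the error return) is unnecessary; and container_of() recovers the owning workload by rewinding a pointer to an embedded member back to its container. A userspace rendering of the container_of() arithmetic, using the simplified macro below:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct workload {
	int ring_id;
	struct { int size; } wa_ctx;	/* embedded member */
};

int main(void)
{
	struct workload w = { .ring_id = 2 };
	struct workload *owner =
		container_of(&w.wa_ctx, struct workload, wa_ctx);

	/* owner == &w: member address minus member offset. */
	printf("ring_id via container_of: %d\n", owner->ring_id);
	return 0;
}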
@@ -633,7 +590,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
- ret = intel_gvt_generate_request(workload);
+ ret = copy_workload_to_ring_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
@@ -670,12 +627,9 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- int ret = 0;
+ int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
@@ -687,10 +641,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;
ret = prepare_workload(workload);
- if (ret) {
- intel_context_unpin(shadow_ctx, engine);
- goto out;
- }
out:
if (ret)
@@ -765,27 +715,23 @@ out:
static void update_guest_context(struct intel_vgpu_workload *workload)
{
+ struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ring_id = workload->ring_id;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+ workload->ctx_desc.lrca);
+ context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
context_page_num = 19;
i = 2;
@@ -858,6 +804,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *rq = workload->req;
int event;
mutex_lock(&gvt->lock);
@@ -866,11 +813,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
*/
- if (workload->req) {
- struct drm_i915_private *dev_priv =
- workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine =
- dev_priv->engine[workload->ring_id];
+ if (rq) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -886,8 +829,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_request_put(fetch_and_zero(&workload->req));
-
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
@@ -896,10 +837,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+
/* unpin shadow ctx as the shadow_ctx update is done */
- intel_context_unpin(s->shadow_ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&rq->i915->drm.struct_mutex);
+ intel_context_unpin(rq->hw_context);
+ mutex_unlock(&rq->i915->drm.struct_mutex);
+
+ i915_request_put(fetch_and_zero(&workload->req));
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -1270,7 +1214,6 @@ alloc_workload(struct intel_vgpu *vgpu)
atomic_set(&workload->shadow_ctx_active, 0);
workload->status = -EINPROGRESS;
- workload->shadowed = false;
workload->vgpu = vgpu;
return workload;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 6c644782193e..21eddab4a9cd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
- bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 2e0a02a80fe4..572a18c2bfb5 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -121,7 +121,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
- gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+ gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13e7b9e4a6e6..698af45e229c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
- if (ppgtt->base.file != stats->file_priv)
+ if (ppgtt->vm.file != stats->file_priv)
continue;
}
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->base.total, &ggtt->mappable_end);
+ ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct i915_request,
client_link);
rcu_read_lock();
- task = pid_task(request && request->ctx->pid ?
- request->ctx->pid : file->pid,
+ task = pid_task(request && request->gem_context->pid ?
+ request->gem_context->pid : file->pid,
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
- pm_mask = I915_READ(GEN6_PMINTRMSK);
- } else {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent of the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
pm_ier = I915_READ(GEN8_GT_IER(2));
pm_imr = I915_READ(GEN8_GT_IMR(2));
pm_isr = I915_READ(GEN8_GT_ISR(2));
pm_iir = I915_READ(GEN8_GT_IIR(2));
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ } else {
+ pm_ier = I915_READ(GEN6_PMIER);
+ pm_imr = I915_READ(GEN6_PMIMR);
+ pm_isr = I915_READ(GEN6_PMISR);
+ pm_iir = I915_READ(GEN6_PMIIR);
}
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(dev_priv) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1895,7 +1908,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
+ describe_obj(m, intel_fb_obj(&fbdev_fb->base));
seq_putc(m, '\n');
}
#endif
@@ -1913,7 +1926,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, fb->obj);
+ describe_obj(m, intel_fb_obj(&fb->base));
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -2630,8 +2643,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
- u32 stat[3];
- enum pipe pipe;
bool enabled = false;
bool sink_support;
@@ -2652,47 +2663,17 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
- else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- } else {
- for_each_pipe(dev_priv, pipe) {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain))
- continue;
-
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
-
- intel_display_power_put(dev_priv, power_domain);
- }
- }
+ if (dev_priv->psr.psr2_enabled)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
- seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
- if (!HAS_DDI(dev_priv))
- for_each_pipe(dev_priv, pipe) {
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- seq_printf(m, " pipe %c", pipe_name(pipe));
- }
- seq_puts(m, "\n");
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
/*
- * VLV/CHV PSR has no kind of performance counter
 * SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -4245,8 +4226,13 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE)
- drain_delayed_work(&dev_priv->gt.idle_work);
+ if (val & DROP_IDLE) {
+ do {
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ flush_delayed_work(&dev_priv->gt.retire_work);
+ drain_delayed_work(&dev_priv->gt.idle_work);
+ } while (READ_ONCE(dev_priv->gt.awake));
+ }
if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
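The DROP_IDLE change loops because retiring the last request can itself schedule another round of idle work; only when gt.awake reads false has the device truly parked. READ_ONCE() forces a fresh load of a value another thread updates. A hedged sketch of the drain-until-quiet shape, with hypothetical helpers:

#include <stdatomic.h>

extern void flush_retire_work(void);
extern void drain_idle_work(void);

static atomic_int active_requests;
static atomic_bool gt_awake;

static void drop_idle(void)
{
	/*
	 * Each drain pass may retire work that queues more idle
	 * handling, so re-check the awake flag after every pass.
	 */
	do {
		if (atomic_load(&active_requests))
			flush_retire_work();
		drain_idle_work();
	} while (atomic_load(&gt_awake));
}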
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9c449b8d8eab..be71fdf8d92e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,6 +67,7 @@ bool __i915_inject_load_failure(const char *func, int line)
if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
return true;
}
@@ -117,16 +118,15 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
- return i915_modparams.inject_load_failure &&
- i915_load_fail_count == i915_modparams.inject_load_failure;
+ return i915_load_fail_count && !i915_modparams.inject_load_failure;
#else
return false;
#endif
}
-#define i915_load_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, \
- i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+#define i915_load_error(i915, fmt, ...) \
+ __i915_printk(i915, \
+ i915_error_injected(i915) ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
@@ -233,6 +233,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -634,26 +636,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
- /* Flush any outstanding unpin_work. */
- i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_uc_fini_misc(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
-
- i915_gem_drain_freed_objects(dev_priv);
-
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1553,12 +1535,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int err;
+
+ /*
+ * NB intel_display_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after this point,
+ * the GPU is not woken again.
+ */
+ err = i915_gem_suspend(i915);
+ if (err)
+ dev_err(&i915->drm.pdev->dev,
+ "GEM idle failed, suspend/resume might fail\n");
+
+ return err;
+}
+
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- int error;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -1575,13 +1575,6 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev_priv);
- if (error) {
- dev_err(&pdev->dev,
- "GEM idle failed, resume might fail\n");
- goto out;
- }
-
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);
@@ -1600,7 +1593,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1601,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
-out:
enable_rpm_wakeref_asserts(dev_priv);
- return error;
+ return 0;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1614,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ i915_gem_suspend_late(dev_priv);
+
intel_display_set_init_power(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
/*
* In case of firmware assisted context save/restore don't manually
@@ -2081,6 +2075,22 @@ out:
return ret;
}
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(dev);
+}
+
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2741,7 @@ const struct dev_pm_ops i915_pm_ops = {
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
+ .prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
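For context, .prepare is the earliest callback in the system-wide suspend sequence (struct dev_pm_ops, <linux/pm.h>): every device's ->prepare() runs before any device's ->suspend(), which is why quiescing GEM there guarantees the GPU stays asleep through the rest of the sequence. A minimal sketch of the pairing, with hypothetical callbacks:

#include <linux/pm.h>

static int mydrv_prepare(struct device *dev)
{
	/* Quiesce the hardware; a negative return aborts the suspend. */
	return 0;
}

static int mydrv_suspend(struct device *dev)
{
	/* Safe to tear down: the device was quiesced in ->prepare(). */
	return 0;
}

static const struct dev_pm_ops mydrv_pm_ops = {
	.prepare = mydrv_prepare,
	.suspend = mydrv_suspend,
};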
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 34c125e2d90c..79209ee25c21 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -85,8 +85,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180514"
-#define DRIVER_TIMESTAMP 1526300884
+#define DRIVER_DATE "20180606"
+#define DRIVER_TIMESTAMP 1528323047
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -607,7 +607,6 @@ struct i915_psr {
bool link_standby;
bool colorimetry_support;
bool alpm;
- bool has_hw_tracking;
bool psr2_enabled;
u8 sink_sync_latency;
bool debug;
@@ -1049,9 +1048,9 @@ struct intel_vbt_data {
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
@@ -1067,7 +1066,6 @@ struct intel_vbt_data {
int vswing;
bool low_vswing;
bool initialized;
- bool support;
int bpp;
struct edp_power_seq pps;
} edp;
@@ -1078,8 +1076,8 @@ struct intel_vbt_data {
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
- int tp1_wakeup_time;
- int tp2_tp3_wakeup_time;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1843,6 +1841,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
@@ -1950,7 +1949,9 @@ struct drm_i915_private {
*/
struct i915_perf_stream *exclusive_stream;
+ struct intel_context *pinned_ctx;
u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
struct hrtimer poll_check_timer;
wait_queue_head_t poll_wq;
@@ -2743,6 +2744,8 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
@@ -3164,10 +3167,12 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
@@ -3208,7 +3213,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- return container_of(vm, struct i915_hw_ppgtt, base);
+ return container_of(vm, struct i915_hw_ppgtt, vm);
}
/* i915_gem_fence_reg.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3704f4c0c2c9..86f1f9aaa119 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
void i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
void i915_gem_unpark(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- pinned = ggtt->base.reserved;
+ pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = ggtt->base.total;
+ args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -1217,9 +1223,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
@@ -1240,8 +1246,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1420,9 +1425,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
@@ -1449,8 +1454,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1993,7 +1997,7 @@ compute_partial_view(struct drm_i915_gem_object *obj,
*/
int i915_gem_fault(struct vm_fault *vmf)
{
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
@@ -3003,7 +3007,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct i915_request *request = NULL;
+ struct i915_request *request;
/*
* During the reset sequence, we must prevent the engine from
@@ -3014,52 +3018,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
- /*
- * Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- *
- * Note that this needs to be a single atomic operation on the
- * tasklet (flush existing tasks, prevent new tasks) to prevent
- * a race between reset and set-wedged. It is not, so we do the best
- * we can atm and make sure we don't lock the machine up in the more
- * common case of recursively being called from set-wedged from inside
- * i915_reset.
- */
- if (!atomic_read(&engine->execlists.tasklet.count))
- tasklet_kill(&engine->execlists.tasklet);
- tasklet_disable(&engine->execlists.tasklet);
-
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- request = i915_gem_find_active_request(engine);
+ request = engine->reset.prepare(engine);
if (request && request->fence.error == -EIO)
request = ERR_PTR(-EIO); /* Previous reset failed! */
@@ -3111,7 +3070,7 @@ static void skip_request(struct i915_request *request)
static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->ctx;
+ struct i915_gem_context *hung_ctx = request->gem_context;
struct i915_timeline *timeline = request->timeline;
unsigned long flags;
@@ -3121,7 +3080,7 @@ static void engine_skip_context(struct i915_request *request)
spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->ctx == hung_ctx)
+ if (request->gem_context == hung_ctx)
skip_request(request);
list_for_each_entry(request, &timeline->requests, link)
@@ -3167,11 +3126,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
if (stalled) {
- i915_gem_context_mark_guilty(request->ctx);
+ i915_gem_context_mark_guilty(request->gem_context);
skip_request(request);
/* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->ctx))
+ if (i915_gem_context_is_banned(request->gem_context))
engine_skip_context(request);
} else {
/*
@@ -3181,7 +3140,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
*/
request = i915_gem_find_active_request(engine);
if (request) {
- i915_gem_context_mark_innocent(request->ctx);
+ i915_gem_context_mark_innocent(request->gem_context);
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
@@ -3210,13 +3169,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
if (request)
request = i915_gem_reset_request(engine, request, stalled);
- if (request) {
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
- }
-
/* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+ engine->reset.reset(engine, request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3230,14 +3184,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
- ctx = fetch_and_zero(&engine->last_retired_context);
- if (ctx)
- intel_context_unpin(ctx, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
/*
* Ostensibily, we always want a context loaded for powersaving,
@@ -3264,8 +3218,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
@@ -3543,6 +3496,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
work_pending(&i915->gt.idle_work.work));
}
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+ GEM_BUG_ON(engine->last_retired_context !=
+ to_intel_context(i915->kernel_context, engine));
+ }
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
@@ -3554,6 +3523,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return;
+
+ /*
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. When we are idling on the kernel_context,
+ * no more new requests (with a context switch) are emitted and we
+ * can finally rest. A consequence is that the idle work handler is
+ * always called at least twice before idling (and if the system is
+ * idle that implies a round trip through the retire worker).
+ */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_switch_to_kernel_context(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+ READ_ONCE(dev_priv->gt.active_requests));
+
/*
* Wait for last execlists context complete, but bail out in case a
* new request is submitted. As we don't trust the hardware, we
@@ -3587,6 +3574,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
epoch = __i915_gem_park(dev_priv);
+ assert_kernel_context_is_current(dev_priv);
+
rearm_hangcheck = false;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3735,7 +3724,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
{
- return i915_gem_active_wait(&tl->last_request, flags);
+ struct i915_request *rq;
+ long ret;
+
+ rq = i915_gem_active_get_unlocked(&tl->last_request);
+ if (!rq)
+ return 0;
+
+ /*
+ * "Race-to-idle".
+ *
+ * Switching to the kernel context is often used as a synchronous
+ * step prior to idling, e.g. in suspend for flushing all
+ * current operations to memory before sleeping. These we
+ * want to complete as quickly as possible to avoid prolonged
+ * stalls, so allow the gpu to boost to maximum clocks.
+ */
+ if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ gen6_rps_boost(rq, NULL);
+
+ ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+
+ return ret < 0 ? ret : 0;
}
static int wait_for_engines(struct drm_i915_private *i915)
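The rewrite swaps a wrapped wait helper for the explicit get/wait/put discipline so the request cannot be freed while the unlocked wait sleeps, and gains a natural spot to apply the RPS boost. A generic kref-based sketch of the same discipline, names hypothetical:

#include <linux/kref.h>
#include <linux/slab.h>

struct item {
	struct kref ref;
};

static void item_release(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

/* Hypothetical lookup that returns an extra reference, or NULL. */
static struct item *last_item_get(void);

static int wait_on_last_item(void)
{
	struct item *it = last_item_get();

	if (!it)
		return 0;	/* nothing outstanding: vacuously idle */

	/* ... sleeping wait on *it goes here; safe, we hold a ref ... */

	kref_put(&it->ref, item_release);	/* drop our reference */
	return 0;
}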
@@ -3753,6 +3764,9 @@ static int wait_for_engines(struct drm_i915_private *i915)
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
+ GEM_TRACE("flags=%x (%s)\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked");
+
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
@@ -3769,6 +3783,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
return err;
}
i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
return wait_for_engines(i915);
} else {
@@ -4357,7 +4372,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
@@ -4945,25 +4960,26 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
{
- struct i915_gem_context *kernel_context = i915->kernel_context;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context != kernel_context);
- }
-}
+ GEM_TRACE("\n");
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- if (i915_terminally_wedged(&i915->gpu_error)) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in, and so it is worth trying to use the device once more.
+ */
+ if (i915_terminally_wedged(&i915->gpu_error))
i915_gem_unset_wedged(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -4975,6 +4991,18 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
*/
if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+
+ /* Reset the submission backend after resume as well as the GPU reset */
+ for_each_engine(engine, i915, id) {
+ if (engine->reset.reset)
+ engine->reset.reset(engine, NULL);
+ }
+
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_runtime_pm_put(i915);
+
+ i915_gem_contexts_lost(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
}
int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -4982,6 +5010,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
int ret;
+ GEM_TRACE("\n");
+
intel_runtime_pm_get(dev_priv);
intel_suspend_gt_powersave(dev_priv);
@@ -5002,13 +5032,13 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST);
if (ret && ret != -EIO)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
}
- i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
intel_uc_suspend(dev_priv);
@@ -5028,6 +5058,24 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
if (WARN_ON(!intel_engines_are_idle(dev_priv)))
i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
+ NULL
+ }, **phase;
+
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
@@ -5047,20 +5095,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- intel_uc_sanitize(dev_priv);
- i915_gem_sanitize(dev_priv);
- intel_runtime_pm_put(dev_priv);
- return 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
- return ret;
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
@@ -5234,9 +5284,15 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
/* Only when the HW is re-initialised, can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
+ if (ret)
+ goto cleanup_uc;
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
+
+cleanup_uc:
+ intel_uc_fini_hw(dev_priv);
+ goto out;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
@@ -5357,12 +5413,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
- /*
- * We need to fallback to 4K pages since gvt gtt handling doesn't
- * support huge page entries - we will need to check either hypervisor
- * mm can support huge guest page or just do emulation in gvt.
- */
- if (intel_vgpu_active(dev_priv))
+ /* We need to fallback to 4K pages if host doesn't support huge gtt. */
+ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
@@ -5502,6 +5554,28 @@ err_unlock:
return ret;
}
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ i915_gem_suspend_late(dev_priv);
+
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_uc_fini_misc(dev_priv);
+ i915_gem_cleanup_userptr(dev_priv);
+
+ i915_gem_drain_freed_objects(dev_priv);
+
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
@@ -5666,16 +5740,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
return 0;
}
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
- &dev_priv->mm.unbound_list,
- &dev_priv->mm.bound_list,
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
NULL
- }, **p;
+ }, **phase;
- /* Called just before we write the hibernation image.
+ /*
+ * Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
@@ -5689,15 +5764,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
- i915_gem_drain_freed_objects(dev_priv);
+ i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_drain_freed_objects(i915);
- spin_lock(&dev_priv->mm.obj_lock);
- for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, mm.link)
- __start_cpu_write(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
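Both i915_gem_suspend_late() and i915_gem_freeze_late() share a small idiom worth noting: a NULL-terminated array of list heads lets one loop body walk several object lists in a fixed order. A standalone sketch with a hypothetical flush step:

#include <linux/list.h>

struct obj {
	struct list_head link;
};

static void flush_obj(struct obj *obj)
{
	/* e.g. move the object into the CPU domain before hibernation */
}

static void flush_lists(struct list_head *unbound, struct list_head *bound)
{
	struct list_head *phases[] = { unbound, bound, NULL }, **phase;
	struct obj *obj;

	/* One body, several lists: unbound first, then bound. */
	for (phase = phases; *phase; phase++)
		list_for_each_entry(obj, *phase, link)
			flush_obj(obj);
}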
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 525920404ede..261da577829a 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,6 +26,7 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/interrupt.h>
struct drm_i915_private;
@@ -62,9 +63,12 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+ do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
#define I915_NUM_ENGINES 8
@@ -72,4 +76,16 @@ struct drm_i915_private;
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_inc_return(&t->count) == 1)
+ tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_dec_return(&t->count) == 0)
+ tasklet_kill(t);
+}
+
#endif /* __I915_GEM_H__ */
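The new helpers make tasklet disable/enable safe to nest: only the first disable (count 0 -> 1) waits for a concurrently-running tasklet to drain, and only the last enable (count 1 -> 0) flushes anything scheduled in between. A hedged usage sketch, bracketing a hypothetical reset path:

#include <linux/interrupt.h>

/* Assumes the __tasklet_*_sync_once() helpers defined above. */

static void submission_reset_prepare(struct tasklet_struct *t)
{
	/* First caller waits out a running tasklet; nested calls just count. */
	__tasklet_disable_sync_once(t);
}

static void submission_reset_finish(struct tasklet_struct *t)
{
	/* Last caller re-enables and flushes anything queued meanwhile. */
	__tasklet_enable_sync_once(t);
}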
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b3c981..b2c7ac1b074d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
- if (!ce->state)
- continue;
-
- WARN_ON(ce->pin_count);
- if (ce->ring)
- intel_ring_free(ce->ring);
-
- __i915_gem_object_release_unless_active(ce->state->obj);
+ if (ce->ops)
+ ce->ops->destroy(ce);
}
kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->base);
+ i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
int ret;
unsigned int max;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
max = GEN11_MAX_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
+ } else {
+ /*
+ * When using GuC in proxy submission, GuC consumes the
+ * highest bit in the context id to indicate proxy submission.
+ */
+ if (USES_GUC_SUBMISSION(dev_priv))
+ max = MAX_GUC_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
+ }
+
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, max, GFP_KERNEL);
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_PRIORITY_NORMAL;
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv, id) {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
-
- if (!engine->last_retired_context)
- continue;
-
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = NULL;
- }
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_lost_context(engine);
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,58 +585,119 @@ last_request_on_engine(struct i915_timeline *timeline,
{
struct i915_request *rq;
- if (timeline == &engine->timeline)
- return NULL;
+ GEM_BUG_ON(timeline == &engine->timeline);
rq = i915_gem_active_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine)
+ if (rq && rq->engine == engine) {
+ GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+ timeline->name, engine->name,
+ rq->fence.context, rq->fence.seqno);
+ GEM_BUG_ON(rq->timeline != timeline);
return rq;
+ }
return NULL;
}
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct drm_i915_private *i915 = engine->i915;
+ const struct intel_context * const ce =
+ to_intel_context(i915->kernel_context, engine);
+ struct i915_timeline *barrier = ce->ring->timeline;
+ struct intel_ring *ring;
+ bool any_active = false;
- list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
- if (last_request_on_engine(timeline, engine))
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+ struct i915_request *rq;
+
+ rq = last_request_on_engine(ring->timeline, engine);
+ if (!rq)
+ continue;
+
+ any_active = true;
+
+ if (rq->hw_context == ce)
+ continue;
+
+ /*
+ * Was this request submitted after the previous
+ * switch-to-kernel-context?
+ */
+ if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+ GEM_TRACE("%s needs barrier for %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
return false;
+ }
+
+ GEM_TRACE("%s has barrier after %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
}
- return intel_engine_has_kernel_context(engine);
+ /*
+ * If any other timeline was still active and behind the last barrier,
+ * then our last switch-to-kernel-context must still be queued and
+ * will run last (leaving the engine in the kernel context when it
+ * eventually idles).
+ */
+ if (any_active)
+ return true;
+
+ /* The engine is idle; check that it is idling in the kernel context. */
+ return engine->last_retired_context == ce;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915->kernel_context);
- i915_retire_requests(dev_priv);
+ i915_retire_requests(i915);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
+ struct intel_ring *ring;
struct i915_request *rq;
- if (engine_has_idle_kernel_context(engine))
+ GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+ if (engine_has_kernel_context_barrier(engine))
continue;
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ GEM_TRACE("emit barrier on %s\n", engine->name);
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity */
- list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
struct i915_request *prev;
- prev = last_request_on_engine(timeline, engine);
- if (prev)
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
+ prev = last_request_on_engine(ring->timeline, engine);
+ if (!prev)
+ continue;
+
+ if (prev->gem_context == i915->kernel_context)
+ continue;
+
+ GEM_TRACE("add barrier on %s for %llx:%d\n",
+ engine->name,
+ prev->fence.context,
+ prev->fence.seqno);
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &prev->submit,
+ I915_FENCE_GFP);
+ i915_timeline_sync_set(rq->timeline, &prev->fence);
}
/*
@@ -747,11 +810,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
- args->value = ctx->ppgtt->base.total;
+ args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
- args->value = to_i915(dev)->ggtt.base.total;
+ args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ace3b129c189..b116e4942c10 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -30,6 +30,7 @@
#include <linux/radix-tree.h>
#include "i915_gem.h"
+#include "i915_scheduler.h"
struct pid;
@@ -45,6 +46,13 @@ struct intel_ring;
#define DEFAULT_CONTEXT_HANDLE 0
+struct intel_context;
+
+struct intel_context_ops {
+ void (*unpin)(struct intel_context *ce);
+ void (*destroy)(struct intel_context *ce);
+};
+
/**
* struct i915_gem_context - client state
*
@@ -144,11 +152,14 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
+ struct i915_gem_context *gem_context;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+
+ const struct intel_context_ops *ops;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
return &ctx->__engine[engine->id];
}
-static inline struct intel_ring *
+static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
return engine->context_pin(engine, ctx);
}
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
GEM_BUG_ON(!ce->pin_count);
ce->pin_count++;
}
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
{
- engine->context_unpin(engine, ctx);
+ GEM_BUG_ON(!ce->pin_count);
+ if (--ce->pin_count)
+ return;
+
+ GEM_BUG_ON(!ce->ops);
+ ce->ops->unpin(ce);
}
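
The new intel_context_unpin() above folds the reference counting into the header and defers only the final, backend-specific release to an ops vtable. A standalone sketch of that pattern, with simplified names rather than the driver's actual types:

    #include <assert.h>
    #include <stdio.h>

    struct intel_ctx;               /* stand-in for struct intel_context */

    struct ctx_ops {
            void (*unpin)(struct intel_ctx *ce);
    };

    struct intel_ctx {
            int pin_count;
            const struct ctx_ops *ops;
    };

    static void ctx_pin(struct intel_ctx *ce)
    {
            ce->pin_count++;        /* first pin would also set up HW state */
    }

    static void ctx_unpin(struct intel_ctx *ce)
    {
            assert(ce->pin_count > 0);
            if (--ce->pin_count)
                    return;         /* still pinned by another user */
            ce->ops->unpin(ce);     /* last unpin: backend-specific teardown */
    }

    static void demo_unpin(struct intel_ctx *ce)
    {
            printf("released context %p\n", (void *)ce);
    }

    int main(void)
    {
            static const struct ctx_ops ops = { .unpin = demo_unpin };
            struct intel_ctx ce = { .pin_count = 0, .ops = &ops };

            ctx_pin(&ce);
            ctx_pin(&ce);
            ctx_unpin(&ce);         /* no release yet */
            ctx_unpin(&ce);         /* release fires here */
            return 0;
    }

Moving the refcount out of the engine callback means legacy and execlists backends only supply the release hook, not the counting.
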
/* i915_gem_context.c */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 69a7aec49e84..82e2ca17a441 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
- .map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
- .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f627a8c47c58..eefd449502e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -703,7 +703,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+ eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -943,9 +943,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- ggtt->base.clear_range(&ggtt->base,
- cache->node.start,
- cache->node.size);
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1016,7 +1016,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
- (&ggtt->base.mm, &cache->node,
+ (&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -1037,9 +1037,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 996ab2ad6c45..284ae9574f03 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -42,7 +42,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/**
* DOC: Global GTT views
@@ -190,19 +190,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 1;
}
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
+static int gen6_ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
u32 pte_flags;
- int ret;
-
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
- vma->size);
- if (ret)
- return ret;
- }
/* Currently applicable only to VLV */
pte_flags = 0;
@@ -214,6 +206,22 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
return 0;
}
+static int gen8_ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ int ret;
+
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (ret)
+ return ret;
+ }
+
+ return gen6_ppgtt_bind_vma(vma, cache_level, unused);
+}
+
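
Splitting ppgtt_bind_vma() this way layers the gen8 on-demand page-table allocation on top of the common PTE-writing path that gen6 uses directly. A toy illustration of the layering, with a hypothetical flag and addresses:

    #include <stdio.h>

    #define VMA_LOCAL_BIND 0x1

    struct vma {
            unsigned int flags;
            unsigned long start, size;
    };

    /* shared tail: write the PTEs (the gen6 path in the patch) */
    static int bind_common(struct vma *vma)
    {
            printf("insert PTEs for [%#lx, %#lx)\n",
                   vma->start, vma->start + vma->size);
            return 0;
    }

    /* gen8 variant: allocate page tables on demand, then delegate */
    static int bind_gen8(struct vma *vma)
    {
            if (!(vma->flags & VMA_LOCAL_BIND)) {
                    printf("allocate va range [%#lx, %#lx)\n",
                           vma->start, vma->start + vma->size);
                    /* a real implementation returns -ENOMEM on failure */
            }
            return bind_common(vma);
    }

    int main(void)
    {
            struct vma v = { .flags = 0, .start = 0x10000, .size = 0x4000 };
            return bind_gen8(&v);
    }
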
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
@@ -489,7 +497,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
{
- p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
if (unlikely(!p->page))
return -ENOMEM;
@@ -506,7 +514,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
static int setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p)
{
- return __setup_page_dma(vm, p, I915_GFP_DMA);
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}
static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +528,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
static void fill_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
@@ -614,7 +622,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
- pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
@@ -651,7 +659,7 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
@@ -685,7 +693,7 @@ static int __pdp_init(struct i915_address_space *vm,
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL | __GFP_NOWARN);
+ I915_GFP_ALLOW_FAIL);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
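
The I915_GFP_ALLOW_FAIL change swaps __GFP_NORETRY (give up almost immediately) for __GFP_RETRY_MAYFAIL, which lets the allocator retry reclaim harder but still return NULL rather than invoke the OOM killer, while __GFP_NOWARN keeps an expected failure out of the log. The callers above already unwind on NULL; a userspace stand-in for that caller-side contract:

    #include <stdio.h>
    #include <stdlib.h>

    /* stands in for kmalloc(size, I915_GFP_ALLOW_FAIL): may return NULL */
    static void *alloc_may_fail(size_t size)
    {
            return malloc(size);
    }

    static int build_page_directory(void)
    {
            void *pd = alloc_may_fail(4096);

            if (!pd) {
                    /* graceful unwind instead of warning or retry loop */
                    fprintf(stderr, "page directory allocation failed\n");
                    return -12;     /* -ENOMEM */
            }
            free(pd);
            return 0;
    }

    int main(void) { return build_page_directory() ? 1 : 0; }
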
@@ -765,53 +773,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
- unsigned entry,
- dma_addr_t addr)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- BUG_ON(entry >= 4);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
- *cs++ = upper_32_bits(addr);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
- *cs++ = lower_32_bits(addr);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- int i, ret;
-
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- ret = gen8_write_pdp(rq, i, pd_daddr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
@@ -819,7 +780,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -1012,7 +973,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
gen8_pte_t *vaddr;
bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
do {
@@ -1043,7 +1004,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
break;
}
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
}
@@ -1229,7 +1190,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
@@ -1272,7 +1233,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
enum vgt_g2v_type msg;
int i;
@@ -1333,13 +1294,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+ if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
}
- cleanup_px(&ppgtt->base, &ppgtt->pml4);
+ cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1314,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (use_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
gen8_free_scratch(vm);
}
@@ -1489,7 +1450,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd;
u32 pdpe;
@@ -1499,7 +1460,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u64 pd_start = start;
u32 pde;
- if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+ if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1468,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u32 pte;
gen8_pte_t *pt_vaddr;
- if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+ if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
continue;
pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1501,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- u64 start = 0, length = ppgtt->base.total;
+ u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
u64 pml4e;
@@ -1551,7 +1512,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+ if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
@@ -1564,10 +1525,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->base.total;
+ u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
unsigned int pdpe;
@@ -1603,11 +1564,11 @@ unwind:
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
int ret;
- ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ ppgtt->vm.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1ULL << 48 :
1ULL << 32;
@@ -1615,27 +1576,26 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
- ppgtt->base.pt_kmap_wc = true;
+ ppgtt->vm.pt_kmap_wc = true;
- ret = gen8_init_scratch(&ppgtt->base);
+ ret = gen8_init_scratch(&ppgtt->vm);
if (ret) {
- ppgtt->base.total = 0;
+ ppgtt->vm.total = 0;
return ret;
}
if (use_4lvl(vm)) {
- ret = setup_px(&ppgtt->base, &ppgtt->pml4);
+ ret = setup_px(&ppgtt->vm, &ppgtt->pml4);
if (ret)
goto free_scratch;
- gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+ gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
- ppgtt->switch_mm = gen8_mm_switch_4lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
+ ret = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
if (ret)
goto free_scratch;
@@ -1647,36 +1607,35 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
}
}
- ppgtt->switch_mm = gen8_mm_switch_3lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->vm.bind_vma = gen8_ppgtt_bind_vma;
+ ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.set_pages = ppgtt_set_pages;
+ ppgtt->vm.clear_pages = clear_pages;
ppgtt->debug_dump = gen8_dump_ppgtt;
return 0;
free_scratch:
- gen8_free_scratch(&ppgtt->base);
+ gen8_free_scratch(&ppgtt->vm);
return ret;
}
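
gen8_ppgtt_init() now fills the i915_address_space vtable directly, choosing 3-level or 4-level callbacks by capability instead of going through a separate switch_mm hook. The shape of that setup, reduced to a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct addr_space {
            bool use_4lvl;
            void (*clear_range)(struct addr_space *as);
            void (*insert_entries)(struct addr_space *as);
    };

    static void clear_3lvl(struct addr_space *as)  { puts("clear 3-level"); }
    static void insert_3lvl(struct addr_space *as) { puts("insert 3-level"); }
    static void clear_4lvl(struct addr_space *as)  { puts("clear 4-level"); }
    static void insert_4lvl(struct addr_space *as) { puts("insert 4-level"); }

    /* pick the implementation once at init; callers just dispatch */
    static void vm_init(struct addr_space *as, bool use_4lvl)
    {
            as->use_4lvl = use_4lvl;
            if (use_4lvl) {
                    as->clear_range = clear_4lvl;
                    as->insert_entries = insert_4lvl;
            } else {
                    as->clear_range = clear_3lvl;
                    as->insert_entries = insert_3lvl;
            }
    }

    int main(void)
    {
            struct addr_space vm;

            vm_init(&vm, true);
            vm.insert_entries(&vm);   /* dispatches to the 4-level variant */
            vm.clear_range(&vm);
            return 0;
    }
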
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_table *unused;
gen6_pte_t scratch_pte;
u32 pd_entry, pte, pde;
- u32 start = 0, length = ppgtt->base.total;
+ u32 start = 0, length = ppgtt->vm.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, 0);
@@ -1974,7 +1933,7 @@ static int gen6_init_scratch(struct i915_address_space *vm)
{
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
@@ -2013,8 +1972,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
+ struct i915_address_space *vm = &ppgtt->vm;
+ struct drm_i915_private *dev_priv = ppgtt->vm.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
@@ -2022,16 +1981,16 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+ BUG_ON(!drm_mm_initialized(&ggtt->vm.mm));
ret = gen6_init_scratch(vm);
if (ret)
return ret;
- ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
+ ret = i915_gem_gtt_insert(&ggtt->vm, &ppgtt->node,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_COLOR_UNEVICTABLE,
- 0, ggtt->base.total,
+ 0, ggtt->vm.total,
PIN_HIGH);
if (ret)
goto err_out;
@@ -2064,16 +2023,16 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
u32 pde;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
- ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+ ppgtt->pd.page_table[pde] = ppgtt->vm.scratch_pt;
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
+ struct drm_i915_private *dev_priv = ppgtt->vm.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ppgtt->base.pte_encode = ggtt->base.pte_encode;
+ ppgtt->vm.pte_encode = ggtt->vm.pte_encode;
if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
ppgtt->switch_mm = gen6_mm_switch;
else if (IS_HASWELL(dev_priv))
@@ -2087,24 +2046,24 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+ ppgtt->vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->vm.total);
+ gen6_write_page_range(ppgtt, 0, ppgtt->vm.total);
- ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
+ ret = gen6_alloc_va_range(&ppgtt->vm, 0, ppgtt->vm.total);
if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
+ gen6_ppgtt_cleanup(&ppgtt->vm);
return ret;
}
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->vm.bind_vma = gen6_ppgtt_bind_vma;
+ ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.set_pages = ppgtt_set_pages;
+ ppgtt->vm.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = gen6_ppgtt_cleanup;
ppgtt->debug_dump = gen6_dump_ppgtt;
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
@@ -2120,8 +2079,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv)
{
- ppgtt->base.i915 = dev_priv;
- ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+ ppgtt->vm.i915 = dev_priv;
+ ppgtt->vm.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) < 8)
return gen6_ppgtt_init(ppgtt);
@@ -2231,10 +2190,10 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
}
kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = fpriv;
+ i915_address_space_init(&ppgtt->vm, dev_priv, name);
+ ppgtt->vm.file = fpriv;
- trace_i915_ppgtt_create(&ppgtt->base);
+ trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
@@ -2268,16 +2227,16 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
- trace_i915_ppgtt_release(&ppgtt->base);
+ trace_i915_ppgtt_release(&ppgtt->vm);
- ppgtt_destroy_vma(&ppgtt->base);
+ ppgtt_destroy_vma(&ppgtt->vm);
- GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
- ppgtt->base.cleanup(&ppgtt->base);
- i915_address_space_fini(&ppgtt->base);
+ ppgtt->vm.cleanup(&ppgtt->vm);
+ i915_address_space_fini(&ppgtt->vm);
kfree(ppgtt);
}
@@ -2373,7 +2332,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
i915_ggtt_invalidate(dev_priv);
}
@@ -2716,16 +2675,16 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
- appgtt->base.allocate_va_range) {
- ret = appgtt->base.allocate_va_range(&appgtt->base,
- vma->node.start,
- vma->size);
+ appgtt->vm.allocate_va_range) {
+ ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
- pte_flags);
+ appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2707,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+ struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2815,30 +2774,30 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
- if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+ if (WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
err = -ENODEV;
goto err_ppgtt;
}
- if (ppgtt->base.allocate_va_range) {
+ if (ppgtt->vm.allocate_va_range) {
/* Note we only pre-allocate as far as the end of the global
* GTT. On 48b / 4-level page-tables, the difference is very,
* very significant! We have to preallocate as GVT/vgpu does
* not like the page directory disappearing.
*/
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- 0, ggtt->base.total);
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+ 0, ggtt->vm.total);
if (err)
goto err_ppgtt;
}
i915->mm.aliasing_ppgtt = ppgtt;
- GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+ GEM_BUG_ON(ggtt->vm.bind_vma != ggtt_bind_vma);
+ ggtt->vm.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
- ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+ GEM_BUG_ON(ggtt->vm.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2858,8 +2817,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
i915_ppgtt_put(ppgtt);
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.bind_vma = ggtt_bind_vma;
+ ggtt->vm.unbind_vma = ggtt_unbind_vma;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2883,7 +2842,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -2891,16 +2850,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,11 +2883,11 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
struct i915_vma *vma, *vn;
struct pagevec *pvec;
- ggtt->base.closed = true;
+ ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
- GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -2941,12 +2899,12 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
- if (drm_mm_initialized(&ggtt->base.mm)) {
+ if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
pvec = &dev_priv->mm.wc_stash;
if (pvec->nr) {
@@ -2996,7 +2954,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -3020,7 +2978,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3326,7 +3284,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3350,25 +3308,25 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.insert_page = gen8_ggtt_insert_page;
- ggtt->base.clear_range = nop_clear_range;
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.bind_vma = ggtt_bind_vma;
+ ggtt->vm.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.set_pages = ggtt_set_pages;
+ ggtt->vm.clear_pages = clear_pages;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->base.clear_range = gen8_ggtt_clear_range;
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
- ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->base.clear_range != nop_clear_range)
- ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
}
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3380,7 +3338,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3407,29 +3365,29 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_page = gen6_ggtt_insert_page;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.bind_vma = ggtt_bind_vma;
+ ggtt->vm.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.set_pages = ggtt_set_pages;
+ ggtt->vm.clear_pages = clear_pages;
+ ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(dev_priv))
- ggtt->base.pte_encode = iris_pte_encode;
+ ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
- ggtt->base.pte_encode = hsw_pte_encode;
+ ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev_priv))
- ggtt->base.pte_encode = byt_pte_encode;
+ ggtt->vm.pte_encode = byt_pte_encode;
else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
+ ggtt->vm.pte_encode = ivb_pte_encode;
else
- ggtt->base.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_encode = snb_pte_encode;
return ggtt_probe_common(ggtt, size);
}
@@ -3441,7 +3399,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
@@ -3451,23 +3409,21 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total,
- &gmadr_base,
- &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource) DEFINE_RES_MEM(gmadr_base,
ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->base.insert_page = i915_ggtt_insert_page;
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.bind_vma = ggtt_bind_vma;
+ ggtt->vm.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.set_pages = ggtt_set_pages;
+ ggtt->vm.clear_pages = clear_pages;
+ ggtt->vm.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
@@ -3486,8 +3442,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.i915 = dev_priv;
- ggtt->base.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.i915 = dev_priv;
+ ggtt->vm.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3460,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* restriction!
*/
if (USES_GUC(dev_priv)) {
- ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if ((ggtt->base.total - 1) >> 32) {
+ if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if (ggtt->mappable_end > ggtt->base.total) {
+ if (ggtt->mappable_end > ggtt->vm.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->base.total);
- ggtt->mappable_end = ggtt->base.total;
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3551,9 +3509,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+ i915_address_space_init(&ggtt->vm, dev_priv, "[global]");
if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3534,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
return ret;
}
@@ -3610,34 +3568,31 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj, *on;
+ struct i915_vma *vma, *vn;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+ ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
- bool ggtt_bound = false;
- struct i915_vma *vma;
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- for_each_ggtt_vma(vma, obj) {
- if (!i915_vma_unbind(vma))
- continue;
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ continue;
- WARN_ON(i915_vma_bind(vma, obj->cache_level,
- PIN_UPDATE));
- ggtt_bound = true;
- }
+ if (!i915_vma_unbind(vma))
+ continue;
- if (ggtt_bound)
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
- ggtt->base.closed = false;
+ ggtt->vm.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3657,8 +3612,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ppgtt = dev_priv->mm.aliasing_ppgtt;
else
ppgtt = i915_vm_to_ppgtt(vm);
+ if (!ppgtt)
+ continue;
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+ gen6_write_page_range(ppgtt, 0, ppgtt->vm.total);
}
}
@@ -3880,7 +3837,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -3977,7 +3934,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +3945,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
- mode = DRM_MM_INSERT_HIGH;
+ mode = DRM_MM_INSERT_HIGHEST;
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
@@ -4008,6 +3965,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (mode & DRM_MM_INSERT_ONCE) {
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end,
+ DRM_MM_INSERT_BEST);
+ if (err != -ENOSPC)
+ return err;
+ }
+
if (flags & PIN_NOEVICT)
return -ENOSPC;
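
The PIN_HIGH change pairs with the new retry block: drm_mm defines DRM_MM_INSERT_HIGHEST as the top-down mode plus DRM_MM_INSERT_ONCE, meaning only the single topmost hole is tried, so on -ENOSPC the code falls back to an exhaustive best-fit pass before resorting to eviction. The strategy in miniature, over a toy hole list (hypothetical data layout):

    #include <stdio.h>

    struct hole { unsigned long start, size; };

    /* "HIGHEST": try only the topmost hole, like DRM_MM_INSERT_ONCE */
    static long insert_highest(const struct hole *h, int n, unsigned long size)
    {
            if (n && h[n - 1].size >= size)
                    return h[n - 1].start + h[n - 1].size - size;
            return -1;      /* single attempt failed: -ENOSPC */
    }

    /* fallback: best fit across all holes */
    static long insert_best(const struct hole *h, int n, unsigned long size)
    {
            long addr = -1;
            unsigned long best = ~0UL;

            for (int i = 0; i < n; i++)
                    if (h[i].size >= size && h[i].size < best) {
                            best = h[i].size;
                            addr = h[i].start;
                    }
            return addr;
    }

    int main(void)
    {
            /* holes sorted by address; the topmost one is too small */
            struct hole holes[] = { { 0x0000, 0x8000 }, { 0xf000, 0x1000 } };
            unsigned long size = 0x2000;
            long addr = insert_highest(holes, 2, size);

            if (addr < 0)   /* mirrors the err != -ENOSPC retry in the hunk */
                    addr = insert_best(holes, 2, size);
            printf("placed at %#lx\n", (unsigned long)addr);
            return 0;
    }

The single top-down probe keeps the common case cheap while the best-fit retry preserves the old behaviour when the top of the GTT is fragmented.
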
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..197c2c06ecb7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -65,7 +65,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -367,7 +367,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec.
*/
struct i915_ggtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */
@@ -385,7 +385,7 @@ struct i915_ggtt {
};
struct i915_hw_ppgtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct kref ref;
struct drm_mm_node node;
unsigned long pd_dirty_rings;
@@ -543,7 +543,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
+ return container_of(vm, struct i915_ggtt, vm);
}
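
The base -> vm rename touches every container_of() that recovers the wrapping structure from the embedded address space, as i915_vm_to_ggtt() does above. For reference, the pattern in isolation:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct address_space { unsigned long total; };

    struct ggtt {
            struct address_space vm;    /* embedded base, as in i915_ggtt */
            int extra;
    };

    int main(void)
    {
            struct ggtt g = { .vm = { .total = 1UL << 32 }, .extra = 42 };
            struct address_space *vm = &g.vm;

            /* recover the containing struct from the embedded member */
            struct ggtt *back = container_of(vm, struct ggtt, vm);

            printf("extra = %d\n", back->extra);
            return 0;
    }
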
#define INTEL_MAX_PPAT_ENTRIES 8
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 1036e8686916..3210cedfa46c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5757fb7c4b5a..55e84e71f526 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &i915->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ad949cc30928..79a347295e00 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
- vma = i915_vma_instance(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
- ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
@@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index df234dc23274..758234d20f4e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
int ret;
- ggtt->base.insert_page(&ggtt->base, dma, slot,
- I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
@@ -993,7 +992,7 @@ unwind:
out:
compress_fini(&compress, dst);
- ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1287,9 +1286,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
- erq->context = request->ctx->hw_id;
+ struct i915_gem_context *ctx = request->gem_context;
+
+ erq->context = ctx->hw_id;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&request->ctx->ban_score);
+ erq->ban_score = atomic_read(&ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1298,7 @@ static void record_request(struct i915_request *request,
erq->tail = request->tail;
rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
rcu_read_unlock();
}
@@ -1461,12 +1462,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
request = i915_gem_find_active_request(engine);
if (request) {
+ struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = request->ctx->ppgtt ?
- &request->ctx->ppgtt->base : &ggtt->base;
+ ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
- record_context(&ee->context, request->ctx);
+ record_context(&ee->context, ctx);
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1483,11 +1484,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
i915_error_object_create(i915,
- to_intel_context(request->ctx,
- engine)->state);
+ request->hw_context->state);
error->simulated |=
- i915_gem_context_no_error_capture(request->ctx);
+ i915_gem_context_no_error_capture(ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1563,17 +1563,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &error->i915->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
count_inactive = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
+ list_for_each_entry(vma, &vm->inactive_list, vm_link)
count_inactive++;
count_active = 0;
- list_for_each_entry(vma, &vm->inactive_list, vm_link)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
count_active++;
bo = NULL;
@@ -1667,7 +1667,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ error->ngtier = 6;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index dac0f8c4c1cf..58910f1dc67c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -58,7 +58,7 @@ struct i915_gpu_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4], ngtier;
+ u32 gtier[6], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f9bc3aaa90d0..2fd92a886789 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2640,7 +2640,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (IS_CNL_WITH_PORT_F(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 11)
tmp_mask |= CNL_AUX_CHANNEL_F;
if (iir & tmp_mask) {
@@ -3920,7 +3921,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
de_port_masked |= CNL_AUX_CHANNEL_F;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 66ea3552c63e..49fcc4679db6 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -130,9 +130,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
- "Enable command parsing (true=enabled [default], false=disabled)");
-
i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6684025b7af8..aebe0469ddaa 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -58,7 +58,6 @@ struct drm_printer;
param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_cmd_parser, true) \
param(bool, enable_hangcheck, true) \
param(bool, fastboot, false) \
param(bool, prefault_disable, false) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4364922e935d..97a91e6af7e3 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
GEN(7),
.is_lp = 1,
.num_pipes = 2,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 019bd2d073ad..a6c8d61add0c 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
continue;
}
- /*
- * XXX: Just keep the lower 21 bits for now since I'm not
- * entirely sure if the HW touches any of the higher bits in
- * this field
- */
- ctx_id = report32[2] & 0x1fffff;
+ ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_context *ce;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&i915->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * As the ID is the gtt offset of the context's vma we
+ * pin the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ce = intel_context_pin(ctx, engine);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ce))
+ return ce;
+
+ i915->perf.oa.pinned_ctx = ce;
+
+ return ce;
+}
+
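
oa_pin_context() follows the kernel's ERR_PTR convention: an errno travels inside the pointer value itself, so one return covers both success and failure. A self-contained rendition of the helpers (simplified from <linux/err.h>):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static int value = 42;

    /* returns a valid pointer or an encoded errno, never NULL-on-error */
    static int *pin_thing(int fail)
    {
            if (fail)
                    return ERR_PTR(-12);    /* -ENOMEM */
            return &value;
    }

    int main(void)
    {
            int *p = pin_thing(0);

            if (IS_ERR(p))
                    return (int)-PTR_ERR(p);
            printf("pinned, value %d\n", *p);
            return 0;
    }
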
/**
* oa_get_render_ctx_id - determine and hold ctx hw id
* @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct intel_ring *ring;
- int ret;
-
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
+ ce = oa_pin_context(i915, stream->ctx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+ switch (INTEL_GEN(i915)) {
+ case 7: {
/*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
+ * On Haswell we don't do any post processing of the reports
+ * and don't need to use the mask.
*/
- ring = intel_context_pin(stream->ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+ i915->perf.oa.specific_ctx_id_mask = 0;
+ break;
+ }
+ case 8:
+ case 9:
+ case 10:
+ if (USES_GUC_SUBMISSION(i915)) {
+ /*
+ * When using GuC, the context descriptor we write in
+ * i915 is read by GuC and rewritten before it's
+ * actually written into the hardware. The LRCA is
+ * what is put into the context id field of the
+ * context descriptor by GuC. Because it's aligned to
+ * a page, the lower 12 bits are always zero and
+ * dropped by GuC. They won't be part of the context
+ * ID in the OA reports, so squash those lower bits.
+ */
+ i915->perf.oa.specific_ctx_id =
+ lower_32_bits(ce->lrc_desc) >> 12;
- /*
- * Explicitly track the ID (instead of calling
- * i915_ggtt_offset() on the fly) considering the difference
- * with gen8+ and execlists
- */
- dev_priv->perf.oa.specific_ctx_id =
- i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+ /*
+ * GuC uses the top bit to signal proxy submission, so
+ * ignore that bit.
+ */
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ i915->perf.oa.specific_ctx_id = stream->ctx->hw_id;
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ }
+ break;
+
+ case 11: {
+ struct intel_engine_cs *engine = i915->engine[RCS];
+
+ i915->perf.oa.specific_ctx_id =
+ stream->ctx->hw_id << (GEN11_SW_CTX_ID_SHIFT - 32) |
+ engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+ engine->class << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ i915->perf.oa.specific_ctx_id_mask =
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+ ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+ ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ break;
+ }
+
+ default:
+ MISSING_CASE(INTEL_GEN(i915));
}
+ DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ i915->perf.oa.specific_ctx_id,
+ i915->perf.oa.specific_ctx_id_mask);
+
return 0;
}
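
On gen11 the OA context filter is assembled from bit-fields, each defined by a shift/width pair (the "- 32" places them in the upper dword of the context descriptor), and the mask is derived from the same pairs so only those fields are compared against the report. A generic sketch of shift/width packing, with illustrative field positions rather than the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative shift/width pairs; the real GEN11_* values differ */
    #define SW_CTX_ID_SHIFT  5
    #define SW_CTX_ID_WIDTH  11
    #define INSTANCE_SHIFT   0
    #define INSTANCE_WIDTH   3

    #define FIELD(val, shift, width) \
            (((uint32_t)(val) & ((1U << (width)) - 1)) << (shift))
    #define FIELD_MASK(shift, width) \
            (((1U << (width)) - 1) << (shift))

    int main(void)
    {
            uint32_t id = FIELD(0x2a, SW_CTX_ID_SHIFT, SW_CTX_ID_WIDTH) |
                          FIELD(1, INSTANCE_SHIFT, INSTANCE_WIDTH);
            uint32_t mask = FIELD_MASK(SW_CTX_ID_SHIFT, SW_CTX_ID_WIDTH) |
                            FIELD_MASK(INSTANCE_SHIFT, INSTANCE_WIDTH);
            uint32_t report = id | 0x80000000; /* stray high bit in a report */

            /* matches: the stray bit lies outside the masked fields */
            printf("match: %d\n", (report & mask) == id);
            return 0;
    }

This is why gen8_append_oa_reports() above switched from a hard-coded 21-bit mask to specific_ctx_id_mask: bits outside the ID fields get squashed per platform instead of by guesswork.
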
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
-
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- intel_context_unpin(stream->ctx, engine);
-
+ intel_context_unpin(ce);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
}
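
fetch_and_zero() gives the release path a one-shot handoff: the stored pointer is read and cleared in a single expression, so a second put sees NULL and does nothing. Its shape outside the driver (a GNU C statement expression, as i915 defines it; note it is not atomic, the driver relies on external serialization):

    #include <stdio.h>

    /* swap the location's value for zero, return the old value */
    #define fetch_and_zero(ptr) ({                  \
            __typeof__(*(ptr)) __v = *(ptr);        \
            *(ptr) = (__typeof__(*(ptr)))0;         \
            __v;                                    \
    })

    static int ctx;                 /* stand-in for the pinned context */
    static int *pinned = &ctx;

    static void release(void)
    {
            int *ce = fetch_and_zero(&pinned);

            if (ce)
                    printf("unpinned %p\n", (void *)ce);
            else
                    printf("already released, nothing to do\n");
    }

    int main(void)
    {
            release();      /* performs the unpin */
            release();      /* safe no-op */
            return 0;
    }
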
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index dc87797db500..c39541ed2219 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -127,6 +127,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
i915->pmu.timer_enabled = true;
+ i915->pmu.timer_last = ktime_get();
hrtimer_start_range_ns(&i915->pmu.timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
@@ -155,12 +156,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
}
static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
{
- sample->cur += mul_u32_u32(val, unit);
+ sample->cur += val;
}
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -182,8 +184,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = !i915_seqno_passed(current_seqno, last_seqno);
- update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- PERIOD, val);
+ if (val)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -194,11 +197,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = 0;
}
- update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- PERIOD, !!(val & RING_WAIT));
+ if (val & RING_WAIT)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ period_ns);
- update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ period_ns);
}
if (fw)
@@ -207,7 +212,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+ sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -221,15 +233,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- 1, intel_gpu_freq(dev_priv, val));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(dev_priv, val),
+ period_ns / 1000);
}
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq),
+ period_ns / 1000);
}
}
@@ -237,14 +251,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ unsigned int period_ns;
+ ktime_t now;
if (!READ_ONCE(i915->pmu.timer_enabled))
return HRTIMER_NORESTART;
- engines_sample(i915);
- frequency_sample(i915);
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+ i915->pmu.timer_last = now;
+
+ /*
+ * Strictly speaking the passed-in period may not be 100% accurate
+ * for all internal calculations, since some time is spent grabbing
+ * the forcewake. However, the potential error from timer callback
+ * delay greatly dominates this, so we keep it simple.
+ */
+ engines_sample(i915, period_ns);
+ frequency_sample(i915, period_ns);
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
- hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
return HRTIMER_RESTART;
}
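The net effect of this hunk: rather than crediting a fixed PERIOD per tick, each callback measures the real elapsed time and forwards the timer from the captured timestamp, so a late callback no longer skews the counters. A sketch of the per-tick accounting (names simplified):

	period_ns  = now - timer_last;		/* real elapsed wall time */
	timer_last = now;
	if (engine_busy)
		busy.cur += period_ns;			/* add_sample()      */
	freq.cur += freq_mhz * (period_ns / 1000);	/* add_sample_mult() */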
@@ -519,12 +546,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_ACTUAL_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
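The unit bookkeeping that replaces the old FREQUENCY constant: each tick accumulates MHz x microseconds, so dividing the counter by USEC_PER_SEC yields a value whose delta per second of wall time is the average frequency in MHz. A worked example (numbers illustrative): a GPU held at 300 MHz for 2 s accumulates 300 * 2,000,000 = 6e8; divided by 1,000,000 the counter reads 600, and a delta of 600 over 2 s gives the expected 300 MHz average.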
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 2ba735299f7c..7f164ca3db12 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -65,6 +65,14 @@ struct i915_pmu {
* event types.
*/
u64 enable;
+
+ /**
+ * @timer_last:
+ *
+ * Timestamp of the previous timer invocation.
+ */
+ ktime_t timer_last;
+
/**
* @enable_count: Reference counts for the enabled events.
*
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 195203f298df..55bde4a02289 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
+#define VGT_CAPS_HUGE_GTT BIT(4)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3ace929dd90f..3f502eef2431 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -4,6 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < args->num_items; i++, user_item_ptr++) {
struct drm_i915_query_item item;
- u64 func_idx;
+ unsigned long func_idx;
int ret;
if (copy_from_user(&item, user_item_ptr, sizeof(item)))
@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (item.query_id == 0)
return -EINVAL;
+ if (overflows_type(item.query_id - 1, unsigned long))
+ return -EINVAL;
+
func_idx = item.query_id - 1;
- if (func_idx < ARRAY_SIZE(i915_query_funcs))
+ ret = -EINVAL;
+ if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
+ func_idx = array_index_nospec(func_idx,
+ ARRAY_SIZE(i915_query_funcs));
ret = i915_query_funcs[func_idx](dev_priv, &item);
- else
- ret = -EINVAL;
+ }
/* Only write the length back to userspace if they differ. */
if (ret != item.length && put_user(ret, &user_item_ptr->length))
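Two hardening steps are combined above: overflows_type() rejects query IDs that would not fit in the unsigned long index on 32-bit builds, and array_index_nospec() clamps the index under speculative execution so the bounds check cannot be bypassed Spectre-v1 style. The generic shape of the idiom (sketch):

	unsigned long idx = user_supplied;

	if (idx >= ARRAY_SIZE(funcs))
		return -EINVAL;
	/* Clamp idx even on speculative paths before the table load. */
	idx = array_index_nospec(idx, ARRAY_SIZE(funcs));
	return funcs[idx](args);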
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f11bb213ec07..f0317bde3aab 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1990,6 +1990,11 @@ enum i915_power_well_id {
_ICL_PORT_COMP_DW10_A, \
_ICL_PORT_COMP_DW10_B)
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2306,8 +2311,9 @@ enum i915_power_well_id {
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2663,6 +2669,9 @@ enum i915_power_well_id {
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
+#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4)
+
#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
@@ -2709,6 +2718,10 @@ enum i915_power_well_id {
#define GEN10_F2_SS_DIS_SHIFT 18
#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -4088,10 +4101,10 @@ enum {
#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define EDP_PSR2_TP2_TIME_500 (0<<8)
-#define EDP_PSR2_TP2_TIME_100 (1<<8)
-#define EDP_PSR2_TP2_TIME_2500 (2<<8)
-#define EDP_PSR2_TP2_TIME_50 (3<<8)
+#define EDP_PSR2_TP2_TIME_500us (0<<8)
+#define EDP_PSR2_TP2_TIME_100us (1<<8)
+#define EDP_PSR2_TP2_TIME_2500us (2<<8)
+#define EDP_PSR2_TP2_TIME_50us (3<<8)
#define EDP_PSR2_TP2_TIME_MASK (3<<8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
@@ -4133,11 +4146,12 @@ enum {
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_MASK (1<<30)
-#define ADPA_PIPE_A_SELECT 0
-#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_PIPE_SEL_SHIFT 30
+#define ADPA_PIPE_SEL_MASK (1<<30)
+#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
+#define ADPA_PIPE_SEL_SHIFT_CPT 29
+#define ADPA_PIPE_SEL_MASK_CPT (3<<29)
+#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
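The switch from named pipe-select bits to mask/shift/sel triplets makes readout uniform across platforms; this mirrors what the new intel_crt_port_enabled() further down in this series does:

	u32 val = I915_READ(adpa_reg);
	enum pipe pipe;

	if (HAS_PCH_CPT(dev_priv))
		pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
	else
		pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;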
@@ -4296,9 +4310,9 @@ enum {
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_SHIFT 30
#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/*
@@ -4338,12 +4352,14 @@ enum {
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
/* DVO port control */
@@ -4354,7 +4370,9 @@ enum {
#define _DVOC 0x61160
#define DVOC _MMIO(_DVOC)
#define DVO_ENABLE (1 << 31)
-#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_SEL_SHIFT 30
+#define DVO_PIPE_SEL_MASK (1 << 30)
+#define DVO_PIPE_SEL(pipe) ((pipe) << 30)
#define DVO_PIPE_STALL_UNUSED (0 << 28)
#define DVO_PIPE_STALL (1 << 28)
#define DVO_PIPE_STALL_TV (2 << 28)
@@ -4391,9 +4409,12 @@ enum {
*/
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPEB_SELECT (1 << 30)
-#define LVDS_PIPE_MASK (1 << 30)
-#define LVDS_PIPE(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT 30
+#define LVDS_PIPE_SEL_MASK (1 << 30)
+#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT_CPT 29
+#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
+#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4690,7 +4711,9 @@ enum {
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT 30
+# define TV_ENC_PIPE_SEL_MASK (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30)
/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
/* Outputs SVideo video (DAC B/C) */
@@ -5172,10 +5195,15 @@ enum {
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
#define DP_PORT_EN (1 << 31)
-#define DP_PIPEB_SELECT (1 << 30)
-#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
-#define DP_PIPE_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_SHIFT 30
+#define DP_PIPE_SEL_MASK (1 << 30)
+#define DP_PIPE_SEL(pipe) ((pipe) << 30)
+#define DP_PIPE_SEL_SHIFT_IVB 29
+#define DP_PIPE_SEL_MASK_IVB (3 << 29)
+#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -5896,7 +5924,6 @@ enum {
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_SHIFT 28
#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5905,18 +5932,21 @@ enum {
#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
/* New style CUR*CNTR flags */
-#define CURSOR_MODE 0x27
-#define CURSOR_MODE_DISABLE 0x00
-#define CURSOR_MODE_128_32B_AX 0x02
-#define CURSOR_MODE_256_32B_AX 0x03
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_MODE 0x27
+#define MCURSOR_MODE_DISABLE 0x00
+#define MCURSOR_MODE_128_32B_AX 0x02
+#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_32B_AX 0x07
+#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
+#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURSOR_ROTATE_180 (1<<15)
-#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
+#define MCURSOR_PIPE_CSC_ENABLE (1<<24)
+#define MCURSOR_ROTATE_180 (1<<15)
+#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
@@ -6764,6 +6794,10 @@ enum {
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
+#define PS_Y_PHASE(x) ((x) << 16)
+#define PS_UV_RGB_PHASE(x) ((x) << 0)
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
@@ -7192,13 +7226,17 @@ enum {
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+ #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26))
+ #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+ #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+ #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+ #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+ #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+ #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
@@ -7208,6 +7246,7 @@ enum {
#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7862,27 +7901,14 @@ enum {
#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
-#define PORT_TRANS_A_SEL_CPT 0
-#define PORT_TRANS_B_SEL_CPT (1<<29)
-#define PORT_TRANS_C_SEL_CPT (2<<29)
-#define PORT_TRANS_SEL_MASK (3<<29)
-#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
-#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
-#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
-#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
-
#define _TRANS_DP_CTL_A 0xe0300
#define _TRANS_DP_CTL_B 0xe1300
#define _TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
-#define TRANS_DP_PORT_SEL_B (0<<29)
-#define TRANS_DP_PORT_SEL_C (1<<29)
-#define TRANS_DP_PORT_SEL_D (2<<29)
-#define TRANS_DP_PORT_SEL_NONE (3<<29)
-#define TRANS_DP_PORT_SEL_MASK (3<<29)
-#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
+#define TRANS_DP_PORT_SEL_MASK (3 << 29)
+#define TRANS_DP_PORT_SEL_NONE (3 << 29)
+#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
#define TRANS_DP_8BPC (0<<9)
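The per-port defines collapse into one parametrised macro, making encode and decode symmetric. A sketch of both directions (the decode replaces the removed TRANS_DP_PIPE_TO_PORT()):

	val |= TRANS_DP_PORT_SEL(port);	/* ((port) - PORT_B) << 29 */
	port = PORT_B + ((val & TRANS_DP_PORT_SEL_MASK) >> 29);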
@@ -8322,8 +8348,9 @@ enum {
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
-#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9100,13 +9127,16 @@ enum skl_power_gate {
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
#define DPLL_CFGCR1_KDIV_4 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
#define DPLL_CFGCR1_PDIV_2 (1 << 2)
#define DPLL_CFGCR1_PDIV_3 (2 << 2)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8928894dd9c7..f187250e60c6 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -320,6 +320,7 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
+ GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -383,8 +384,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
* the subsequent request.
*/
if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = rq->ctx;
+ intel_context_unpin(engine->last_retired_context);
+ engine->last_retired_context = rq->hw_context;
}
static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +456,8 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->ctx->ban_score);
- intel_context_unpin(request->ctx, request->engine);
+ atomic_dec_if_positive(&request->gem_context->ban_score);
+ intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
@@ -657,7 +658,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
- struct intel_ring *ring;
+ struct intel_context *ce;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -681,22 +682,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ring = intel_context_pin(ctx, engine);
- if (IS_ERR(ring))
- return ERR_CAST(ring);
- GEM_BUG_ON(!ring);
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
ret = reserve_gt(i915);
if (ret)
goto err_unpin;
- ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+ ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
if (ret)
goto err_unreserve;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
i915_request_completed(rq))
i915_request_retire(rq);
@@ -760,9 +760,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
INIT_LIST_HEAD(&rq->active_list);
rq->i915 = i915;
rq->engine = engine;
- rq->ctx = ctx;
- rq->ring = ring;
- rq->timeline = ring->timeline;
+ rq->gem_context = ctx;
+ rq->hw_context = ce;
+ rq->ring = ce->ring;
+ rq->timeline = ce->ring->timeline;
GEM_BUG_ON(rq->timeline == &engine->timeline);
spin_lock_init(&rq->lock);
@@ -814,14 +815,14 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
goto err_unwind;
/* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(rq->ctx, engine);
+ __intel_context_pin(ce);
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
return rq;
err_unwind:
- rq->ring->emit = rq->head;
+ ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
GEM_BUG_ON(!list_empty(&rq->active_list));
@@ -832,7 +833,7 @@ err_unwind:
err_unreserve:
unreserve_gt(i915);
err_unpin:
- intel_context_unpin(ctx, engine);
+ intel_context_unpin(ce);
return ERR_PTR(ret);
}
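The API change running through this file, summarised in one place (sketch): intel_context_pin() used to return the pinned ring, now it returns the pinned intel_context, and unpin takes only that object:

	/* before */
	struct intel_ring *ring = intel_context_pin(ctx, engine);
	intel_context_unpin(ctx, engine);

	/* after */
	struct intel_context *ce = intel_context_pin(ctx, engine);
	/* ... use ce->ring ... */
	intel_context_unpin(ce);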
@@ -1018,8 +1019,8 @@ i915_request_await_object(struct i915_request *to,
void __i915_request_add(struct i915_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
- struct intel_ring *ring = request->ring;
struct i915_timeline *timeline = request->timeline;
+ struct intel_ring *ring = request->ring;
struct i915_request *prev;
u32 *cs;
int err;
@@ -1095,8 +1096,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list))
+ if (list_is_first(&request->ring_link, &ring->request_list)) {
+ GEM_TRACE("marking %s as active\n", ring->timeline->name);
list_add(&ring->active_link, &request->i915->gt.active_rings);
+ }
request->emitted_jiffies = jiffies;
/*
@@ -1113,7 +1116,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
- engine->schedule(request, &request->ctx->sched);
+ engine->schedule(request, &request->gem_context->sched);
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index eddbd4245cb3..491ff81d0fea 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -93,8 +93,9 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *ctx;
+ struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
+ struct intel_context *hw_context;
struct intel_ring *ring;
struct i915_timeline *timeline;
struct intel_signal_node signaling;
@@ -266,6 +267,7 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8cc3a256f29d..1472f48ab2e8 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, sync_from)
- __field(u32, sync_to)
+ __field(u32, from_class)
+ __field(u32, from_instance)
+ __field(u32, to_class)
+ __field(u32, to_instance)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->i915->drm.primary->index;
- __entry->sync_from = from->engine->id;
- __entry->sync_to = to->engine->id;
+ __entry->from_class = from->engine->uabi_class;
+ __entry->from_instance = from->engine->instance;
+ __entry->to_class = to->engine->uabi_class;
+ __entry->to_instance = to->engine->instance;
__entry->seqno = from->global_seqno;
),
- TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
__entry->dev,
- __entry->sync_from, __entry->sync_to,
+ __entry->from_class, __entry->from_instance,
+ __entry->to_class, __entry->to_instance,
__entry->seqno)
);
@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, port)
@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->prio, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->prio, __entry->global_seqno, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, completed)
@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
- __entry->dev, __entry->hw_id, __entry->ring,
- __entry->ctx, __entry->seqno,
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global_seqno, __entry->completed)
);
@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ring)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(bool, waiters)
),
TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index;
- __entry->ring = engine->id;
+ __entry->class = engine->uabi_class;
+ __entry->instance = engine->instance;
__entry->seqno = intel_engine_get_seqno(engine);
__entry->waiters = waiters;
),
- TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->waiters)
+ TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->seqno, __entry->waiters)
);
DEFINE_EVENT(i915_request, i915_request_retire,
@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
__field(unsigned int, flags)
@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global,
- !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ __entry->flags)
);
DEFINE_EVENT(i915_request, i915_request_wait_end,
@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -966,21 +986,24 @@ TRACE_EVENT(switch_mm,
TP_ARGS(engine, to),
TP_STRUCT__entry(
- __field(u32, ring)
+ __field(u16, class)
+ __field(u16, instance)
__field(struct i915_gem_context *, to)
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
TP_fast_assign(
- __entry->ring = engine->id;
+ __entry->class = engine->uabi_class;
+ __entry->instance = engine->instance;
__entry->to = to;
- __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
+ __entry->vm = to->ppgtt ? &to->ppgtt->vm : NULL;
__entry->dev = engine->i915->drm.primary->index;
),
- TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
- __entry->dev, __entry->ring, __entry->to, __entry->vm)
+ TP_printk("dev=%u, engine=%u:%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->class, __entry->instance, __entry->to,
+ __entry->vm)
);
#endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5fe9f3f39467..869cf4a3b6de 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size,
node->size / 1024);
- ggtt->base.reserved -= node->size;
+ ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
}
@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
- ret = i915_gem_gtt_reserve(&ggtt->base, node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
- ggtt->base.reserved += size;
+ ggtt->vm.reserved += size;
return ret;
}
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long ggtt_end = ggtt->base.total;
+ unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index bb8338450dc1..551acc390046 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}
+static inline bool
+intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
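A hypothetical call site for the new capability check (sketch only; the actual user lands elsewhere in the series): gate 2M GTT entries on the host advertising VGT_CAPS_HUGE_GTT:

	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		return false;	/* fall back to 4K GTT pages under this vGPU */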
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9324d476e0a7..912f16ffe7ee 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj,
int i;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
@@ -459,6 +459,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
return true;
}
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+ /*
+ * Combine the assertion that the object is bound and that we have
+ * pinned its pages. But we should never have bound the object
+ * more than we have pinned its pages. (For complete accuracy, we
+ * assume that no one else is pinning the pages, but as a rough assertion
+ * that we will not run into problems later, this will do!)
+ */
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -595,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
obj->bind_count++;
spin_unlock(&dev_priv->mm.obj_lock);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ assert_bind_count(obj);
return 0;
@@ -633,7 +645,7 @@ i915_vma_remove(struct i915_vma *vma)
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ assert_bind_count(obj);
}
int __i915_vma_do_pin(struct i915_vma *vma,
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 40285d1b91b7..61ddb5871d8a 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -124,6 +124,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+ new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
crtc_state->mode_changed = true;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 6d068786eb41..e8bf4cc499e1 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
&crtc_state->base.adjusted_mode;
int ret;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
@@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!crtc)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 54270bdde100..465dff4780fe 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
- dev_priv->vbt.lvds_vbt = 1;
-
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->vbt.edp.support = 1;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ /*
+ * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+ * to mean "eDP". The VBT spec doesn't agree with that
+ * interpretation, but real world VBTs seem to.
+ */
+ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ } else {
+ /*
+ * FIXME it's not clear which BDB version has the LVDS config
+ * bits defined. Revision history in the VBT spec says:
+ * "0.92 | Add two definitions for VBT value of LVDS Active
+ * Config (00b and 11b values defined) | 06/13/2005"
+ * but does not specify the BDB version.
+ *
+ * So far version 134 (on i945gm) is the oldest VBT observed
+ * in the wild with the bits correctly populated. Version
+ * 108 (on i85x) does not have the bits correctly populated.
+ */
+ if (bdb->version >= 134 &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ }
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
- if (!edp) {
- if (dev_priv->vbt.edp.support)
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ if (!edp)
return;
- }
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
@@ -688,8 +706,52 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
break;
}
- dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
- dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+ /*
+ * New PSR options: 0=500us, 1=100us, 2=2500us, 3=0us.
+ * The old decimal value is the wake-up time in multiples of 100 us.
+ */
+ if (bdb->version >= 209 && IS_GEN9_BC(dev_priv)) {
+ switch (psr_table->tp1_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+ break;
+ }
+
+ switch (psr_table->tp2_tp3_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ break;
+ }
+ } else {
+ dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ }
}
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -1197,18 +1259,37 @@ static const u8 cnp_ddc_pin_map[] = {
[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
};
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
- if (HAS_PCH_CNP(dev_priv)) {
- if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
- return cnp_ddc_pin_map[vbt_pin];
- } else {
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
- return 0;
- }
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (HAS_PCH_ICP(dev_priv)) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(dev_priv)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
}
- return vbt_pin;
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -1504,7 +1585,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
- dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
@@ -1513,6 +1593,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
+ /* driver features */
+ dev_priv->vbt.int_lvds_support = 1;
+
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
/*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 18e643df523e..86a987b8ac66 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -846,8 +846,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
/*
* Leave the fake_irq timer enabled (if it is running), but clear the
@@ -871,7 +872,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
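Why irqsave here: reset paths may call intel_engine_reset_breadcrumbs() with interrupts already disabled, and a plain spin_unlock_irq() would unconditionally re-enable them. The saved-flags variant restores whatever state the caller had (sketch):

	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);	/* records current IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&b->irq_lock, flags); /* restores it, never force-enables */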
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index de0e22322c76..211d601cd1b1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(adpa_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+ return val & ADPA_DAC_ENABLE;
+}
+
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(crt->adpa_reg);
-
- if (!(tmp & ADPA_DAC_ENABLE))
- goto out;
+ ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev_priv))
- adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
- else if (crtc->pipe == 0)
- adpa |= ADPA_PIPE_A_SELECT;
+ adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
- adpa |= ADPA_PIPE_B_SELECT;
+ adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..b344e0fe08fd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1243,35 +1243,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-/* Finds the only possible encoder associated with the given CRTC. */
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_encoder *ret = NULL;
- struct drm_atomic_state *state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int num_encoders = 0;
- int i;
-
- state = crtc_state->base.state;
-
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- ret = to_intel_encoder(connector_state->best_encoder);
- num_encoders++;
- }
-
- WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
#define LC_FREQ 2700
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
@@ -1374,8 +1345,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ if (INTEL_GEN(dev_priv) >= 11) {
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+ } else {
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ }
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
@@ -1451,6 +1427,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
+static void icl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ int link_clock = 0;
+ uint32_t pll_id;
+
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ if (port == PORT_A || port == PORT_B) {
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ else
+ link_clock = icl_calc_dp_combo_pll_link(dev_priv,
+ pll_id);
+ } else {
+ /* FIXME - Add for MG PLL */
+ WARN(1, "MG PLL clock_get code not implemented yet\n");
+ }
+
+ pipe_config->port_clock = link_clock;
+ ddi_dotclock_get(pipe_config);
+}
+
static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1644,6 +1644,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -2115,6 +2117,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change we need to
+ * rethink this code.
+ */
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+}
+
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dee3a8e659f1..2c16c3a3cdea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
- * We can ditch the crtc->primary->fb check as soon as we can
+ * We can ditch the crtc->primary->state->fb check as soon as we can
* properly reconstruct framebuffers.
*
* FIXME: The intel_crtc->active here should be switched to
@@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
u32 val;
- enum pipe panel_pipe = PIPE_A;
+ enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
if (WARN_ON(HAS_DDI(dev_priv)))
@@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pp_reg = PP_CONTROL(0);
port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- if (port_sel == PANEL_PORT_SELECT_LVDS &&
- I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
- /* XXX: else fix for eDP */
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
+ u32 port_sel;
+
pp_reg = PP_CONTROL(0);
- if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
+ port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
val = I915_READ(pp_reg);
@@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
static void assert_plane(struct intel_plane *plane, bool state)
{
- bool cur_state = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool cur_state;
+
+ cur_state = plane->get_hw_state(plane, &pipe);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
@@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 port_sel, u32 val)
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t dp_reg)
{
- if ((val & DP_PORT_EN) == 0)
- return false;
+ enum pipe port_pipe;
+ bool state;
- if (HAS_PCH_CPT(dev_priv)) {
- u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
- if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
- return false;
- } else {
- if ((val & DP_PIPE_MASK) != (pipe << 30))
- return false;
- }
- return true;
-}
+ state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & SDVO_ENABLE) == 0)
- return false;
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
- return false;
- } else {
- if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
- return false;
- }
- return true;
-}
-
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & LVDS_PORT_EN) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
- return false;
- }
- return true;
-}
-
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & ADPA_DAC_ENABLE) == 0)
- return false;
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
- return false;
- }
- return true;
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg,
- u32 port_sel)
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ enum pipe port_pipe;
+ bool state;
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
- && (val & DP_PIPEB_SELECT),
- "IBX PCH dp port still using transcoder B\n");
-}
+ state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg)
-{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
- "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
- && (val & SDVO_PIPE_B_SELECT),
- "IBX PCH hdmi port still using transcoder B\n");
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 val;
+ enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
- val = I915_READ(PCH_ADPA);
- I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- val = I915_READ(PCH_LVDS);
- I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
static void _vlv_enable_pll(struct intel_crtc *crtc,
@@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
int i, num_planes = fb->format->num_planes;
@@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* fb layout agrees with the fence layout. We already check that the
* fb stride matches the fence stride elsewhere.
*/
- if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
+ if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
i, fb->offsets[i]);
@@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
max_size = max(max_size, offset + size);
}
- if (max_size * tile_size > intel_fb->obj->base.size) {
+ if (max_size * tile_size > obj->base.size) {
DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
- max_size * tile_size, intel_fb->obj->base.size);
+ max_size * tile_size, obj->base.size);
return -EINVAL;
}
@@ -2922,9 +2882,8 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
- drm_framebuffer_get(fb);
- primary->fb = primary->state->fb = fb;
- primary->crtc = primary->state->crtc = &intel_crtc->base;
+ plane_state->fb = fb;
+ plane_state->crtc = &intel_crtc->base;
intel_set_plane_visible(to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state),
@@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-4 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
+ val = I915_READ(DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
}
}
-/* Return which DP Port should be selected for Transcoder DP control */
-static enum port
-intel_trans_dp_port_sel(struct intel_crtc *crtc)
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_connector_state *connector_state;
+ const struct drm_connector *connector;
+ struct intel_encoder *encoder = NULL;
+ int num_encoders = 0;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP)
- return encoder->port;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ num_encoders++;
}
- return -1;
+ WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
+
+ return encoder;
}
/*
@@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
@@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
&crtc_state->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
+ enum port port;
+
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
@@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
- switch (intel_trans_dp_port_sel(crtc)) {
- case PORT_B:
- temp |= TRANS_DP_PORT_SEL_B;
- break;
- case PORT_C:
- temp |= TRANS_DP_PORT_SEL_C;
- break;
- case PORT_D:
- temp |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- BUG();
- }
+ port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+ WARN_ON(port < PORT_B || port > PORT_D);
+ temp |= TRANS_DP_PORT_SEL(port);
I915_WRITE(reg, temp);
}
@@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
ironlake_enable_pch_transcoder(dev_priv, pipe);
}
-static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge, which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ */
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
+
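For reference, a quick sanity check of the new phase helper (an editorial sketch, not part of the patch; the phase value is fixed point, with 0x8000 representing half a pixel):

	/*
	 * skl_scaler_calc_phase(1, false): luma/RGB, initial phase -0.5
	 *   phase = -0x8000 -> wraps to 0x8000 -> field (0x8000 >> 2) = 0x2000
	 * skl_scaler_calc_phase(2, true): 4:2:0 cosited chroma, phase -0.25
	 *   phase = -0x8000 + 0x8000/2 = -0x4000 -> wraps to 0xC000 -> field 0x3000
	 * PS_PHASE_TRIP is only set when the adjusted phase ends up non-negative,
	 * which cannot happen with these inputs.
	 */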
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
&crtc->config->scaler_state;
if (crtc->config->pch_pfit.enabled) {
+ u16 uv_rgb_hphase, uv_rgb_vphase;
int id;
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
}
@@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*
* Spurious PCH underruns also occur during PCH enabling.
*/
- if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
@@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- ironlake_pch_enable(pipe_config);
+ ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
- /* Must wait for vblank to avoid spurious PCH FIFO underruns */
- if (intel_crtc->config->has_pch_encoder)
+ /*
+ * Must wait for vblank to avoid spurious PCH FIFO underruns.
+ * And a second vblank wait is needed at least on ILK with
+ * some interlaced HDMI modes. Let's do the double wait always
+ * in case there are more corner cases we don't know about.
+ */
+ if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ }
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5623,6 +5644,11 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
+ if (!transcoder_is_dsi(cpu_transcoder))
+ intel_ddi_enable_pipe_clock(pipe_config);
+
if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5651,11 +5677,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_pipe_clock(pipe_config);
-
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
intel_crtc->config->pch_pfit.enabled;
@@ -5688,7 +5709,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- lpt_pch_enable(pipe_config);
+ lpt_pch_enable(old_intel_state, pipe_config);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -5741,10 +5762,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
- if (intel_crtc->config->has_pch_encoder) {
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- }
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5849,6 +5868,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (IS_ICELAKE(dev_priv))
+ return port >= PORT_C && port <= PORT_F;
+
+ return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (!intel_port_is_tc(dev_priv, port))
+ return PORT_TC_NONE;
+
+ return port - PORT_C;
+}
+
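A minimal usage sketch for the new TC helpers (editorial; values assume an ICL device, where ports C-F sit on Type-C PHYs):

	enum tc_port tc;

	tc = intel_port_to_tc(dev_priv, PORT_C); /* PORT_TC1 */
	tc = intel_port_to_tc(dev_priv, PORT_B); /* PORT_TC_NONE: combo PHY port */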
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
switch (port) {
@@ -7675,16 +7710,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8705,16 +8742,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum plane_id plane_id = plane->id;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset, stride_mult, tiling, alpha;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -9142,9 +9181,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
+ intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
@@ -9692,7 +9734,8 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
-static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -9704,6 +9747,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane)
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ *pipe = PIPE_A;
+
intel_display_power_put(dev_priv, power_domain);
return ret;
@@ -9715,25 +9760,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- u32 cntl;
+ u32 cntl = 0;
- cntl = MCURSOR_GAMMA_ENABLE;
+ if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+ if (INTEL_GEN(dev_priv) <= 10) {
+ cntl |= MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+ }
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
break;
case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
MISSING_CASE(plane_state->base.crtc_w);
@@ -9741,7 +9791,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
}
if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
+ cntl |= MCURSOR_ROTATE_180;
return cntl;
}
@@ -9903,23 +9953,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-3 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ val = I915_READ(CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -13181,8 +13240,17 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13195,8 +13263,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13211,8 +13288,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -13244,38 +13339,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
-
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 4)
- return i965_mod_supported(format, modifier);
- else
- return i8xx_mod_supported(format, modifier);
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
}
-static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
- return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
-}
+static struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
-static struct drm_plane_funcs intel_plane_funcs = {
+static struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13283,7 +13376,7 @@ static struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_primary_plane_format_mod_supported,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
};
static int
@@ -13408,7 +13501,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_plane_format_mod_supported,
+ .format_mod_supported = intel_cursor_format_mod_supported,
};
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@ -13466,6 +13559,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
@@ -13521,6 +13615,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
+ primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_PRIMARY);
+
if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
intel_primary_formats = skl_pri_planar_formats;
num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@@ -13529,7 +13626,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
+ if (primary->has_ccs)
modifiers = skl_format_modifiers_ccs;
else
modifiers = skl_format_modifiers_noccs;
@@ -13537,6 +13634,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
primary->get_hw_state = skl_plane_get_hw_state;
+
+ plane_funcs = &skl_plane_funcs;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13545,6 +13644,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i965_plane_funcs;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13553,25 +13654,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i8xx_plane_funcs;
}
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -14124,14 +14227,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
- i915_gem_object_lock(intel_fb->obj);
- WARN_ON(!intel_fb->obj->framebuffer_references--);
- i915_gem_object_unlock(intel_fb->obj);
+ i915_gem_object_lock(obj);
+ WARN_ON(!obj->framebuffer_references--);
+ i915_gem_object_unlock(obj);
- i915_gem_object_put(intel_fb->obj);
+ i915_gem_object_put(obj);
kfree(intel_fb);
}
@@ -14140,8 +14244,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
if (obj->userptr.mm) {
DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
@@ -14411,9 +14514,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
i, fb->pitches[i], stride_alignment);
goto err;
}
- }
- intel_fb->obj = obj;
+ fb->obj[i] = &obj->base;
+ }
ret = intel_fill_fb_info(dev_priv, fb);
if (ret)
@@ -15095,8 +15198,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -15110,12 +15213,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 val = I915_READ(DSPCNTR(i9xx_plane));
+ enum pipe pipe;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return true;
- return (val & DISPLAY_PLANE_ENABLE) == 0 ||
- (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+ return pipe == crtc->pipe;
}
static void
@@ -15274,6 +15377,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15314,7 +15420,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- bool visible = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool visible;
+
+ visible = plane->get_hw_state(plane, &pipe);
intel_set_plane_visible(crtc_state, plane_state, visible);
}
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 2ef31617614a..c88185ed7594 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -126,6 +126,17 @@ enum port {
#define port_name(p) ((p) + 'A')
+enum tc_port {
+ PORT_TC_NONE = -1,
+
+ PORT_TC1 = 0,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+
+ I915_MAX_TC_PORTS
+};
+
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8320f0e8e3be..37b9f62aeb6e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -56,7 +56,7 @@ struct dp_link_dpll {
struct dpll dpll;
};
-static const struct dp_link_dpll gen4_dpll[] = {
+static const struct dp_link_dpll g4x_dpll[] = {
{ 162000,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ 270000,
@@ -513,7 +513,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
uint32_t DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ "skipping pipe %c power sequencer kick due to port %c being active\n",
pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
@@ -529,9 +529,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_PAT_1;
if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SELECT_CHV(pipe);
- else if (pipe == PIPE_B)
- DP |= DP_PIPEB_SELECT;
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
@@ -554,7 +554,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
/*
* Similar magic as in intel_dp_enable_port().
* We _must_ do this port enable + disable trick
- * to make this power seqeuencer lock onto the port.
+ * to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
I915_WRITE(intel_dp->output_reg, DP);
@@ -1550,8 +1550,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
int i, count = 0;
if (IS_G4X(dev_priv)) {
- divisor = gen4_dpll;
- count = ARRAY_SIZE(gen4_dpll);
+ divisor = g4x_dpll;
+ count = ARRAY_SIZE(g4x_dpll);
} else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -1964,7 +1964,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev_priv) && port == PORT_A) {
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1974,7 +1974,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- intel_dp->DP |= crtc->pipe << 29;
+ intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
@@ -2000,9 +2000,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (IS_CHERRYVIEW(dev_priv))
- intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
- else if (crtc->pipe == PIPE_B)
- intel_dp->DP |= DP_PIPEB_SELECT;
+ intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+ else
+ intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
}
}
@@ -2624,52 +2624,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+ enum port port, enum pipe *pipe)
+{
+ enum pipe p;
+
+ for_each_pipe(dev_priv, p) {
+ u32 val = I915_READ(TRANS_DP_CTL(p));
+
+ if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+ *pipe = p;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+
+ /* must initialize pipe to something for the asserts */
+ *pipe = PIPE_A;
+
+ return false;
+}
+
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe)
+{
+ bool ret;
+ u32 val;
+
+ val = I915_READ(dp_reg);
+
+ ret = val & DP_PORT_EN;
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+ return ret;
+}
+
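Sketch of how the PCH asserts consume this helper (editorial; note that *pipe is filled in even when the port is disabled):

	enum pipe port_pipe;
	bool on;

	on = intel_dp_port_enabled(dev_priv, PCH_DP_B, PORT_B, &port_pipe);
	/* on: DP_PORT_EN set (and, on CPT, some transcoder selects the port) */
	/* port_pipe: decoded from the IVB/CHV/legacy select bits, PIPE_A fallback */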
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(intel_dp->output_reg);
-
- if (!(tmp & DP_PORT_EN))
- goto out;
-
- if (IS_GEN7(dev_priv) && port == PORT_A) {
- *pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- enum pipe p;
-
- for_each_pipe(dev_priv, p) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
- if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
- *pipe = p;
- ret = true;
-
- goto out;
- }
- }
-
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
- i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev_priv)) {
- *pipe = DP_PORT_TO_PIPE_CHV(tmp);
- } else {
- *pipe = PORT_TO_PIPE(tmp);
- }
+ ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, pipe);
- ret = true;
-
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -2883,7 +2897,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -3041,11 +3055,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
edp_panel_vdd_off_sync(intel_dp);
/*
- * VLV seems to get confused when multiple power seqeuencers
+ * VLV seems to get confused when multiple power sequencers
* have the same port selected (even if only one has power/vdd
* enabled). The failure manifests as vlv_wait_port_ready() failing
* CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power seqeuencers, but let's clear the
+ * selected in multiple power sequencers, but let's clear the
* port select always when logically disconnecting a power sequencer
* from a port.
*/
@@ -3195,14 +3209,14 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ if (HAS_DDI(dev_priv))
return intel_ddi_dp_voltage_max(encoder);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev_priv) && port == PORT_A)
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
@@ -3214,33 +3228,11 @@ uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
+ if (HAS_DDI(dev_priv)) {
+ return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3253,7 +3245,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3448,7 +3440,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
}
static uint32_t
-gen4_signal_levels(uint8_t train_set)
+g4x_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -3485,9 +3477,9 @@ gen4_signal_levels(uint8_t train_set)
return signal_levels;
}
-/* Gen6's DP voltage swing and pre-emphasis control */
+/* SNB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen6_edp_signal_levels(uint8_t train_set)
+snb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3513,9 +3505,9 @@ gen6_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7's DP voltage swing and pre-emphasis control */
+/* IVB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen7_edp_signal_levels(uint8_t train_set)
+ivb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3562,14 +3554,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
- signal_levels = gen7_edp_signal_levels(train_set);
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev_priv) && port == PORT_A) {
- signal_levels = gen6_edp_signal_levels(train_set);
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
- signal_levels = gen4_signal_levels(train_set);
+ signal_levels = g4x_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
@@ -3652,7 +3644,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -3681,8 +3673,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+ DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ DP_LINK_TRAIN_PAT_1;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
@@ -3737,8 +3730,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- intel_psr_init_dpcd(intel_dp);
-
/*
* Read the eDP display control registers.
*
@@ -3754,6 +3745,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ /*
+ * This has to be called after intel_dp->edp_dpcd is filled, as PSR
+ * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
+ */
+ intel_psr_init_dpcd(intel_dp);
+
/* Read the eDP 1.4+ supported link rates. */
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -5317,14 +5314,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
- if ((intel_dp->DP & DP_PORT_EN) == 0)
- return INVALID_PIPE;
+ if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
- if (IS_CHERRYVIEW(dev_priv))
- return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
- else
- return PORT_TO_PIPE(intel_dp->DP);
+ return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -5673,7 +5670,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
/*
* On some VLV machines the BIOS can leave the VDD
- * enabled even on power seqeuencers which aren't
+ * enabled even on power sequencers which aren't
* hooked up to any port. This would mess up the
* power domain tracking the first time we pick
* one of these power sequencers for use since
@@ -5681,7 +5678,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* already on and therefore wouldn't grab the power
* domain reference. Disable VDD first to avoid this.
* This also avoids spuriously turning the VDD on as
- * soon as the new power seqeuencer gets initialized.
+ * soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
u32 pp = ironlake_get_pp_control(intel_dp);
@@ -5719,10 +5716,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- if (port == PORT_A)
+ switch (port) {
+ case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
- else
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
}
pp_on |= port_sel;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 383fbc15113d..07bdbf2582ba 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2525,6 +2525,76 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
return true;
}
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id)
+{
+ uint32_t cfgcr0, cfgcr1;
+ uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+ const struct skl_wrpll_params *params;
+ int index, n_entries, link_clock;
+
+ /* Read back values from DPLL CFGCR registers */
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+
+ dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
+ dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+ pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
+ kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
+ qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
+ DPLL_CFGCR1_QDIV_MODE_SHIFT;
+ qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+
+ params = dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
+
+ for (index = 0; index < n_entries; index++) {
+ if (dco_integer == params[index].dco_integer &&
+ dco_fraction == params[index].dco_fraction &&
+ pdiv == params[index].pdiv &&
+ kdiv == params[index].kdiv &&
+ qdiv_mode == params[index].qdiv_mode &&
+ qdiv_ratio == params[index].qdiv_ratio)
+ break;
+ }
+
+ /* Map PLL Index to Link Clock */
+ switch (index) {
+ default:
+ MISSING_CASE(index);
+ /* fall through */
+ case 0:
+ link_clock = 540000;
+ break;
+ case 1:
+ link_clock = 270000;
+ break;
+ case 2:
+ link_clock = 162000;
+ break;
+ case 3:
+ link_clock = 324000;
+ break;
+ case 4:
+ link_clock = 216000;
+ break;
+ case 5:
+ link_clock = 432000;
+ break;
+ case 6:
+ link_clock = 648000;
+ break;
+ case 7:
+ link_clock = 810000;
+ break;
+ }
+
+ return link_clock;
+}
+
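Reading of the mapping above (editorial): the divisors read back from the CFGCR registers are matched against the reference-clock table, and the resulting table index selects the link rate; an unmatched readback trips MISSING_CASE() and falls through to the 5.4 GHz entry. A caller sketch, assuming pll_id names a combo PHY DPLL:

	/* e.g. a readback matching table entry 1 (24 MHz ref) */
	int link_clock = icl_calc_dp_combo_pll_link(dev_priv, pll_id);
	/* -> 270000 kHz, i.e. the 2.7 GHz HBR rate */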
static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 7a0cd564a9ee..78915057d2e6 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -336,5 +336,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..8840108749a5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -194,7 +194,6 @@ enum intel_output_type {
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_i915_gem_object *obj;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -953,6 +952,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
bool has_fbc;
+ bool has_ccs;
int max_downscale;
uint32_t frontbuffer_bit;
@@ -971,7 +971,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
- bool (*get_hw_state)(struct intel_plane *plane);
+ bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1004,7 +1004,7 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
-#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
i915_reg_t hdmi_reg;
@@ -1376,6 +1376,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
@@ -1392,8 +1394,6 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1407,6 +1407,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_map_plls_to_ports(struct drm_crtc *crtc,
@@ -1488,6 +1490,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
@@ -1615,6 +1620,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
uint32_t pixel_format);
@@ -1644,6 +1650,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1821,6 +1830,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1911,8 +1922,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
@@ -2058,6 +2067,8 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
/* intel_sdvo.c */
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port);
@@ -2076,7 +2087,7 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
bool intel_format_is_yuv(uint32_t format);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index cf39ca90d887..cf1231f9c33b 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1665,16 +1665,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id plane;
+ enum i9xx_plane_id i9xx_plane;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->encoder->crtc_mask == BIT(PIPE_B))
- plane = PLANE_B;
+ i9xx_plane = PLANE_B;
else
- plane = PLANE_A;
+ i9xx_plane = PLANE_A;
- val = I915_READ(DSPCNTR(plane));
+ val = I915_READ(DSPCNTR(i9xx_plane));
if (val & DISPPLANE_ROTATE_180)
orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a70d767313aa..7b942b6c1700 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
- if (!(tmp & DVO_ENABLE))
- return false;
-
- *pipe = PORT_TO_PIPE(tmp);
+ *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
- return true;
+ return tmp & DVO_ENABLE;
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
@@ -276,8 +272,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
- if (pipe == 1)
- dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_SEL(pipe);
dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 1590375f31cb..2ec2e60dc670 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -515,7 +515,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
@@ -585,7 +585,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -645,6 +645,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_context_unpin(to_intel_context(ctx, engine));
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -658,7 +664,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
@@ -670,18 +677,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ring = intel_context_pin(engine->i915->kernel_context, engine);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ ce = intel_context_pin(i915->kernel_context, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (engine->i915->preempt_context) {
- ring = intel_context_pin(engine->i915->preempt_context, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
+ if (i915->preempt_context) {
+ ce = intel_context_pin(i915->preempt_context, engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
goto err_unpin_kernel;
}
}
@@ -690,7 +697,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
- if (HWS_NEEDS_PHYSICAL(engine->i915))
+ if (HWS_NEEDS_PHYSICAL(i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
@@ -702,10 +709,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+
err_unpin_kernel:
- intel_context_unpin(engine->i915->kernel_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
return ret;
}
@@ -718,6 +726,8 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+
intel_engine_cleanup_scratch(engine);
if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -732,9 +742,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
- intel_context_unpin(engine->i915->kernel_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
}
@@ -769,6 +779,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+ int err;
+
+ if (INTEL_GEN(dev_priv) < 3)
+ return -ENODEV;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ err = 0;
+ if (__intel_wait_for_register_fw(dev_priv,
+ mode, MODE_IDLE, MODE_IDLE,
+ 1000, 0,
+ NULL)) {
+ GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ err = -ETIMEDOUT;
+ }
+
+ /* A final mmio read to, hopefully, flush pending GPU writes to memory */
+ POSTING_READ_FW(mode);
+
+ return err;
+}
+
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
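intel_engine_stop_cs() relies on i915's masked-bit registers: the upper 16 bits of the written value select which of the lower 16 bits take effect, so a single write can set or clear one bit without a read-modify-write. A self-contained model of that convention (the STOP_RING bit position here is only a stand-in):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)
#define STOP_RING             (1u << 8)   /* stand-in bit */

/* Apply a masked write the way the hardware does. */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
        uint32_t mask = val >> 16;

        return (old & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t mi_mode = 0;

        mi_mode = masked_write(mi_mode, MASKED_BIT_ENABLE(STOP_RING));
        printf("after enable:  %#x\n", mi_mode);  /* 0x100 */
        mi_mode = masked_write(mi_mode, MASKED_BIT_DISABLE(STOP_RING));
        printf("after disable: %#x\n", mi_mode);  /* 0 */
        return 0;
}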
@@ -780,12 +819,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr_s_ss_select;
+ u32 slice = fls(sseu->slice_mask);
+ u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ if (INTEL_GEN(dev_priv) == 10)
+ mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
+ GEN8_MCR_SUBSLICE(subslice);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
+ GEN11_MCR_SUBSLICE(subslice);
+ else
+ mcr_s_ss_select = 0;
+
+ return mcr_s_ss_select;
+}
+
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
uint32_t mcr_slice_subslice_mask;
uint32_t mcr_slice_subslice_select;
+ uint32_t default_mcr_s_ss_select;
uint32_t mcr;
uint32_t ret;
enum forcewake_domains fw_domains;
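intel_calculate_mcr_s_ss_select() picks the highest-numbered present slice/subslice with fls(), which is 1-based ("find last set"). A quick user-space check of that selection, with made-up masks:

#include <stdio.h>

/* 1-based find-last-set, matching the kernel's fls() semantics. */
static int fls_(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int slice_mask = 0x1;    /* only slice 0 present */
        unsigned int subslice_mask = 0x7; /* subslices 0-2 present */

        printf("slice=%d subslice=%d\n",
               fls_(slice_mask), fls_(subslice_mask)); /* slice=1 subslice=3 */
        return 0;
}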
@@ -802,6 +861,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
GEN8_MCR_SUBSLICE(subslice);
}
+ default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -812,11 +873,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
- /*
- * The HW expects the slice and sublice selectors to be reset to 0
- * after reading out the registers.
- */
- WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+
+ WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
+ default_mcr_s_ss_select);
+
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
@@ -824,6 +884,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
ret = I915_READ_FW(reg);
mcr &= ~mcr_slice_subslice_mask;
+ mcr |= default_mcr_s_ss_select;
+
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
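The read_subslice_reg() change stops asserting that the MCR selector reads back as zero and instead restores the platform's default steering after the read. The select/read/restore dance, as an illustrative user-space model (register names and field widths here are invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t mcr;       /* stands in for GEN8_MCR_SELECTOR */
static uint32_t regs[16];  /* value of the target register per steering */

static uint32_t read_steered(uint32_t select, uint32_t mask,
                             uint32_t default_select)
{
        uint32_t val;

        mcr = (mcr & ~mask) | select;          /* steer the read */
        val = regs[mcr & mask];                /* read the target register */
        mcr = (mcr & ~mask) | default_select;  /* restore default steering */
        return val;
}

int main(void)
{
        regs[2] = 0xabcd;
        printf("%#x mcr=%#x\n", read_steered(2, 0xf, 1), mcr); /* 0xabcd mcr=0x1 */
        return 0;
}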
@@ -934,10 +996,19 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active))
- return false;
+ if (READ_ONCE(engine->execlists.active)) {
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (tasklet_trylock(&execlists->tasklet)) {
+ execlists->tasklet.func(execlists->tasklet.data);
+ tasklet_unlock(&execlists->tasklet);
+ }
- /* ELSP is empty, but there are ready requests? */
+ if (READ_ONCE(execlists->active))
+ return false;
+ }
+
+ /* ELSP is empty, but there are ready requests? E.g. after reset */
if (READ_ONCE(engine->execlists.first))
return false;
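intel_engine_is_idle() now tries to run the execlists tasklet inline before declaring the engine busy, so a stale "active" flag can be cleared on the spot. A rough user-space analogue of that opportunistic drain, with a pthread trylock standing in for tasklet_trylock():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tasklet_lock = PTHREAD_MUTEX_INITIALIZER;
static int execlists_active = 1;

static void tasklet_func(unsigned long data)
{
        execlists_active = 0;  /* process outstanding completion events */
}

int main(void)
{
        /* If no one else is running the handler, run it inline. */
        if (pthread_mutex_trylock(&tasklet_lock) == 0) {
                tasklet_func(0);
                pthread_mutex_unlock(&tasklet_lock);
        }
        printf("active=%d\n", execlists_active); /* active=0: now idle */
        return 0;
}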
@@ -978,8 +1049,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
*/
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
- const struct i915_gem_context * const kernel_context =
- engine->i915->kernel_context;
+ const struct intel_context *kernel_context =
+ to_intel_context(engine->i915->kernel_context, engine);
struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -991,7 +1062,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
*/
rq = __i915_gem_active_peek(&engine->timeline.last_request);
if (rq)
- return rq->ctx == kernel_context;
+ return rq->hw_context == kernel_context;
else
return engine->last_retired_context == kernel_context;
}
@@ -1043,6 +1114,11 @@ void intel_engines_park(struct drm_i915_private *i915)
if (engine->park)
engine->park(engine);
+ if (engine->pinned_default_state) {
+ i915_gem_object_unpin_map(engine->default_state);
+ engine->pinned_default_state = NULL;
+ }
+
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
@@ -1060,6 +1136,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
+ void *map;
+
+ /* Pin the default state for fast resets from atomic context. */
+ map = NULL;
+ if (engine->default_state)
+ map = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR_OR_NULL(map))
+ engine->pinned_default_state = map;
+
if (engine->unpark)
engine->unpark(engine);
@@ -1067,6 +1153,29 @@ void intel_engines_unpark(struct drm_i915_private *i915)
}
}
+/**
+ * intel_engine_lost_context - called when the GPU is reset into unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU, or are otherwise about to lose tracking of
+ * the current GPU logical state (e.g. across suspend). On next use, it is
+ * therefore imperative that we make no presumptions about the current state
+ * and load from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+ engine->legacy_active_context = NULL;
+ engine->legacy_active_ppgtt = NULL;
+
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+}
+
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
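intel_engine_lost_context() uses fetch_and_zero() so the cached last_retired_context is taken and cleared in one step, guaranteeing the unpin runs at most once. The macro's semantics, modeled in user space (free() stands in for intel_context_unpin()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define fetch_and_zero(ptr) ({                   \
        __typeof__(*(ptr)) T__ = *(ptr);         \
        *(ptr) = (__typeof__(*(ptr)))0;          \
        T__;                                     \
})

int main(void)
{
        char *last_retired = strdup("ce");
        char *ce = fetch_and_zero(&last_retired);

        if (ce)
                free(ce);  /* the one and only "unpin" */
        printf("slot=%p\n", (void *)last_retired); /* slot=(nil) */
        return 0;
}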
@@ -1296,6 +1405,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq, *last;
+ unsigned long flags;
struct rb_node *rb;
int count;
@@ -1362,7 +1472,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- spin_lock_irq(&engine->timeline.lock);
+ local_irq_save(flags);
+ spin_lock(&engine->timeline.lock);
last = NULL;
count = 0;
@@ -1404,16 +1515,17 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request(m, last, "\t\tQ ");
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
- spin_lock_irq(&b->rb_lock);
+ spin_lock(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
drm_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->rb_lock);
+ spin_unlock(&b->rb_lock);
+ local_irq_restore(flags);
drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
engine->irq_posted,
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e9e02b58b7be..fb2f9fce34cd 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -47,7 +47,7 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
unsigned int origin =
ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+ if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb->obj->stolen && !prealloc)
+ if (intel_fb_obj(fb)->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
- if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+ if (state == FBINFO_STATE_RUNNING &&
+ intel_fb_obj(&ifbdev->fb->base)->stolen)
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 7fff0a0eceb4..c3379bde266f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
-
- intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 116f4ccf1bbd..29fd95c1306b 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -346,10 +346,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = -EIO;
if (ret) {
- DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status,
- I915_READ(SOFT_SCRATCH(15)));
+ DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
+ action[0], ret, status);
goto out;
}
@@ -572,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 2feb65096966..f3945258fe1b 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
- engine));
+ u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
spin_lock(&client->wq_lock);
@@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
POSTING_READ_FW(GUC_STATUS);
@@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
- engine));
+ u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+ engine)->lrc_desc);
u32 data[7];
/*
@@ -623,6 +622,21 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
}
+static void complete_preempt_context(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ wait_for_guc_preempt_report(engine);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
/**
* guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
@@ -710,7 +724,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- if (last && rq->ctx != last->ctx) {
+ if (last && rq->hw_context != last->hw_context) {
if (port == last_port) {
__list_del_many(&p->requests,
&rq->sched.link);
@@ -793,20 +807,44 @@ static void guc_submission_tasklet(unsigned long data)
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
- GUC_PREEMPT_FINISHED) {
- execlists_cancel_port_requests(&engine->execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- wait_for_guc_preempt_report(engine);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
- }
+ GUC_PREEMPT_FINISHED)
+ complete_preempt_context(engine);
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
}
+static struct i915_request *
+guc_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ /*
+ * We're using a worker to queue preemption requests from the tasklet in
+ * GuC submission mode.
+ * Even though the tasklet was disabled, we may still have a worker queued.
+ * Let's make sure that all workers scheduled before disabling the
+ * tasklet have completed before continuing with the reset.
+ */
+ if (engine->i915->guc.preempt_wq)
+ flush_workqueue(engine->i915->guc.preempt_wq);
+
+ return i915_gem_find_active_request(engine);
+}
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -1119,7 +1157,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
WARN_ON(!guc_verify_doorbells(guc));
ret = guc_clients_create(guc);
if (ret)
- return ret;
+ goto err_pool;
for_each_engine(engine, dev_priv, id) {
guc->preempt_work[id].engine = engine;
@@ -1128,6 +1166,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
+err_pool:
+ guc_stage_desc_pool_destroy(guc);
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
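The intel_guc_submission_init() fix above is the standard kernel goto-unwind idiom: a failure after the pool is created must jump to a label that destroys the pool instead of returning directly. In miniature (malloc/free stand in for the GuC helpers):

#include <stdio.h>
#include <stdlib.h>

static int submission_init(int fail_clients)
{
        void *pool, *clients;
        int ret;

        pool = malloc(64);              /* guc_stage_desc_pool_create() */
        if (!pool)
                return -1;

        clients = fail_clients ? NULL : malloc(64); /* guc_clients_create() */
        if (!clients) {
                ret = -1;
                goto err_pool;          /* was: return ret (leaked pool) */
        }

        free(clients);
        free(pool);
        return 0;

err_pool:
        free(pool);                     /* guc_stage_desc_pool_destroy() */
        return ret;
}

int main(void)
{
        printf("%d %d\n", submission_init(0), submission_init(1)); /* 0 -1 */
        return 0;
}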
@@ -1267,6 +1308,9 @@ int intel_guc_submission_enable(struct intel_guc *guc)
&engine->execlists;
execlists->tasklet.func = guc_submission_tasklet;
+
+ engine->reset.prepare = guc_reset_prepare;
+
engine->park = guc_submission_park;
engine->unpark = guc_submission_unpark;
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 2db5da550a1c..0cc6a861bcf8 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -429,7 +429,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (num_downstream == 0)
return -EINVAL;
- ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo)
return -ENOMEM;
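Switching the open-coded kzalloc(n * size) to kcalloc() matters because the multiplication can overflow and silently under-allocate; kcalloc fails the allocation instead. A user-space model of that guard:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *calloc_checked(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;  /* would overflow: refuse, don't truncate */
        return calloc(n, size);
}

int main(void)
{
        void *p = calloc_checked(SIZE_MAX / 2, 4);

        printf("%p\n", p);    /* (nil) */
        free(p);
        return 0;
}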
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee929f31f7db..0ca4cc877520 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -461,7 +461,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -491,6 +492,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_hdmi->rgb_quant_range_selectable,
is_hdmi2_sink);
+ drm_hdmi_avi_infoframe_content_type(&frame.avi,
+ conn_state);
+
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
}
@@ -586,7 +590,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -727,7 +731,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -770,7 +774,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -823,7 +827,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -856,7 +860,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -1161,33 +1165,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(intel_hdmi->hdmi_reg);
-
- if (!(tmp & SDVO_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev_priv))
- *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -1421,8 +1408,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
@@ -2065,6 +2052,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+ drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15434cad5430..091e28f0e024 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -164,7 +164,8 @@
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
@@ -189,12 +190,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
!i915_request_completed(last));
}
-/**
- * intel_lr_context_descriptor_update() - calculate & cache the descriptor
- * descriptor for a pinned context
- * @ctx: Context to work on
- * @engine: Engine the descriptor will be used with
- *
+/*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
@@ -204,7 +200,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*
* bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
*
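The layout in the comment above packs three fields into one u64. A worked packing/unpacking example under those bit positions (the field values themselves are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t flags  = 0x019;            /* bits 0-11 */
        uint64_t lrca   = 0xabcdeull << 12; /* bits 12-31: 4K-aligned GTT address */
        uint64_t ctx_id = 42ull << 32;      /* bits 32-52 */
        uint64_t desc   = flags | lrca | ctx_id;

        printf("desc   = %#llx\n", (unsigned long long)desc);
        printf("ctx_id = %llu\n",
               (unsigned long long)((desc >> 32) & ((1ull << 21) - 1))); /* 42 */
        return 0;
}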
@@ -222,9 +218,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -237,6 +233,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
/* bits 12-31 */
GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+ /*
+ * The following 32 bits are copied into the OA reports (dword 2).
+ * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
+ * anything below.
+ */
if (INTEL_GEN(ctx->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
@@ -418,9 +419,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+ struct intel_context *ce = rq->hw_context;
struct i915_hw_ppgtt *ppgtt =
- rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +431,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc;
@@ -495,14 +496,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ctx));
+ i915_gem_context_force_single_submission(ce->gem_context));
}
-static bool can_merge_ctx(const struct i915_gem_context *prev,
- const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
{
if (prev != next)
return false;
@@ -552,8 +553,18 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
+static void complete_preempt_context(struct intel_engine_execlists *execlists)
+{
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
}
static bool __execlists_dequeue(struct intel_engine_cs *engine)
@@ -602,8 +613,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
- if (port_count(&port[0]) > 1)
- return false;
/*
* If we write to ELSP a second time before the HW has had
@@ -671,7 +680,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+ if (last &&
+ !can_merge_ctx(rq->hw_context, last->hw_context)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -690,14 +700,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->ctx) ||
- ctx_single_port_submission(rq->ctx)) {
+ if (ctx_single_port_submission(last->hw_context) ||
+ ctx_single_port_submission(rq->hw_context)) {
__list_del_many(&p->requests,
&rq->sched.link);
goto done;
}
- GEM_BUG_ON(last->ctx == rq->ctx);
+ GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -947,34 +957,14 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
local_irq_restore(flags);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+static void process_csb(struct intel_engine_cs *engine)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
bool fw = false;
- /*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
- */
- GEM_BUG_ON(!dev_priv->gt.awake);
-
- /*
- * Prefer doing test_and_clear_bit() as a two stage operation to avoid
- * imposing the cost of a locked atomic transaction when submitting a
- * new request (outside of the context-switch interrupt).
- */
- while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ do {
/* The HWSP contains a (cacheable) mirror of the CSB */
const u32 *buf =
&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -982,28 +972,27 @@ static void execlists_submission_tasklet(unsigned long data)
if (unlikely(execlists->csb_use_mmio)) {
buf = (u32 * __force)
- (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
- execlists->csb_head = -1; /* force mmio read of CSB ptrs */
+ (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_head = -1; /* force mmio read of CSB */
}
/* Clear before reading to catch new interrupts */
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
smp_mb__after_atomic();
- if (unlikely(execlists->csb_head == -1)) { /* following a reset */
+ if (unlikely(execlists->csb_head == -1)) { /* after a reset */
if (!fw) {
- intel_uncore_forcewake_get(dev_priv,
- execlists->fw_domains);
+ intel_uncore_forcewake_get(i915, execlists->fw_domains);
fw = true;
}
- head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ head = readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
tail = GEN8_CSB_WRITE_PTR(head);
head = GEN8_CSB_READ_PTR(head);
execlists->csb_head = head;
} else {
const int write_idx =
- intel_hws_csb_write_index(dev_priv) -
+ intel_hws_csb_write_index(i915) -
I915_HWS_CSB_BUF0_INDEX;
head = execlists->csb_head;
@@ -1012,8 +1001,8 @@ static void execlists_submission_tasklet(unsigned long data)
}
GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
engine->name,
- head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
- tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
+ head, GEN8_CSB_READ_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
+ tail, GEN8_CSB_WRITE_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
while (head != tail) {
struct i915_request *rq;
@@ -1023,7 +1012,8 @@ static void execlists_submission_tasklet(unsigned long data)
if (++head == GEN8_CSB_ENTRIES)
head = 0;
- /* We are flying near dragons again.
+ /*
+ * We are flying near dragons again.
*
* We hold a reference to the request in execlist_port[]
* but no more than that. We are operating in softirq
@@ -1063,14 +1053,7 @@ static void execlists_submission_tasklet(unsigned long data)
if (status & GEN8_CTX_STATUS_COMPLETE &&
buf[2*head + 1] == execlists->preempt_complete_status) {
GEM_TRACE("%s preempt-idle\n", engine->name);
-
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT));
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
+ complete_preempt_context(execlists);
continue;
}
@@ -1139,15 +1122,48 @@ static void execlists_submission_tasklet(unsigned long data)
if (head != execlists->csb_head) {
execlists->csb_head = head;
writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
}
- }
+ } while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted));
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
- execlists_dequeue(engine);
+ if (unlikely(fw))
+ intel_uncore_forcewake_put(i915, execlists->fw_domains);
+}
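process_csb() consumes the context-status buffer as a small ring: head is advanced (with wrap) up to the tail the hardware reported, reading one event per step. The walk in isolation, as a runnable sketch with six entries like GEN8_CSB_ENTRIES:

#include <stdio.h>

#define CSB_ENTRIES 6

int main(void)
{
        unsigned int buf[CSB_ENTRIES] = { 10, 11, 12, 13, 14, 15 };
        int head = 4, tail = 2;  /* the walk wraps past the end */

        while (head != tail) {
                if (++head == CSB_ENTRIES)
                        head = 0;
                printf("event %u\n", buf[head]); /* 15, 10, 11, 12 */
        }
        return 0;
}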
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+ GEM_TRACE("%s awake?=%d, active=%x, irq-posted?=%d\n",
+ engine->name,
+ engine->i915->gt.awake,
+ engine->execlists.active,
+ test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted));
+
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!engine->i915->gt.awake);
- if (fw)
- intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+ /*
+ * Prefer doing test_and_clear_bit() as a two stage operation to avoid
+ * imposing the cost of a locked atomic transaction when submitting a
+ * new request (outside of the context-switch interrupt).
+ */
+ if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+ process_csb(engine);
+
+ if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
+ execlists_dequeue(engine);
/* If the engine is now idle, so should be the flag; and vice versa. */
GEM_BUG_ON(execlists_is_active(&engine->execlists,
@@ -1322,6 +1338,26 @@ static void execlists_schedule(struct i915_request *request,
spin_unlock_irq(&engine->timeline.lock);
}
+static void execlists_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(!ce->state);
+ GEM_BUG_ON(ce->pin_count);
+
+ intel_ring_free(ce->ring);
+ __i915_gem_object_release_unless_active(ce->state->obj);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+ intel_ring_unpin(ce->ring);
+
+ ce->state->obj->pin_global--;
+ i915_gem_object_unpin_map(ce->state->obj);
+ i915_vma_unpin(ce->state);
+
+ i915_gem_context_put(ce->gem_context);
+}
+
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
{
unsigned int flags;
@@ -1345,21 +1381,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
}
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
void *vaddr;
int ret;
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
- ret = execlists_context_deferred_alloc(ctx, engine);
+ ret = execlists_context_deferred_alloc(ctx, engine, ce);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
@@ -1378,7 +1408,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- intel_lr_context_descriptor_update(ctx, engine);
+ intel_lr_context_descriptor_update(ctx, engine, ce);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
@@ -1387,8 +1417,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
-out:
- return ce->ring;
+ return ce;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
@@ -1399,33 +1428,33 @@ err:
return ERR_PTR(ret);
}
-static void execlists_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+ .unpin = execlists_context_unpin,
+ .destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
-
- intel_ring_unpin(ce->ring);
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- ce->state->obj->pin_global--;
- i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
+ ce->ops = &execlists_context_ops;
- i915_gem_context_put(ctx);
+ return __execlists_context_pin(engine, ctx, ce);
}
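execlists_context_pin() keeps the cheap path in the caller-visible function: only the first pin pays for allocation and mapping, later pins are a bare pin_count increment, and ce->ops lets intel_context_unpin() dispatch without knowing the backend. The refcount half, as a tiny user-space model:

#include <stdio.h>

struct ctx {
        int pin_count;
};

static struct ctx *context_pin(struct ctx *ce)
{
        if (ce->pin_count++)           /* already pinned: cheap path */
                return ce;
        printf("first pin: allocate + map state\n");
        return ce;
}

static void context_unpin(struct ctx *ce)
{
        if (--ce->pin_count)
                return;
        printf("last unpin: release state\n");
}

int main(void)
{
        struct ctx c = { 0 };

        context_pin(&c);
        context_pin(&c);   /* no setup message */
        context_unpin(&c);
        context_unpin(&c); /* release message */
        return 0;
}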
static int execlists_request_alloc(struct i915_request *request)
{
- struct intel_context *ce =
- to_intel_context(request->ctx, request->engine);
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -1642,7 +1671,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1757,12 +1786,25 @@ static void enable_execlists(struct intel_engine_cs *engine)
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ I915_WRITE(RING_MI_MODE(engine->mmio_base),
+ _MASKED_BIT_DISABLE(STOP_RING));
+
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
engine->status_page.ggtt_offset);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
- /* Following the reset, we need to reload the CSB read/write pointers */
- engine->execlists.csb_head = -1;
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ bool unexpected = false;
+
+ if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+ DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
}
static int gen8_init_common_ring(struct intel_engine_cs *engine)
@@ -1777,6 +1819,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
intel_engine_init_hangcheck(engine);
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p, NULL);
+ }
+
enable_execlists(engine);
/* After a GPU reset, we may have requests to replay */
@@ -1823,8 +1871,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
return 0;
}
-static void reset_common_ring(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *request, *active;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ /*
+ * We want to flush the pending context switches: having disabled
+ * the tasklet above, we can assume exclusive access to the execlists.
+ * This allows us to catch up with an inflight preemption event,
+ * and avoid blaming an innocent request if the stall was due to the
+ * preemption itself.
+ */
+ if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+ process_csb(engine);
+
+ /*
+ * The last active request can then be no later than the last request
+ * now in ELSP[0]. So search backwards from there, so that if the GPU
+ * has advanced beyond the last CSB update, it will be pardoned.
+ */
+ active = NULL;
+ request = port_request(execlists->port);
+ if (request) {
+ unsigned long flags;
+
+ /*
+ * Prevent the breadcrumb from advancing before we decide
+ * which request is currently active.
+ */
+ intel_engine_stop_cs(engine);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ list_for_each_entry_from_reverse(request,
+ &engine->timeline.requests,
+ link) {
+ if (__i915_request_completed(request,
+ request->global_seqno))
+ break;
+
+ active = request;
+ }
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ }
+
+ return active;
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
@@ -1854,6 +1963,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
__unwind_incomplete_requests(engine);
spin_unlock(&engine->timeline.lock);
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ engine->execlists.csb_head = -1;
+
local_irq_restore(flags);
/*
@@ -1878,20 +1990,14 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
- if (engine->default_state) {
- void *defaults;
-
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR(defaults)) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- defaults + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- i915_gem_object_unpin_map(engine->default_state);
- }
+ regs = request->hw_context->lrc_reg_state;
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
}
- execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+ execlists_init_reg_state(regs,
+ request->gem_context, engine, request->ring);
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
@@ -1904,9 +2010,25 @@ static void reset_common_ring(struct intel_engine_cs *engine,
unwind_wa_tail(request);
}
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ /*
+ * Flush the tasklet while we still have the forcewake to be sure
+ * that it is not allowed to sleep before we restart and reload a
+ * context.
+ *
+ * As before (with execlists_reset_prepare) we rely on the caller
+ * serialising multiple attempts to reset so that we know that we
+ * are the only one manipulating tasklet state.
+ */
+ __tasklet_enable_sync_once(&engine->execlists.tasklet);
+
+ GEM_TRACE("%s\n", engine->name);
+}
+
static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
@@ -1945,15 +2067,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit. */
- if (rq->ctx->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ if (rq->gem_context->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+ rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
cs = intel_ring_begin(rq, 6);
@@ -2214,6 +2336,8 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->schedule = execlists_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
+ engine->reset.prepare = execlists_reset_prepare;
+
engine->park = NULL;
engine->unpark = NULL;
@@ -2233,11 +2357,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
- engine->reset_hw = reset_common_ring;
- engine->context_pin = execlists_context_pin;
- engine->context_unpin = execlists_context_unpin;
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
+ engine->context_pin = execlists_context_pin;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
@@ -2340,6 +2465,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
upper_32_bits(ce->lrc_desc);
}
+ engine->execlists.csb_head = -1;
+
return 0;
error:
@@ -2472,7 +2599,7 @@ static void execlists_init_reg_state(u32 *regs,
struct drm_i915_private *dev_priv = engine->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
- bool rcs = engine->id == RCS;
+ bool rcs = engine->class == RENDER_CLASS;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2540,7 +2667,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
@@ -2619,10 +2746,10 @@ err_unpin_ctx:
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
struct drm_i915_gem_object *ctx_obj;
- struct intel_context *ce = to_intel_context(ctx, engine);
struct i915_vma *vma;
uint32_t context_size;
struct intel_ring *ring;
@@ -2646,7 +2773,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
goto error_deref_obj;
}
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4ec7d8dd13c8..1593194e930c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,11 +104,4 @@ struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- return to_intel_context(ctx, engine)->lrc_desc;
-}
-
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d8ece907ff54..e05c12e7629c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -85,34 +85,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn
return container_of(connector, struct intel_lvds_connector, base.base);
}
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(lvds_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+
+ return val & LVDS_PORT_EN;
+}
+
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(lvds_encoder->reg);
-
- if (!(tmp & LVDS_PORT_EN))
- goto out;
+ ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
-
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -255,14 +256,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
+ temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+ temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
+ temp &= ~LVDS_PIPE_SEL_MASK;
+ temp |= LVDS_PIPE_SEL(pipe);
}
/* set the corresponsding LVDS_BORDER bit */
@@ -574,6 +572,36 @@ exit:
return NOTIFY_OK;
}
+static int
+intel_lvds_connector_register(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ lvds->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
+ lvds->lid_notifier.notifier_call = NULL;
+ }
+
+ return 0;
+}
+
+static void
+intel_lvds_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+
+ if (lvds->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds->lid_notifier);
+
+ intel_connector_unregister(connector);
+}
+
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -586,9 +614,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
struct intel_lvds_connector *lvds_connector =
to_lvds_connector(connector);
- if (lvds_connector->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
-
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
@@ -609,8 +634,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_lvds_connector_register,
+ .early_unregister = intel_lvds_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -827,6 +852,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Radiant P845",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
+ },
+ },
{ } /* terminating entry */
};
@@ -908,7 +941,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* register is uninitialized.
*/
val = I915_READ(lvds_encoder->reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ if (HAS_PCH_CPT(dev_priv))
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+ else
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+ if (val == 0)
val = dev_priv->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
@@ -963,8 +1000,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
/* Skip init on machines we know falsely report LVDS */
- if (dmi_check_system(intel_no_lvds))
+ if (dmi_check_system(intel_no_lvds)) {
+ WARN(!dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
+ }
+
+ if (!dev_priv->vbt.int_lvds_support) {
+ DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+ return;
+ }
if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
@@ -976,10 +1021,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
- if (dev_priv->vbt.edp.support) {
- DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
- }
}
pin = GMBUS_PIN_PANEL;
@@ -1149,12 +1190,6 @@ out:
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
- lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
- DRM_DEBUG_KMS("lid notifier registration failed\n");
- lvds_connector->lid_notifier.notifier_call = NULL;
- }
-
return;
failed:
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index db27f2faa1de..71dfe541740f 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -97,10 +97,6 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
u32 debug_mask, mask;
- /* No PSR interrupts on VLV/CHV */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
mask = EDP_PSR_ERROR(TRANSCODER_EDP);
debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +197,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
- return false;
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
@@ -232,13 +219,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- u8 val = 0;
+ u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_ERROR("Unable to get sink synchronization latency\n");
+ DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
@@ -250,13 +237,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0]) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ if (!intel_dp->psr_dpcd[0])
+ return;
+ DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ return;
}
+ dev_priv->psr.sink_support = true;
+ dev_priv->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ bool y_req = intel_dp->psr_dpcd[1] &
+ DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ bool alpm = intel_dp_get_alpm_status(intel_dp);
+
/*
* All panels that support PSR version 03h (PSR2 +
* Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,47 +267,17 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support =
- intel_dp_get_y_coord_required(intel_dp);
- DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
- ? "supported" : "not supported");
+ dev_priv->psr.sink_psr2_support = y_req && alpm;
+ DRM_DEBUG_KMS("PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- dev_priv->psr.sink_sync_latency =
- intel_dp_get_sink_sync_latency(intel_dp);
}
}
}
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val;
-
- val = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t val;
-
- /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(crtc->pipe));
- val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
- val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -341,12 +310,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -389,13 +352,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
-
- if (dev_priv->psr.psr2_enabled)
+ if (dev_priv->psr.psr2_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2;
+ }
+
if (dev_priv->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
@@ -403,81 +365,49 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
- I915_WRITE(VLV_PSRCTL(crtc->pipe),
- VLV_EDP_PSR_MODE_SW_TIMER |
- VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
- VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- /*
- * Let's do the transition from PSR_state 1 (inactive) to
- * PSR_state 2 (transition to active - static frame transmission).
- * Then Hardware is responsible for the transition to
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
- */
- I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
- VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
- uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = EDP_PSR_ENABLE;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ /* sink_sync_latency of 8 means the source has to wait for more than
+ * 8 frames; we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
- val |= EDP_PSR_TP1_TIME_2500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
- val |= EDP_PSR_TP1_TIME_500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+ val |= EDP_PSR_TP1_TIME_0us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP1_TIME_500us;
else
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR_TP2_TP3_TIME_2500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR_TP2_TP3_TIME_500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
else
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
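
The VBT wakeup times are now expressed in microseconds and bucketed up to the nearest value the TP1/TP2_TP3 register fields can encode. A standalone sketch of the bucketing, with invented enum tokens in place of the EDP_PSR_TP1_TIME_* field values:

#include <stdio.h>

/* Stand-ins for the register field encodings in i915_reg.h. */
enum tp1_time { TP1_0US, TP1_100US, TP1_500US, TP1_2500US };

/* Map a VBT wakeup time in microseconds to the nearest field that covers it. */
static enum tp1_time tp1_time_from_us(int wakeup_time_us)
{
	if (wakeup_time_us == 0)
		return TP1_0US;
	else if (wakeup_time_us <= 100)
		return TP1_100US;
	else if (wakeup_time_us <= 500)
		return TP1_500US;
	else
		return TP1_2500US;
}

int main(void)
{
	int samples[] = { 0, 50, 100, 250, 500, 1000 };
	int n = sizeof(samples) / sizeof(samples[0]);

	for (int i = 0; i < n; i++)
		printf("%4dus -> bucket %d\n", samples[i],
		       tp1_time_from_us(samples[i]));
	return 0;
}
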
@@ -494,15 +424,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ u32 val;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,14 +443,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR2_TP2_TIME_2500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR2_TP2_TIME_500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
- val |= EDP_PSR2_TP2_TIME_100;
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ val |= EDP_PSR2_TP2_TIME_50us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR2_TP2_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR2_TP2_TIME_500us;
else
- val |= EDP_PSR2_TP2_TIME_50;
+ val |= EDP_PSR2_TP2_TIME_2500us;
I915_WRITE(EDP_PSR2_CTL, val);
}
@@ -602,17 +533,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on port A.
*/
- if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+ if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->psr.link_standby) {
- DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return;
- }
-
if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
@@ -640,11 +565,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
- return;
- }
-
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -760,7 +680,6 @@ void intel_psr_enable(struct intel_dp *intel_dp,
* enabled.
* However on some platforms we face issues when first
* activation follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
* - On HSW/BDW we get a recoverable frozen screen until
* next exit-activate sequence.
*/
@@ -772,36 +691,6 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- uint32_t val;
-
- if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 (disabled). */
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(crtc->pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1))
- WARN(1, "PSR transition took longer than expected\n");
-
- val = I915_READ(VLV_PSRCTL(crtc->pipe));
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- val &= ~VLV_EDP_PSR_ENABLE;
- val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
- }
-}
-
static void hsw_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
@@ -894,21 +783,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
if (!intel_dp)
return false;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
- mask = EDP_PSR2_STATUS_STATE_MASK;
- } else {
- reg = EDP_PSR_STATUS;
- mask = EDP_PSR_STATUS_STATE_MASK;
- }
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- struct drm_crtc *crtc =
- dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- reg = VLV_PSRSTAT(pipe);
- mask = VLV_EDP_PSR_IN_TRANS;
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&dev_priv->psr.lock);
@@ -953,103 +833,24 @@ unlock:
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
if (!dev_priv->psr.active)
return;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * Here we do the transition drirectly from
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
- * PSR_state 5 (exit).
- * PSR State 4 (active with single frame update) can be skipped.
- * On PSR_state 5 (exit) Hardware is responsible to transition
- * back to PSR_state 1 (inactive).
- * Now we are at Same state after vlv_psr_enable_source.
- */
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- I915_WRITE(VLV_PSRCTL(pipe), val);
-
- /*
- * Send AUX wake up - Spec says after transitioning to PSR
- * active we have to send AUX wake up by writing 01h in DPCD
- * 600h of sink device.
- * XXX: This might slow down the transition, but without this
- * HW doesn't complete the transition to PSR_state 1 and we
- * never get the screen updated.
- */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
- DP_SET_POWER_D0);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
-
dev_priv->psr.active = false;
}
/**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits)
-{
- struct drm_crtc *crtc;
- enum pipe pipe;
- u32 val;
-
- if (!CAN_PSR(dev_priv))
- return;
-
- /*
- * Single frame update is already supported on BDW+ but it requires
- * many W/A and it isn't really needed.
- */
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- return;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
- }
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
* intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
@@ -1071,7 +872,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +915,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +932,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (dev_priv->psr.psr2_enabled) {
intel_psr_exit(dev_priv);
} else {
/*
@@ -1184,9 +984,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- /* On VLV and CHV only standby mode is supported. */
- dev_priv->psr.link_standby = true;
else
/* For new platforms let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
@@ -1204,18 +1001,10 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->psr.enable_source = vlv_psr_enable_source;
- dev_priv->psr.disable_source = vlv_psr_disable;
- dev_priv->psr.enable_sink = vlv_psr_enable_sink;
- dev_priv->psr.activate = vlv_psr_activate;
- dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
- } else {
- dev_priv->psr.has_hw_tracking = true;
- dev_priv->psr.enable_source = hsw_psr_enable_source;
- dev_priv->psr.disable_source = hsw_psr_disable;
- dev_priv->psr.enable_sink = hsw_psr_enable_sink;
- dev_priv->psr.activate = hsw_psr_activate;
- dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
- }
+ dev_priv->psr.enable_source = hsw_psr_enable_source;
+ dev_priv->psr.disable_source = hsw_psr_disable;
+ dev_priv->psr.enable_sink = hsw_psr_enable_sink;
+ dev_priv->psr.activate = hsw_psr_activate;
+ dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8f19349a6055..65811e2fa7da 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -531,9 +531,22 @@ out:
return ret;
}
-static void reset_ring_common(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
+ intel_engine_stop_cs(engine);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ return i915_gem_find_active_request(engine);
+}
+
+static void reset_ring(struct intel_engine_cs *engine,
+ struct i915_request *request)
+{
+ GEM_TRACE("%s seqno=%x\n",
+ engine->name, request ? request->global_seqno : 0);
+
/*
* RC6 must be prevented until the reset is complete and the engine
* reinitialised. If it occurs in the middle of this sequence, the
@@ -558,8 +571,7 @@ static void reset_ring_common(struct intel_engine_cs *engine,
*/
if (request) {
struct drm_i915_private *dev_priv = request->i915;
- struct intel_context *ce = to_intel_context(request->ctx,
- engine);
+ struct intel_context *ce = request->hw_context;
struct i915_hw_ppgtt *ppgtt;
if (ce->state) {
@@ -571,7 +583,7 @@ static void reset_ring_common(struct intel_engine_cs *engine,
CCID_EN);
}
- ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+ ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
if (ppgtt) {
u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
@@ -597,6 +609,10 @@ static void reset_ring_common(struct intel_engine_cs *engine,
}
}
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
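
The single reset_hw hook is split into a prepare/reset/finish triple so the caller can stop the command streamer and snapshot the active request before any engine state is touched. A toy standalone version of that flow, with stub types standing in for the i915 structures:

#include <stdio.h>

struct engine { const char *name; };
struct request { unsigned global_seqno; };

/* Mirrors the shape of the new engine->reset vtable. */
struct reset_ops {
	struct request *(*prepare)(struct engine *e);
	void (*reset)(struct engine *e, struct request *rq);
	void (*finish)(struct engine *e);
};

static struct request *ring_prepare(struct engine *e)
{
	printf("%s: stop command streamer, find active request\n", e->name);
	return NULL; /* nothing in flight in this sketch */
}

static void ring_reset(struct engine *e, struct request *rq)
{
	/* rq may be NULL when no request was executing */
	printf("%s: reset, seqno=%x\n", e->name, rq ? rq->global_seqno : 0);
}

static void ring_finish(struct engine *e)
{
	(void)e; /* legacy ring backend has nothing to undo */
}

static const struct reset_ops ring_reset_ops = {
	.prepare = ring_prepare,
	.reset = ring_reset,
	.finish = ring_finish,
};

int main(void)
{
	struct engine rcs = { "rcs0" };
	struct request *rq = ring_reset_ops.prepare(&rcs);

	ring_reset_ops.reset(&rcs, rq);
	ring_reset_ops.finish(&rcs);
	return 0;
}
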
static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
@@ -1033,6 +1049,8 @@ int intel_ring_pin(struct intel_ring *ring,
flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@ -1105,7 +1123,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
@@ -1169,10 +1187,22 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int context_pin(struct intel_context *ce)
+static void intel_ring_context_destroy(struct intel_context *ce)
{
- struct i915_vma *vma = ce->state;
- int ret;
+ GEM_BUG_ON(ce->pin_count);
+
+ if (ce->state)
+ __i915_gem_object_release_unless_active(ce->state->obj);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = ce->state;
+ if (!vma)
+ return 0;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
@@ -1180,13 +1210,42 @@ static int context_pin(struct intel_context *ce)
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (ret)
- return ret;
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
}
- return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ return err;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ vma->obj->pin_global++;
+
+ return 0;
+}
+
+static void __context_unpin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+
+ vma = ce->state;
+ if (!vma)
+ return;
+
+ vma->obj->pin_global--;
+ i915_vma_unpin(vma);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+ __context_unpin(ce);
+
+ i915_gem_context_put(ce->gem_context);
}
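
__context_pin()/__context_unpin() now own the pin_global accounting while the pin_count fast path lives in the caller (seen further down in this hunk). A simplified standalone sketch that collapses the two levels into one pin/unpin pair, using invented struct names:

#include <assert.h>

struct obj { int pin_global; };
struct ctx { struct obj *state; int pin_count; };

/* Only the first pin touches the object; later pins just bump the count. */
static void ctx_pin(struct ctx *ce)
{
	if (ce->pin_count++)
		return;
	if (ce->state)
		ce->state->pin_global++; /* shrinker must leave this alone */
}

static void ctx_unpin(struct ctx *ce)
{
	assert(ce->pin_count > 0);
	if (--ce->pin_count)
		return;
	if (ce->state)
		ce->state->pin_global--; /* last unpin releases the hold */
}

int main(void)
{
	struct obj state = { 0 };
	struct ctx ce = { &state, 0 };

	ctx_pin(&ce);
	ctx_pin(&ce); /* nested pin */
	assert(state.pin_global == 1);
	ctx_unpin(&ce);
	ctx_unpin(&ce);
	assert(state.pin_global == 0);
	return 0;
}
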
static struct i915_vma *
@@ -1243,7 +1302,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
}
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1258,67 +1317,62 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int ret;
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ int err;
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ err = PTR_ERR(vma);
goto err;
}
ce->state = vma;
}
- if (ce->state) {
- ret = context_pin(ce);
- if (ret)
- goto err;
-
- ce->state->obj->pin_global++;
- }
+ err = __context_pin(ce);
+ if (err)
+ goto err;
i915_gem_context_get(ctx);
-out:
/* One ringbuffer to rule them all */
- return engine->buffer;
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+
+ return ce;
err:
ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ERR_PTR(err);
}
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+ .unpin = intel_ring_context_unpin,
+ .destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- if (ce->state) {
- ce->state->obj->pin_global--;
- i915_vma_unpin(ce->state);
- }
+ ce->ops = &ring_context_ops;
- i915_gem_context_put(ctx);
+ return __ring_context_pin(engine, ctx, ce);
}
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
@@ -1329,10 +1383,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
intel_engine_setup_common(engine);
- err = intel_engine_init_common(engine);
- if (err)
- goto err;
-
timeline = i915_timeline_create(engine->i915, engine->name);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
@@ -1354,8 +1404,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err_unpin;
+
return 0;
+err_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_free(ring);
err:
@@ -1441,7 +1497,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1509,7 +1565,7 @@ static int remap_l3(struct i915_request *rq, int slice)
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *to_ctx = rq->ctx;
+ struct i915_gem_context *to_ctx = rq->gem_context;
struct i915_hw_ppgtt *to_mm =
to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
struct i915_gem_context *from_ctx = engine->legacy_active_context;
@@ -1532,7 +1588,7 @@ static int switch_context(struct i915_request *rq)
hw_flags = MI_FORCE_RESTORE;
}
- if (to_intel_context(to_ctx, engine)->state &&
+ if (rq->hw_context->state &&
(to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
GEM_BUG_ON(engine->id != RCS);
@@ -1580,7 +1636,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -2006,11 +2062,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
- engine->reset_hw = reset_ring_common;
+ engine->reset.prepare = reset_prepare;
+ engine->reset.reset = reset_ring;
+ engine->reset.finish = reset_finish;
engine->context_pin = intel_ring_context_pin;
- engine->context_unpin = intel_ring_context_unpin;
-
engine->request_alloc = ring_request_alloc;
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 010750e8ee44..acef385c4c80 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -342,6 +342,7 @@ struct intel_engine_cs {
struct i915_timeline timeline;
struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
atomic_t irq_count;
unsigned long irq_posted;
@@ -423,18 +424,22 @@ struct intel_engine_cs {
void (*irq_disable)(struct intel_engine_cs *engine);
int (*init_hw)(struct intel_engine_cs *engine);
- void (*reset_hw)(struct intel_engine_cs *engine,
- struct i915_request *rq);
+
+ struct {
+ struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine,
+ struct i915_request *rq);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
void (*set_default_submission)(struct intel_engine_cs *engine);
- struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
- void (*context_unpin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+
int (*request_alloc)(struct i915_request *rq);
int (*init_context)(struct i915_request *rq);
@@ -550,7 +555,7 @@ struct intel_engine_cs {
* to the kernel context and trash it as the save may not happen
* before the hardware is powered down.
*/
- struct i915_gem_context *last_retired_context;
+ struct intel_context *last_retired_context;
/* We track the current MI_SET_CONTEXT in order to eliminate
* redundant context switches. This presumes that requests are not
@@ -873,6 +878,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
@@ -1046,6 +1053,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 25005023c243..a02e4d73c7a4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1403,27 +1403,37 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
return false;
}
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(sdvo_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+ return val & SDVO_ENABLE;
+}
+
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
- u32 tmp;
+ bool ret;
- tmp = I915_READ(intel_sdvo->sdvo_reg);
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
- return false;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
+ ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
- return true;
+ return ret || active_outputs;
}
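
The new helper decouples "which pipe is selected" from "is the port enabled", so asserts can learn the pipe even for a disabled port. A standalone sketch of that shape with made-up register bits (the real SDVO_ENABLE and pipe-select masks differ per platform):

#include <stdint.h>
#include <stdio.h>

#define PORT_ENABLE_BIT (1u << 31) /* made-up; real SDVO_ENABLE differs */
#define PIPE_SEL_SHIFT  29
#define PIPE_SEL_MASK   (3u << PIPE_SEL_SHIFT)

/* Report the selected pipe even when the port is disabled; the enable
 * bit is the return value, as in intel_sdvo_port_enabled(). */
static int port_enabled(uint32_t val, int *pipe)
{
	*pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
	return !!(val & PORT_ENABLE_BIT);
}

int main(void)
{
	int pipe;
	int on = port_enabled(PORT_ENABLE_BIT | (1u << PIPE_SEL_SHIFT), &pipe);

	printf("enabled=%d pipe=%c\n", on, 'A' + pipe);
	return 0;
}
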
static void intel_sdvo_get_config(struct intel_encoder *encoder,
@@ -1550,8 +1560,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
intel_sdvo_write_sdvox(intel_sdvo, temp);
temp &= ~SDVO_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ee23613f9fd4..1160bc3a1e8e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -284,13 +284,35 @@ skl_update_plane(struct intel_plane *plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+
+ /* TODO: handle sub-pixel coordinates */
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ y_hphase = skl_scaler_calc_phase(1, false);
+ y_vphase = skl_scaler_calc_phase(1, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
- scaler = &crtc_state->scaler_state.scalers[scaler_id];
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ }
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
@@ -327,19 +349,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
bool
-skl_plane_get_hw_state(struct intel_plane *plane)
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -588,19 +612,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-vlv_plane_get_hw_state(struct intel_plane *plane)
+vlv_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+ ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -754,18 +780,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-ivb_plane_get_hw_state(struct intel_plane *plane)
+ivb_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+ ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -910,18 +938,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-g4x_plane_get_hw_state(struct intel_plane *plane)
+g4x_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+ ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -1211,8 +1241,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_YUYV:
@@ -1228,8 +1267,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool snb_mod_supported(uint32_t format, uint64_t modifier)
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1246,8 +1294,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
@@ -1269,8 +1326,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1302,38 +1377,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = g4x_sprite_format_mod_supported,
+};
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
+static const struct drm_plane_funcs snb_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = snb_sprite_format_mod_supported,
+};
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 6)
- return snb_mod_supported(format, modifier);
- else
- return g4x_mod_supported(format, modifier);
-}
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = vlv_sprite_format_mod_supported,
+};
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_sprite_plane_format_mod_supported,
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
};
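
Each platform now gets its own drm_plane_funcs with a dedicated .format_mod_supported hook that rejects unknown modifiers before checking formats. A standalone sketch of that two-switch shape, with invented token values in place of the DRM fourccs and modifiers:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative token values only; real DRM fourccs and modifiers differ. */
enum { FMT_XRGB8888, FMT_YUYV, FMT_RGB565 };
enum { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED };

/* Reject unknown modifiers first, then check the pixel format -- the
 * shape shared by the per-platform hooks above. */
static bool sprite_format_mod_supported(uint32_t format, uint64_t modifier)
{
	switch (modifier) {
	case MOD_LINEAR:
	case MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case FMT_XRGB8888:
	case FMT_YUYV:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	return !(sprite_format_mod_supported(FMT_XRGB8888, MOD_X_TILED) &&
		 !sprite_format_mod_supported(FMT_XRGB8888, MOD_Y_TILED) &&
		 !sprite_format_mod_supported(FMT_RGB565, MOD_LINEAR));
}
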
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@ -1359,6 +1444,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
const uint64_t *modifiers;
@@ -1383,6 +1469,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->can_scale = true;
state->scaler_id = -1;
+ intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_SPRITE0 + plane);
+
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
@@ -1396,10 +1485,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+ if (intel_plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
+
+ plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@@ -1411,6 +1502,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
@@ -1427,6 +1520,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &snb_sprite_funcs;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
@@ -1439,9 +1534,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+ plane_funcs = &snb_sprite_funcs;
} else {
plane_formats = g4x_plane_formats;
num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+
+ plane_funcs = &g4x_sprite_funcs;
}
}
@@ -1468,14 +1567,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 885fc3809f7f..99bc2368dda0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp = I915_READ(TV_CTL);
- if (!(tmp & TV_ENC_ENABLE))
- return false;
+ *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return tmp & TV_ENC_ENABLE;
}
static void
@@ -1024,8 +1020,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
break;
}
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_ctl |= tv_mode->oversample;
if (tv_mode->progressive)
@@ -1149,12 +1144,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
/* Poll for TV detection */
- tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
- else
- tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 1cffaf7b5dbe..6a73e81f373b 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
-static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
int enable_guc = 0;
/* Default is to enable GuC/HuC if we know their firmwares */
@@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
return enable_guc;
}
-static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
int guc_log_level;
- if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+ if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
guc_log_level = GUC_LOG_LEVEL_DISABLED;
else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
/**
* sanitize_options_early - sanitize uC related modparam options
- * @dev_priv: device private
+ * @i915: device private
*
* In case of "enable_guc" option this function will attempt to modify
* it only if it was initially set to "auto(-1)". Default value for this
@@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
* unless GuC is enabled on given platform and the driver is compiled with
* debug config when this modparam will default to "enable(1..4)".
*/
-static void sanitize_options_early(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
/* A negative value means "use platform default" */
if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+ i915_modparams.enable_guc = __get_platform_enable_guc(i915);
DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
i915_modparams.enable_guc,
@@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "no GuC firmware");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "no GuC firmware");
}
/* Verify HuC firmware availability */
if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(dev_priv) ? "no HuC hardware" :
- "no HuC firmware");
+ !HAS_HUC(i915) ? "no HuC hardware" :
+ "no HuC firmware");
}
/* A negative value means "use platform/config default" */
if (i915_modparams.guc_log_level < 0)
i915_modparams.guc_log_level =
- __get_default_guc_log_level(dev_priv);
+ __get_default_guc_log_level(i915);
if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "GuC not enabled");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "GuC not enabled");
i915_modparams.guc_log_level = 0;
}
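
Both enable_guc and guc_log_level follow the same convention: a negative modparam means "auto" and is resolved to a platform default exactly once, here. A trivial standalone sketch of that resolution:

#include <stdio.h>

/* A negative value means "use platform default"; anything else is kept. */
static int resolve_modparam(int requested, int platform_default)
{
	return requested < 0 ? platform_default : requested;
}

int main(void)
{
	printf("auto(-1) -> %d\n", resolve_modparam(-1, 2)); /* default wins */
	printf("user(0)  -> %d\n", resolve_modparam(0, 2));  /* user wins */
	return 0;
}
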
@@ -195,15 +195,14 @@ void intel_uc_cleanup_early(struct drm_i915_private *i915)
/**
* intel_uc_init_mmio - setup uC MMIO access
- *
- * @dev_priv: device private
+ * @i915: device private
*
* Setup minimal state necessary for MMIO accesses later in the
* initialization sequence.
*/
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
+void intel_uc_init_mmio(struct drm_i915_private *i915)
{
- intel_guc_init_send_regs(&dev_priv->guc);
+ intel_guc_init_send_regs(&i915->guc);
}
static void guc_capture_load_err_log(struct intel_guc *guc)
@@ -225,11 +224,11 @@ static void guc_free_load_err_log(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- gen9_enable_guc_interrupts(dev_priv);
+ gen9_enable_guc_interrupts(i915);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
return intel_guc_ct_enable(&guc->ct);
guc->send = intel_guc_send_mmio;
@@ -239,23 +238,23 @@ static int guc_enable_communication(struct intel_guc *guc)
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
intel_guc_ct_disable(&guc->ct);
- gen9_disable_guc_interrupts(dev_priv);
+ gen9_disable_guc_interrupts(i915);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
}
-int intel_uc_init_misc(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
intel_guc_init_ggtt_pin_bias(guc);
@@ -267,32 +266,32 @@ int intel_uc_init_misc(struct drm_i915_private *dev_priv)
return 0;
}
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+void intel_uc_fini_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
intel_guc_fini_wq(guc);
}
-int intel_uc_init(struct drm_i915_private *dev_priv)
+int intel_uc_init(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GUC(i915))
return -ENODEV;
ret = intel_guc_init(guc);
if (ret)
return ret;
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv)
return 0;
}
-void intel_uc_fini(struct drm_i915_private *dev_priv)
+void intel_uc_fini(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
intel_guc_fini(guc);
@@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
__intel_uc_reset_hw(i915);
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct intel_huc *huc = &dev_priv->huc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret, attempts;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- gen9_reset_guc_interrupts(dev_priv);
+ gen9_reset_guc_interrupts(i915);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN9(i915))
attempts = 3;
else
attempts = 1;
@@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
- ret = __intel_uc_reset_hw(dev_priv);
+ ret = __intel_uc_reset_hw(i915);
if (ret)
goto err_out;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_fw_upload(huc);
if (ret)
goto err_out;
@@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_auth(huc);
if (ret)
goto err_communication;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
}
- dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
+ dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(dev_priv->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
- dev_info(dev_priv->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(dev_priv)));
+ dev_info(i915->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(i915)));
+ dev_info(i915->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(i915)));
return 0;
@@ -428,20 +427,20 @@ err_out:
if (GEM_WARN_ON(ret == -EIO))
ret = -EINVAL;
- dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
+ dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+void intel_uc_fini_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 448293eb638d..b36a3b5736a0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1702,15 +1702,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
-
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (__intel_wait_for_register_fw(dev_priv,
- mode, MODE_IDLE, MODE_IDLE,
- 500, 0,
- NULL))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
- engine->name);
+
+ if (intel_engine_stop_cs(engine))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 458468237b5f..c132d0c3a500 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -318,6 +318,12 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_C,
DDC_BUS_DDI_D,
DDC_BUS_DDI_F,
+ ICL_DDC_BUS_DDI_A = 0x1,
+ ICL_DDC_BUS_DDI_B,
+ ICL_DDC_BUS_PORT_1 = 0x4,
+ ICL_DDC_BUS_PORT_2,
+ ICL_DDC_BUS_PORT_3,
+ ICL_DDC_BUS_PORT_4,
};
#define VBT_DP_MAX_LINK_RATE_HBR3 0
@@ -635,7 +641,7 @@ struct bdb_sdvo_lvds_options {
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 2df3538ceba5..b1ab56a1ec31 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -463,6 +463,25 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
*/
WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ /* Wa_2006611047:icl (pre-prod)
+ * Formerly known as WaDisableImprovedTdlClkGating
+ */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
+ GEN11_STATE_CACHE_REDIRECT_TO_CS);
+
+ /* Wa_2006665173:icl (pre-prod) */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
+ /* WaEnableFloatBlendOptimization:icl */
+ WA_SET_BIT_MASKED(GEN10_CACHE_MODE_SS, FLOAT_BLEND_OPTIMIZATION_ENABLE);
+
return 0;
}
@@ -672,8 +691,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
+static void wa_init_mcr(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr;
+ u32 mcr_slice_subslice_mask;
+
+ /*
+ * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
+ * L3Banks could be fused off in single slice scenario. If that is
+ * the case, we might need to program MCR select to a valid L3Bank
+ * by default, to make sure we correctly read certain registers
+ * later on (in the range 0xB100 - 0xB3FF).
+ * This might be incompatible with
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads.
+ * Fortunately, this should not happen in production hardware, so
+ * we only assert that this is the case (instead of implementing
+ * something more complex that requires checking the range of every
+ * MMIO read).
+ */
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ is_power_of_2(sseu->slice_mask)) {
+ /*
+ * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
+ * enabled subslice, no need to redirect MCR packet
+ */
+ u32 slice = fls(sseu->slice_mask);
+ u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u8 ss_mask = sseu->subslice_mask[slice];
+
+ u8 enabled_mask = (ss_mask | ss_mask >>
+ GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+ u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+ /*
+ * Production silicon should have matched L3Bank and
+ * subslice enabled
+ */
+ WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+ }
+
+ mcr = I915_READ(GEN8_MCR_SELECTOR);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+ GEN11_MCR_SUBSLICE_MASK;
+ else
+ mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+ GEN8_MCR_SUBSLICE_MASK;
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+ * Before any MMIO read into slice/subslice specific registers, MCR
+ * packet control register needs to be programmed to point to any
+ * enabled s/ss pair. Otherwise, incorrect values will be returned.
+ * This means each subsequent MMIO read will be forwarded to a
+ * specific s/ss combination, but this is OK since these registers
+ * are consistent across s/ss in almost all cases. On the rare
+ * occasions, such as INSTDONE, where this value depends on the
+ * s/ss combo, the read should be done with read_subslice_reg.
+ */
+ mcr &= ~mcr_slice_subslice_mask;
+ mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
+ I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+}
+
static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG,
@@ -692,6 +777,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
+ /* This is not a Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
@@ -772,6 +859,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406463099:icl
+ * Formerly known as WaGamTlbPendError
+ */
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
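
wa_init_mcr() above delegates the actual slice/subslice choice to intel_calculate_mcr_s_ss_select(), which this hunk does not show. A hedged sketch of what such a helper plausibly computes, mirroring the fls()-based indexing already used in wa_init_mcr(); the GEN8_MCR_SLICE()/GEN11_MCR_SLICE() field encoders are assumed from i915_reg.h:

static u32 calculate_mcr_s_ss_select_sketch(struct drm_i915_private *dev_priv)
{
	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
	u32 slice = fls(sseu->slice_mask);
	u32 subslice = fls(sseu->subslice_mask[slice]);

	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	if (INTEL_GEN(dev_priv) == 10)
		return GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);

	return 0;	/* no steering needed before gen10 */
}
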
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 91c72911be3c..7846ea4a99bc 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err = 0;
@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_put;
}
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -458,7 +458,7 @@ out_device:
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
/* Force the page size for this object */
obj->mm.page_sizes.sg = page_size;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_unpin;
@@ -591,7 +591,7 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (!IS_ERR(vma))
i915_vma_close(vma);
@@ -604,8 +604,8 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
- unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
bool single = false;
LIST_HEAD(objects);
@@ -641,7 +641,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
break;
@@ -725,7 +725,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
unsigned int size;
@@ -819,7 +819,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_object_unpin;
@@ -887,8 +887,8 @@ out_object_put:
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
- const int gen = INTEL_GEN(vma->vm->i915);
+ struct drm_i915_private *i915 = vma->vm->i915;
+ const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj;
struct i915_vma *batch;
@@ -1047,7 +1047,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1100,7 +1101,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1439,7 +1441,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1493,7 +1495,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1531,7 +1533,8 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1587,7 +1590,8 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -1696,14 +1700,14 @@ int i915_gem_huge_page_mock_selftests(void)
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->base)) {
+ if (!i915_vm_is_48bit(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
}
/* If we ever hit this then it's time to mock the 64K scratch */
- if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
goto out_close;
@@ -1712,7 +1716,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1758,7 +1762,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
}
if (ctx->ppgtt)
- ctx->ppgtt->base.scrub_64K = true;
+ ctx->ppgtt->vm.scrub_64K = true;
err = i915_subtests(tests, ctx);
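
Almost all of the selftest churn in this and the following files is mechanical fallout from one structural change: the address space is now embedded in the (p)pgtt as ->vm rather than ->base. A minimal sketch of the layout the renames assume, with unrelated fields elided:

struct i915_hw_ppgtt {
	struct i915_address_space vm;	/* was ->base; callers upcast via &ppgtt->vm */
	struct kref ref;
	/* ... */
};

struct i915_ggtt {
	struct i915_address_space vm;	/* likewise addressed as &ggtt->vm */
	/* ... */
};
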
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index ddb03f009232..708e8d721448 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -26,6 +26,7 @@
#include "igt_flush_test.h"
#include "mock_drm.h"
+#include "mock_gem_device.h"
#include "huge_gem_object.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -114,7 +115,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -289,7 +290,7 @@ create_test_object(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
u64 size;
int err;
@@ -420,6 +421,130 @@ out_unlock:
return err;
}
+static __maybe_unused const char *
+__engine_name(struct drm_i915_private *i915, unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ if (engines == ALL_ENGINES)
+ return "all";
+
+ for_each_engine_masked(engine, i915, engines, tmp)
+ return engine->name;
+
+ return "none";
+}
+
+static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx,
+ unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+ int err;
+
+ GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!engine_has_kernel_context_barrier(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (engine->last_retired_context->gem_context != i915->kernel_context) {
+ pr_err("engine %s not idling in kernel context!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ if (i915->gt.active_requests) {
+ pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
+ i915->gt.active_requests);
+ return -EINVAL;
+ }
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!intel_engine_has_kernel_context(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int igt_switch_to_kernel_context(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ int err;
+
+ /*
+ * A core premise of switching to the kernel context is that
+ * if an engine is already idling in the kernel context, we
+ * do not emit another request and wake it up. The other is
+ * that we do indeed end up idling in the kernel context.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&i915->drm.struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ /* First check idling each individual engine */
+ for_each_engine(engine, i915, id) {
+ err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
+ if (err)
+ goto out_unlock;
+ }
+
+ /* Now en masse */
+ err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
+ if (err)
+ goto out_unlock;
+
+out_unlock:
+ GEM_TRACE_DUMP_ON(err);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -432,7 +557,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
continue;
@@ -447,9 +572,28 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
i915_gem_fini_aliasing_ppgtt(i915);
}
+int i915_gem_context_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
SUBTEST(igt_ctx_exec),
};
bool fake_alias = false;
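
For readers of the new selftest: the property being exercised is that i915_gem_switch_to_kernel_context() is idempotent, emitting work only for engines that are not already resting in the kernel context. A hedged sketch of that expected behaviour, built only from calls the selftest itself uses; the real implementation additionally handles request ordering and locking:

static int switch_to_kernel_context_sketch(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		/* An engine already idling in the kernel context must
		 * not be woken with another request. */
		if (intel_engine_has_kernel_context(engine))
			continue;

		rq = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	return 0;
}
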
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index ab9d7bee0aae..2dc72a984d45 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
u64 size;
for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
return -EINVAL;
}
- if (list_empty(&i915->ggtt.base.inactive_list)) {
+ if (list_empty(&i915->ggtt.vm.inactive_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+ list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
i915_vma_unpin(vma);
}
@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict something */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict the node */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
* i915_gtt_color_adjust throughout our driver, so using a mock color
* adjust will work just fine for our purposes.
*/
- ggtt->base.mm.color_adjust = mock_color_adjust;
+ ggtt->vm.mm.color_adjust = mock_color_adjust;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
cleanup_objects(i915);
- ggtt->base.mm.color_adjust = NULL;
+ ggtt->vm.mm.color_adjust = NULL;
return err;
}
@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(i915);
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f7dc926f4ef1..58ab5e84ceb7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -151,14 +151,14 @@ static int igt_ppgtt_alloc(void *arg)
if (err)
goto err_ppgtt;
- if (!ppgtt->base.allocate_va_range)
+ if (!ppgtt->vm.allocate_va_range)
goto err_ppgtt_cleanup;
/* Check we can allocate the entire range */
for (size = 4096;
- size <= ppgtt->base.total;
+ size <= ppgtt->vm.total;
size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
if (err) {
if (err == -ENOMEM) {
pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +168,15 @@ static int igt_ppgtt_alloc(void *arg)
goto err_ppgtt_cleanup;
}
- ppgtt->base.clear_range(&ppgtt->base, 0, size);
+ ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
}
/* Check we can incrementally allocate the entire range */
for (last = 0, size = 4096;
- size <= ppgtt->base.total;
+ size <= ppgtt->vm.total;
last = size, size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- last, size - last);
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+ last, size - last);
if (err) {
if (err == -ENOMEM) {
pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -188,7 +188,7 @@ static int igt_ppgtt_alloc(void *arg)
}
err_ppgtt_cleanup:
- ppgtt->base.cleanup(&ppgtt->base);
+ ppgtt->vm.cleanup(&ppgtt->vm);
err_ppgtt:
mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(ppgtt);
@@ -987,12 +987,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- GEM_BUG_ON(offset_in_page(ppgtt->base.total));
- GEM_BUG_ON(ppgtt->base.closed);
+ GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+ GEM_BUG_ON(ppgtt->vm.closed);
- err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1061,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
restart:
- list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
- drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+ list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
if (hole_start < last)
continue;
- if (ggtt->base.mm.color_adjust)
- ggtt->base.mm.color_adjust(node, 0,
- &hole_start, &hole_end);
+ if (ggtt->vm.mm.color_adjust)
+ ggtt->vm.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1134,7 +1134,7 @@ static int igt_ggtt_page(void *arg)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
@@ -1147,9 +1147,9 @@ static int igt_ggtt_page(void *arg)
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
}
order = i915_random_order(count, &prng);
@@ -1188,7 +1188,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
- ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+ ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp);
out_unpin:
@@ -1229,7 +1229,7 @@ static int exercise_mock(struct drm_i915_private *i915,
ppgtt = ctx->ppgtt;
GEM_BUG_ON(!ppgtt);
- err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
mock_context_close(ctx);
return err;
@@ -1270,7 +1270,7 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1288,20 +1288,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1319,7 +1319,7 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1337,20 +1337,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1371,7 +1371,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1383,18 +1383,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, i915->ggtt.base.total,
+ offset = random_offset(0, i915->ggtt.vm.total,
2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1429,8 +1429,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end;
} invalid_insert[] = {
{
- i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
- 0, i915->ggtt.base.total,
+ i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, i915->ggtt.vm.total,
},
{
2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1460,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
- err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1475,7 +1475,7 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1493,15 +1493,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
@@ -1510,7 +1510,7 @@ static int igt_gtt_insert(void *arg)
}
if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1522,7 +1522,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1542,7 +1542,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1557,13 +1557,13 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1579,7 +1579,7 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1597,19 +1597,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1669,7 +1669,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_page),
};
- GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+ GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index fbdb2419d418..2b2dde94526f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
- i915->ggtt.base.total + PAGE_SIZE);
+ i915->ggtt.vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -311,7 +311,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -440,7 +440,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index d16d74178e9d..1b70208eeea7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(contexts, i915_gem_context_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 94bc2e1898a4..a3a89aadeccb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -555,7 +555,8 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index e90f97236e50..8400a8cc5cf2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->base) {
+ if (vma->vm != &ctx->ppgtt->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm =
- &ctx->ppgtt->base;
+ struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_vma *vma;
int err;
@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
- VALID(i915->ggtt.base.total, PIN_GLOBAL),
- NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+ NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
* variable start, end and size.
*/
NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
- NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif
{ },
#undef NOSPACE
@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
- GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+ GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
goto out;
@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 438e0b045a2c..390a157b37c3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -105,7 +105,10 @@ static int emit_recurse_batch(struct hang *h,
struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ rq->gem_context->ppgtt ?
+ &rq->gem_context->ppgtt->vm :
+ &i915->ggtt.vm;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
@@ -560,6 +563,30 @@ struct active_engine {
#define TEST_SELF BIT(2)
#define TEST_PRIORITY BIT(3)
+static int active_request_put(struct i915_request *rq)
+{
+ int err = 0;
+
+ if (!rq)
+ return 0;
+
+ if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_global_seqno(rq));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(rq->i915);
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
static int active_engine(void *data)
{
I915_RND_STATE(prng);
@@ -608,24 +635,20 @@ static int active_engine(void *data)
i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
- if (old) {
- if (i915_request_wait(old, 0, HZ) < 0) {
- GEM_TRACE("%s timed out.\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(engine->i915);
- i915_request_put(old);
- err = -EIO;
- break;
- }
- i915_request_put(old);
- }
+ err = active_request_put(old);
+ if (err)
+ break;
cond_resched();
}
- for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_request_put(rq[count]);
+ for (count = 0; count < ARRAY_SIZE(rq); count++) {
+ int err__ = active_request_put(rq[count]);
+
+ /* Keep the first error */
+ if (!err)
+ err = err__;
+ }
err_file:
mock_file_free(engine->i915, file);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1b8a07125150..0b6da08c8cae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
struct i915_request *rq,
u32 arbitration_command)
{
- struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index f76f2597df5c..47bc5b2ddb56 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -137,7 +137,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
- valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
+ valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
GFP_KERNEL);
if (!valid)
return -ENOMEM;
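
This kzalloc(n * size) to kcalloc(n, size) conversion, repeated in the msm/hdmi hunks further down, changes no behaviour on valid inputs; kcalloc() simply refuses multiplications that would overflow instead of silently under-allocating:

/* Both return zeroed memory; only kcalloc() checks the multiply. */
valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), GFP_KERNEL);
valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid), GFP_KERNEL);
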
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 17444a3abbb9..f1cfb0fb6bea 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -33,7 +33,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 501becc47c0c..8904f1ce64e3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
index 302f7d103635..ca682caf1062 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
-static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap_atomic(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
@@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
.map = mock_dmabuf_kmap,
- .map_atomic = mock_dmabuf_kmap_atomic,
.unmap = mock_dmabuf_kunmap,
- .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 26bf29d97007..c2a0451336cf 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t)
spin_unlock(&engine->hw_lock);
}
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- if (!ce->pin_count++)
- i915_gem_context_get(ctx);
+ i915_gem_context_put(ce->gem_context);
+}
- return engine->buffer;
+static void mock_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
}
-static void mock_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+ .unpin = mock_context_unpin,
+ .destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
- if (!--ce->pin_count)
- i915_gem_context_put(ctx);
+ if (!ce->pin_count++) {
+ i915_gem_context_get(ctx);
+ ce->ring = engine->buffer;
+ ce->ops = &mock_context_ops;
+ }
+
+ return ce;
}
static int mock_request_alloc(struct i915_request *request)
@@ -185,7 +194,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.status_page.page_addr = (void *)(engine + 1);
engine->base.context_pin = mock_context_pin;
- engine->base.context_unpin = mock_context_unpin;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_breadcrumb = mock_emit_breadcrumb;
@@ -204,8 +212,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
if (!engine->base.buffer)
goto err_breadcrumbs;
+ if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ goto err_ring;
+
return &engine->base;
+err_ring:
+ mock_ring_free(engine->base.buffer);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
@@ -238,11 +251,15 @@ void mock_engine_free(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
+ struct intel_context *ce;
GEM_BUG_ON(timer_pending(&mock->hw_delay));
- if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+
+ __intel_context_unpin(engine->i915->kernel_context, engine);
mock_ring_free(engine->buffer);
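
The mock engine now follows the reworked pinning contract: context_pin() returns the intel_context itself, and that context carries its ring plus an ops vtable used for unpin and destroy. A sketch of the vtable this implies, derived from the assignments above (the authoritative definition lives in the i915 headers):

struct intel_context_ops {
	void (*unpin)(struct intel_context *ce);
	void (*destroy)(struct intel_context *ce);
};

With this in place, intel_context_unpin(ce) can dispatch through ce->ops without knowing whether the backend is a real engine or this mock.
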
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 94baedfa0f74..c97075c5ccaf 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = {
struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct pci_dev *pdev;
int err;
@@ -233,13 +231,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915);
mkwrite_device_info(i915)->ring_mask = BIT(0);
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
- goto err_unlock;
-
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
- goto err_engine;
+ goto err_unlock;
+
+ i915->engine[RCS] = mock_engine(i915, "mock", RCS);
+ if (!i915->engine[RCS])
+ goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -247,9 +245,8 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
-err_engine:
- for_each_engine(engine, i915, id)
- mock_engine_free(engine);
+err_context:
+ i915_gem_contexts_fini(i915);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
kmem_cache_destroy(i915->priorities);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 36c112088940..556c546f2715 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -66,25 +66,25 @@ mock_ppgtt(struct drm_i915_private *i915,
return NULL;
kref_init(&ppgtt->ref);
- ppgtt->base.i915 = i915;
- ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
- ppgtt->base.file = ERR_PTR(-ENODEV);
-
- INIT_LIST_HEAD(&ppgtt->base.active_list);
- INIT_LIST_HEAD(&ppgtt->base.inactive_list);
- INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
- INIT_LIST_HEAD(&ppgtt->base.global_link);
- drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
- ppgtt->base.clear_range = nop_clear_range;
- ppgtt->base.insert_page = mock_insert_page;
- ppgtt->base.insert_entries = mock_insert_entries;
- ppgtt->base.bind_vma = mock_bind_ppgtt;
- ppgtt->base.unbind_vma = mock_unbind_ppgtt;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = mock_cleanup;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+ INIT_LIST_HEAD(&ppgtt->vm.active_list);
+ INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
+ INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
+
+ INIT_LIST_HEAD(&ppgtt->vm.global_link);
+ drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
+
+ ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.insert_page = mock_insert_page;
+ ppgtt->vm.insert_entries = mock_insert_entries;
+ ppgtt->vm.bind_vma = mock_bind_ppgtt;
+ ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->vm.set_pages = ppgtt_set_pages;
+ ppgtt->vm.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = mock_cleanup;
return ppgtt;
}
@@ -107,27 +107,27 @@ void mock_init_ggtt(struct drm_i915_private *i915)
INIT_LIST_HEAD(&i915->vm_list);
- ggtt->base.i915 = i915;
+ ggtt->vm.i915 = i915;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
- ggtt->base.total = 4096 * PAGE_SIZE;
-
- ggtt->base.clear_range = nop_clear_range;
- ggtt->base.insert_page = mock_insert_page;
- ggtt->base.insert_entries = mock_insert_entries;
- ggtt->base.bind_vma = mock_bind_ggtt;
- ggtt->base.unbind_vma = mock_unbind_ggtt;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = mock_cleanup;
-
- i915_address_space_init(&ggtt->base, i915, "global");
+ ggtt->vm.total = 4096 * PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.insert_page = mock_insert_page;
+ ggtt->vm.insert_entries = mock_insert_entries;
+ ggtt->vm.bind_vma = mock_bind_ggtt;
+ ggtt->vm.unbind_vma = mock_unbind_ggtt;
+ ggtt->vm.set_pages = ggtt_set_pages;
+ ggtt->vm.clear_pages = clear_pages;
+ ggtt->vm.cleanup = mock_cleanup;
+
+ i915_address_space_init(&ggtt->vm, i915, "global");
}
void mock_fini_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 0d8d506695f9..be5f6f1daf55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
@@ -22,78 +23,37 @@
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
-/*
- * mtk specific framebuffer structure.
- *
- * @fb: drm framebuffer object.
- * @gem_obj: array of gem objects.
- */
-struct mtk_drm_fb {
- struct drm_framebuffer base;
- /* For now we only support a single plane */
- struct drm_gem_object *gem_obj;
-};
-
-#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
-
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return mtk_fb->gem_obj;
-}
-
-static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
-}
-
-static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- drm_framebuffer_cleanup(fb);
-
- drm_gem_object_put_unlocked(mtk_fb->gem_obj);
-
- kfree(mtk_fb);
-}
-
static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
- .create_handle = mtk_drm_fb_create_handle,
- .destroy = mtk_drm_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
int ret;
if (drm_format_num_planes(mode->pixel_format) != 1)
return ERR_PTR(-EINVAL);
- mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
- if (!mtk_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode);
- mtk_fb->gem_obj = obj;
+ fb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
- kfree(mtk_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return mtk_fb;
+ return fb;
}
/*
@@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
if (!fb)
return 0;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
return 0;
@@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
unsigned int height = cmd->height;
@@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
goto unreference;
}
- mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
- if (IS_ERR(mtk_fb)) {
- ret = PTR_ERR(mtk_fb);
+ fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto unreference;
}
- return &mtk_fb->base;
+ return fb;
unreference:
drm_gem_object_put_unlocked(gem);
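
The mediatek conversion works because the generic helpers expect the backing GEM objects in the core framebuffer's obj[] array, which is exactly where mtk_drm_framebuffer_init() now stores them. Roughly what the two generic helpers do under that convention, as a sketch rather than the verbatim drm_gem_framebuffer_helper code:

int gem_fb_create_handle_sketch(struct drm_framebuffer *fb,
				struct drm_file *file,
				unsigned int *handle)
{
	return drm_gem_handle_create(file, fb->obj[0], handle);
}

void gem_fb_destroy_sketch(struct drm_framebuffer *fb)
{
	int i;

	for (i = 0; i < 4; i++)	/* unused slots are NULL; put is a no-op */
		drm_gem_object_put_unlocked(fb->obj[i]);
	drm_framebuffer_cleanup(fb);
	kfree(fb);
}
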
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
index 9b2ae345a4e9..7f976b196a15 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -14,7 +14,6 @@
#ifndef MTK_DRM_FB_H
#define MTK_DRM_FB_H
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
int mtk_fb_wait(struct drm_framebuffer *fb);
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 2f4b0ffee598..f7e6aa1b5b7d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!mtk_fb_get_gem_obj(fb)) {
- DRM_DEBUG_KMS("buffer is null\n");
- return -EFAULT;
- }
-
if (!state->crtc)
return 0;
@@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index a393095aac1a..c9ad45686e7a 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -529,7 +529,7 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
if (stat & HDMITX_TOP_INTR_HPD_RISE)
hpd_connected = true;
- dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
+ dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
hpd_connected);
drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b001699297c4..457c29dba4a1 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(plane->fb));
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
@@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 20e956e14c21..7b641fa6dc4d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
-
- plane->fb = fb;
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 10271359789e..24e00274844b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1207,7 +1207,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index e09bc53a0e65..c4f115fe96ff 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
- mdp5_crtc_get_pipeline(plane->crtc);
+ mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
- plane->fb = fb;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index e63dc0fb55f8..c79659ca5706 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -157,8 +157,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->qfprom_mmio = NULL;
}
- hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
- config->hpd_reg_cnt, GFP_KERNEL);
+ hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
+ config->hpd_reg_cnt,
+ sizeof(hdmi->hpd_regs[0]),
+ GFP_KERNEL);
if (!hdmi->hpd_regs) {
ret = -ENOMEM;
goto fail;
@@ -178,8 +180,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->hpd_regs[i] = reg;
}
- hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) *
- config->pwr_reg_cnt, GFP_KERNEL);
+ hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
+ config->pwr_reg_cnt,
+ sizeof(hdmi->pwr_regs[0]),
+ GFP_KERNEL);
if (!hdmi->pwr_regs) {
ret = -ENOMEM;
goto fail;
@@ -199,8 +203,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->pwr_regs[i] = reg;
}
- hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) *
- config->hpd_clk_cnt, GFP_KERNEL);
+ hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
+ config->hpd_clk_cnt,
+ sizeof(hdmi->hpd_clks[0]),
+ GFP_KERNEL);
if (!hdmi->hpd_clks) {
ret = -ENOMEM;
goto fail;
@@ -219,8 +225,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->hpd_clks[i] = clk;
}
- hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) *
- config->pwr_clk_cnt, GFP_KERNEL);
+ hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
+ config->pwr_clk_cnt,
+ sizeof(hdmi->pwr_clks[0]),
+ GFP_KERNEL);
if (!hdmi->pwr_clks) {
ret = -ENOMEM;
goto fail;
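devm_kcalloc() takes the element count and element size separately and returns NULL if the product would overflow, unlike an open-coded n * size fed to devm_kzalloc(). A minimal sketch of the pattern, with illustrative names:

#include <linux/device.h>
#include <linux/slab.h>

/* Managed, zeroed, overflow-checked array allocation: on n * size
 * overflow devm_kcalloc() fails cleanly instead of returning a
 * truncated buffer. */
static u32 *example_alloc_table(struct device *dev, size_t n)
{
	return devm_kcalloc(dev, n, sizeof(u32), GFP_KERNEL);
}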
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 5e631392dc85..4157722d6b4d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -21,12 +21,12 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
struct device *dev = &phy->pdev->dev;
int i, ret;
- phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs,
+ phy->regs = devm_kcalloc(dev, cfg->num_regs, sizeof(phy->regs[0]),
GFP_KERNEL);
if (!phy->regs)
return -ENOMEM;
- phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks,
+ phy->clks = devm_kcalloc(dev, cfg->num_clks, sizeof(phy->clks[0]),
GFP_KERNEL);
if (!phy->clks)
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 7a16242bf8bf..2a7348aeb38d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -25,49 +26,20 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct drm_gem_object *bo = msm_fb->planes[i];
-
- drm_gem_object_put_unlocked(bo);
- }
-
- kfree(msm_fb);
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
- .create_handle = msm_framebuffer_create_handle,
- .destroy = msm_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- msm_gem_describe(msm_fb->planes[i], m);
+ msm_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], aspace);
+ msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- if (!msm_fb->planes[plane])
+ if (!fb->obj[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+ return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->planes[plane];
+ return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
@@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- msm_fb->planes[i] = bos[i];
+ msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
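With the objects tracked in fb->obj[], the generic GEM framebuffer helpers can stand in for the per-driver create_handle/destroy callbacks deleted above, since they walk fb->obj[] themselves. A minimal sketch; the funcs table mirrors the one in the patch, and drm_gem_fb_get_obj() is the generic equivalent of msm_framebuffer_bo():

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

/* Both helpers operate purely on fb->obj[], so the driver only has
 * to populate that array at fb creation time. */
static const struct drm_framebuffer_funcs example_fb_funcs = {
	.create_handle = drm_gem_fb_create_handle,
	.destroy = drm_gem_fb_destroy,
};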
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 090664899247..e721bb2163a0 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -141,7 +141,7 @@ nv84_fence_suspend(struct nouveau_drm *drm)
struct nv84_fence_priv *priv = drm->fence;
int i;
- priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
+ priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan.nr));
if (priv->suspend) {
for (i = 0; i < drm->chan.nr; i++)
priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
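array_size() from <linux/overflow.h> saturates to SIZE_MAX when the multiplication overflows, and an allocation of SIZE_MAX always fails, so an overflow can no longer yield an undersized buffer. A minimal sketch:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

/* On overflow array_size() evaluates to SIZE_MAX, which vmalloc()
 * rejects, so the caller sees NULL rather than a short buffer. */
static u32 *example_save_area(unsigned int nr_channels)
{
	return vmalloc(array_size(sizeof(u32), nr_channels));
}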
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
index 99d4fd17543c..e84a2e2ff043 100644
--- a/drivers/gpu/drm/nouveau/nvif/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -50,8 +50,8 @@ nvif_fifo_runlists(struct nvif_device *device)
goto done;
device->runlists = fls64(a->v.runlists.data);
- device->runlist = kzalloc(sizeof(*device->runlist) *
- device->runlists, GFP_KERNEL);
+ device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),
+ GFP_KERNEL);
if (!device->runlist) {
ret = -ENOMEM;
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 358ac4f3cf91..ae08a1ca8044 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -65,12 +65,15 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
goto done;
mmu->mem = mems[ret].oclass;
- mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
- mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+ mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap),
+ GFP_KERNEL);
+ mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type),
+ GFP_KERNEL);
if (ret = -ENOMEM, !mmu->heap || !mmu->type)
goto done;
- mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+ mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind),
+ GFP_KERNEL);
if (!mmu->kind && mmu->kind_nr)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 40adfe9b334b..ef3f62840e83 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -83,7 +83,7 @@ nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
return ret;
}
- *psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+ *psclass = kcalloc(args->sclass.count, sizeof(**psclass), GFP_KERNEL);
if (*psclass) {
for (i = 0; i < args->sclass.count; i++) {
(*psclass)[i].oclass = args->sclass.oclass[i].oclass;
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 191832be6c65..6b9c5776547f 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -138,7 +138,8 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
vmm->limit = args->size;
vmm->page_nr = args->page_nr;
- vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+ vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
+ GFP_KERNEL);
if (!vmm->page) {
ret = -ENOMEM;
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c
index 4e8d3fa042df..006618d77aa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/event.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c
@@ -84,7 +84,8 @@ int
nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
struct nvkm_event *event)
{
- event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
+ event->refs = kzalloc(array3_size(index_nr, types_nr,
+ sizeof(*event->refs)),
GFP_KERNEL);
if (!event->refs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index ccba4ae73cc5..8162e3d2359c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -144,8 +144,7 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
struct nvkm_ramht *ramht;
int ret, i;
- if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
- (size >> 3) * sizeof(*ramht->data))))
+ if (!(ramht = *pramht = vzalloc(struct_size(ramht, data, (size >> 3)))))
return -ENOMEM;
ramht->device = device;
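struct_size(p, member, n) computes sizeof(*p) + n * sizeof(p->member[0]) with the same saturate-on-overflow semantics, which is the right tool for structures ending in a flexible array like nvkm_ramht. A minimal sketch over an illustrative type:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

/* An example structure with a trailing flexible array, standing in
 * for nvkm_ramht's layout. */
struct example_ht {
	int bits;
	struct { u32 chid; u32 inst; } data[];
};

static struct example_ht *example_ht_new(size_t entries)
{
	struct example_ht *ht;

	/* struct_size() only inspects the type of 'ht', so using it
	 * before assignment is the idiomatic pattern. */
	ht = vzalloc(struct_size(ht, data, entries));
	return ht;
}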
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index a99046414a18..afccf9721cf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -910,7 +910,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
/* Read PBDMA->runlist(s) mapping from HW. */
- if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
+ if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
return -ENOMEM;
for (i = 0; i < fifo->pbdma_nr; i++)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 53859b6254d6..b2785bee418e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -779,8 +779,8 @@ nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
sdom = spec;
while (sdom->signal_nr) {
- dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
- sizeof(*dom->signal), GFP_KERNEL);
+ dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
+ GFP_KERNEL);
if (!dom)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 73e463ed55c3..dea444d48f94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -73,7 +73,8 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
}
iccsense->nr_entry = cnt;
- iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL);
+ iccsense->rail = kmalloc_array(cnt, sizeof(struct pwr_rail_t),
+ GFP_KERNEL);
if (!iccsense->rail)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 920b3d347803..bbfde1cb3a17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -171,7 +171,7 @@ gt215_link_train(struct gt215_ram *ram)
return -ENOSYS;
/* XXX: Multiple partitions? */
- result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
+ result = kmalloc_array(64, sizeof(u32), GFP_KERNEL);
if (!result)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
index 39808489f21d..92e363dbbc5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
@@ -191,9 +191,9 @@ nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
- if (!(mem->mem = kvmalloc(sizeof(*mem->mem) * size, GFP_KERNEL)))
+ if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
return -ENOMEM;
- if (!(mem->dma = kvmalloc(sizeof(*mem->dma) * size, GFP_KERNEL)))
+ if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
return -ENOMEM;
if (mmu->dma_bits > 32)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 1c12e58f44c2..de269eb482dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -59,7 +59,7 @@ nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
pgt->sparse = sparse;
if (desc->type == PGD) {
- pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+ pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
if (!pgt->pde) {
kfree(pgt);
return NULL;
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 68a40ae26f5b..1e2c931f6acf 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -82,7 +82,7 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
struct dispc_clock_info *dispc_cinfo)
{
int i;
- struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
+ struct sdi_clk_calc_ctx ctx;
/*
* DSS fclk gives us very few possibilities, so finding a good pixel
@@ -95,6 +95,9 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
bool ok;
memset(&ctx, 0, sizeof(ctx));
+
+ ctx.sdi = sdi;
+
if (pclk > 1000 * i * i * i)
ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
else
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 401c02e9e6b2..f92fe205550b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -940,8 +940,8 @@ int tiler_map_show(struct seq_file *s, void *arg)
h_adj = omap_dmm->container_height / ydiv;
w_adj = omap_dmm->container_width / xdiv;
- map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
- global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+ map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
+ global_map = kmalloc_array(w_adj + 1, h_adj, GFP_KERNEL);
if (!map || !global_map)
goto error;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 5fd22ca73913..9f1e3d8f8488 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -51,9 +52,6 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
- struct drm_gem_object *bo;
- u32 pitch;
- u32 offset;
dma_addr_t dma_addr;
};
@@ -68,56 +66,28 @@ struct omap_framebuffer {
struct mutex lock;
};
-static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- omap_fb->planes[0].bo, handle);
-}
-
-static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
-
- drm_gem_object_unreference_unlocked(plane->bo);
- }
-
- kfree(omap_fb);
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
- .create_handle = omap_framebuffer_create_handle,
- .destroy = omap_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static u32 get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct drm_framebuffer *fb,
const struct drm_format_info *format, int n, int x, int y)
{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ struct plane *plane = &omap_fb->planes[n];
u32 offset;
- offset = plane->offset
+ offset = fb->offsets[n]
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
- + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+ + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- struct plane *plane = &omap_fb->planes[0];
-
- return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x += w - 1;
/* Note: x and y are in TILER units, not pixels */
- omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+ omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
/* Note: stride in TILER units, not pixels */
- info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
+ info->screen_width = omap_gem_tiled_stride(fb->obj[0], orient);
} else {
switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
@@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
break;
}
- info->paddr = get_linear_addr(plane, format, 0, x, y);
+ info->paddr = get_linear_addr(fb, format, 0, x, y);
info->rotation_type = OMAP_DSS_ROT_NONE;
info->rotation = DRM_MODE_ROTATE_0;
- info->screen_width = plane->pitch;
+ info->screen_width = fb->pitches[0];
}
/* convert to pixels: */
@@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
- omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
- info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+ info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
}
} else {
info->p_uv_addr = 0;
@@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_pin(plane->bo, &plane->dma_addr);
+ ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
if (ret)
goto fail;
- omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
+ omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
}
omap_fb->pin_count++;
@@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
@@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
}
-/* iterate thru all the connectors, returning ones that are attached
- * to the same fb..
- */
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from)
-{
- struct drm_device *dev = fb->dev;
- struct list_head *connector_list = &dev->mode_config.connector_list;
- struct drm_connector *connector = from;
-
- if (!from)
- return list_first_entry_or_null(connector_list, typeof(*from),
- head);
-
- list_for_each_entry_from(connector, connector_list, head) {
- if (connector != from) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- if (crtc && crtc->primary->fb == fb)
- return connector;
-
- }
- }
-
- return NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->format->format);
for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
- i, plane->offset, plane->pitch);
- omap_gem_describe(plane->bo, m);
+ i, fb->offsets[i], fb->pitches[i]);
+ omap_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
- plane->bo = bos[i];
- plane->offset = mode_cmd->offsets[i];
- plane->pitch = pitch;
+ fb->obj[i] = bos[i];
plane->dma_addr = 0;
}
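get_linear_addr() now derives the scanout offset entirely from the core fb fields; for chroma planes (n > 0) the pixel coordinates are scaled down by the format's hsub/vsub factors, e.g. for NV12 (hsub = vsub = 2) the second plane advances by half as many rows and columns. A minimal sketch of the same arithmetic:

#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>

/* Byte offset of pixel (x, y) within plane n, mirroring the patched
 * get_linear_addr() above. */
static u32 example_linear_offset(const struct drm_framebuffer *fb,
				 const struct drm_format_info *format,
				 int n, int x, int y)
{
	return fb->offsets[n]
	     + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
	     + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
}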
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index 94ad5f9e4404..c20cb4bc714d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 0faf042b82e1..17a53d207978 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -244,7 +244,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
* DSS, GPU, etc. are not cache coherent:
*/
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
- addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
@@ -268,7 +268,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
}
}
} else {
- addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 8e41d649e248..1a073f9b2834 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -148,8 +148,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map_atomic = omap_gem_dmabuf_kmap_atomic,
- .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
.map = omap_gem_dmabuf_kmap,
.unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 57df39b5c589..bb53e0850764 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -292,7 +292,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
err);
- drm_panel_detach(&innolux->base);
innolux_panel_del(innolux);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 0a94ab79a6c0..99caa7835e7b 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
ret);
- drm_panel_detach(&jdi->base);
jdi_panel_del(jdi);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 5185819c5b79..8a1687887ae9 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
{
struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&lvds->panel);
drm_panel_remove(&lvds->panel);
panel_lvds_disable(&lvds->panel);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 90f1ae4af93c..87fa316e1d7b 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -14,8 +14,6 @@
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
-#define DRV_NAME "orisetech_otm8009a"
-
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
@@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
DRM_WARN("mipi dsi dcs write buffer failed\n");
}
+static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* data will be sent in dsi hs mode (i.e. no lpm) */
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ otm8009a_dcs_write_buf(ctx, data, len);
+
+ /* restore the dsi lpm mode */
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+}
+
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
@@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0; /* This is not an issue so we return 0 here */
- /* Power off the backlight. Note: end-user still controls brightness */
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- ret = backlight_update_status(ctx->bl_dev);
- if (ret)
- return ret;
+ backlight_disable(ctx->bl_dev);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
@@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
ctx->prepared = true;
- /*
- * Power on the backlight. Note: end-user still controls brightness
- * Note: ctx->prepared must be true before updating the backlight.
- */
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ctx->bl_dev);
-
return 0;
}
@@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
+ if (ctx->enabled)
+ return 0;
+
+ backlight_enable(ctx->bl_dev);
+
ctx->enabled = true;
return 0;
@@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
@@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
return 0;
}
@@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->panel.dev = dev;
ctx->panel.funcs = &otm8009a_drm_funcs;
- ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
- &otm8009a_backlight_ops, NULL);
+ ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+ dsi->host->dev, ctx,
+ &otm8009a_backlight_ops,
+ NULL);
if (IS_ERR(ctx->bl_dev)) {
- dev_err(dev, "failed to register backlight device\n");
- return PTR_ERR(ctx->bl_dev);
+ ret = PTR_ERR(ctx->bl_dev);
+ dev_err(dev, "failed to register backlight: %d\n", ret);
+ return ret;
}
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return ret;
}
- DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh,
- mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
-
return 0;
}
@@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
- backlight_device_unregister(ctx->bl_dev);
-
return 0;
}
@@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
.probe = otm8009a_probe,
.remove = otm8009a_remove,
.driver = {
- .name = DRV_NAME "_panel",
+ .name = "panel-orisetech-otm8009a",
.of_match_table = orisetech_otm8009a_of_match,
},
};
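devm_backlight_device_register() ties the backlight's lifetime to the owning device, which is why the explicit backlight_device_unregister() call in remove() is dropped above. A minimal sketch of the managed registration, with illustrative names and an assumed maximum brightness:

#include <linux/backlight.h>
#include <linux/err.h>

/* A devm-managed backlight is unregistered automatically when the
 * owning device is unbound; remove() needs no cleanup call. */
static int example_register_backlight(struct device *dev, void *priv,
				      const struct backlight_ops *ops)
{
	struct backlight_device *bl;

	bl = devm_backlight_device_register(dev, dev_name(dev), dev,
					    priv, ops, NULL);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	bl->props.max_brightness = 255;	/* illustrative value */
	return 0;
}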
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 74a806121f80..cb4dfb98be0f 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&wuxga_nt->base);
wuxga_nt_panel_del(wuxga_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 71c09ed436ae..75f925390551 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
seiko_panel_disable(&panel->base);
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 6bf8730f1a21..02fc0f5423d4 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
- drm_panel_detach(&sharp->base);
sharp_panel_del(sharp);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 494aa9b1628a..e5cae0050f52 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&sharp_nt->base);
sharp_nt_panel_del(sharp_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index cbf1ab404ee7..ac6aaa174c0b 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
@@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct display_timing auo_g070vvn01_timings = {
+ .pixelclock = { 33300000, 34209000, 45000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 20, 40, 200 },
+ .hback_porch = { 87, 40, 1 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 5, 13, 200 },
+ .vback_porch = { 31, 31, 29 },
+ .vsync_len = { 1, 1, 3 },
+};
+
+static const struct panel_desc auo_g070vvn01 = {
+ .timings = &auo_g070vvn01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 50,
+ .disable = 50,
+ .unprepare = 1000,
+ },
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
.enable = 450,
.unprepare = 500,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -1217,6 +1244,30 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_tv123wam_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_tv123wam = {
+ .modes = &innolux_tv123wam_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+};
+
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -1247,8 +1298,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
.hback_porch = { 16, 36, 56 },
.hsync_len = { 8, 8, 8 },
.vactive = { 480, 480, 480 },
- .vfront_porch = { 6, 21, 33.5 },
- .vback_porch = { 6, 21, 33.5 },
+ .vfront_porch = { 6, 21, 33 },
+ .vback_porch = { 6, 21, 33 },
.vsync_len = { 8, 8, 8 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@@ -2095,6 +2146,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g070vvn01",
+ .data = &auo_g070vvn01,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
@@ -2170,6 +2224,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,tv123wam",
+ .data = &innolux_tv123wam,
+ }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 358c64ef1922..74284e5afc5d 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
- drm_panel_detach(&ctx->panel);
drm_panel_remove(&ctx->panel);
if (ctx->backlight)
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 9a6752606079..ca465c0d49fa 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -241,7 +241,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
mode_cmd.height, mode_cmd.pitches[0]);
- shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+ shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
/* TODO: what's the usual response to memory allocation errors? */
BUG_ON(!shadow);
DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index c5716a0ca3b8..771250aed78d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -200,8 +200,8 @@ int qxl_device_init(struct qxl_device *qdev,
(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
qdev->mem_slots =
- kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
- GFP_KERNEL);
+ kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
+ GFP_KERNEL);
idr_init(&qdev->release_idr);
spin_lock_init(&qdev->release_idr_lock);
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 6a2e091aa7b6..e55cbeee7a53 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1176,7 +1176,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
ectx.abort = false;
ectx.last_jump = 0;
if (ws)
- ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+ ectx.ws = kcalloc(4, ws, GFP_KERNEL);
else
ectx.ws = NULL;
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 95652e643da1..0aef4937c901 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2581,7 +2581,9 @@ int btc_dpm_init(struct radeon_device *rdev)
return ret;
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ kcalloc(4,
+ sizeof(struct radeon_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7e1b04dc5593..b9302c918271 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5568,8 +5568,9 @@ static int ci_parse_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
@@ -5770,7 +5771,9 @@ int ci_dpm_init(struct radeon_device *rdev)
ci_set_private_data_variables_based_on_pptable(rdev);
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ kcalloc(4,
+ sizeof(struct radeon_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
ci_dpm_fini(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ae1529b0ef6f..f055d6ea3522 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2660,8 +2660,9 @@ static int kv_parse_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 9416e72f86aa..0fd8d6ba9828 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3998,8 +3998,9 @@ static int ni_parse_power_table(struct radeon_device *rdev)
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- power_info->pplib.ucNumStates, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
@@ -4075,7 +4076,9 @@ int ni_dpm_init(struct radeon_device *rdev)
return ret;
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ kcalloc(4,
+ sizeof(struct radeon_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 31d1b4710844..73d4c5348116 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -991,7 +991,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
- kzalloc(psl->ucNumEntries *
+ kcalloc(psl->ucNumEntries,
sizeof(struct radeon_phase_shedding_limits_entry),
GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4134759a6823..f422a8d6aec4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2126,13 +2126,16 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
if (num_modes == 0)
return state_index;
- rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+ rdev->pm.power_state = kcalloc(num_modes,
+ sizeof(struct radeon_power_state),
+ GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ kcalloc(1, sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
if (!rdev->pm.power_state[state_index].clock_info)
return state_index;
rdev->pm.power_state[state_index].num_clock_modes = 1;
@@ -2587,8 +2590,9 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
if (power_info->pplib.ucNumStates == 0)
return state_index;
- rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
- power_info->pplib.ucNumStates, GFP_KERNEL);
+ rdev->pm.power_state = kcalloc(power_info->pplib.ucNumStates,
+ sizeof(struct radeon_power_state),
+ GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
/* first mode is usually default, followed by low to high */
@@ -2603,10 +2607,11 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
- rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
- ((power_info->pplib.ucStateEntrySize - 1) ?
- (power_info->pplib.ucStateEntrySize - 1) : 1),
- GFP_KERNEL);
+ rdev->pm.power_state[i].clock_info =
+ kcalloc((power_info->pplib.ucStateEntrySize - 1) ?
+ (power_info->pplib.ucStateEntrySize - 1) : 1,
+ sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
return state_index;
if (power_info->pplib.ucStateEntrySize - 1) {
@@ -2688,8 +2693,9 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
if (state_array->ucNumEntries == 0)
return state_index;
- rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
- state_array->ucNumEntries, GFP_KERNEL);
+ rdev->pm.power_state = kcalloc(state_array->ucNumEntries,
+ sizeof(struct radeon_power_state),
+ GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
power_state_offset = (u8 *)state_array->states;
@@ -2699,10 +2705,11 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
- (power_state->v2.ucNumDPMLevels ?
- power_state->v2.ucNumDPMLevels : 1),
- GFP_KERNEL);
+ rdev->pm.power_state[i].clock_info =
+ kcalloc(power_state->v2.ucNumDPMLevels ?
+ power_state->v2.ucNumDPMLevels : 1,
+ sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
return state_index;
if (power_state->v2.ucNumDPMLevels) {
@@ -2782,7 +2789,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ kcalloc(1,
+ sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
if (rdev->pm.power_state[0].clock_info) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3178ba0c537c..60a61d33f607 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2642,13 +2642,16 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
rdev->pm.default_power_state_index = -1;
/* allocate 2 power states */
- rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+ rdev->pm.power_state = kcalloc(2, sizeof(struct radeon_power_state),
+ GFP_KERNEL);
if (rdev->pm.power_state) {
/* allocate 1 clock mode per state */
rdev->pm.power_state[0].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ kcalloc(1, sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
rdev->pm.power_state[1].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ kcalloc(1, sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
if (!rdev->pm.power_state[0].clock_info ||
!rdev->pm.power_state[1].clock_info)
goto pm_failed;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 0b3ec35515f3..1cef155cc933 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -347,13 +347,14 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
+ rdev->gart.pages = vzalloc(array_size(sizeof(void *),
+ rdev->gart.num_cpu_pages));
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
- rdev->gart.num_gpu_pages);
+ rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
+ rdev->gart.num_gpu_pages));
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f5e9abfadb56..48f4b273e316 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -59,7 +59,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
n = rdev->mc.gtt_size - rdev->gart_pin_size;
n /= size;
- gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+ gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
DRM_ERROR("Failed to allocate %d pointers\n", n);
r = 1;
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index b5e4e09a8996..694b7b3e9799 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -804,8 +804,9 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- power_info->pplib.ucNumStates, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index d91aa3944593..6986051fbb89 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1888,8 +1888,9 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- power_info->pplib.ucNumStates, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index cb2a7ec4e217..c765ae7ea806 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2282,8 +2282,9 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- power_info->pplib.ucNumStates, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 90d5b41007bf..fea88078cf8e 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6832,8 +6832,9 @@ static int si_parse_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
@@ -6941,7 +6942,9 @@ int si_dpm_init(struct radeon_device *rdev)
return ret;
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ kcalloc(4,
+ sizeof(struct radeon_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index fd4804829e46..1e4975f3374c 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1482,8 +1482,9 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2ef7c4e5e495..5d317f763eea 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1757,8 +1757,9 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
- rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
+ rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct radeon_ps),
+ GFP_KERNEL);
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 3e58ed93d5b1..2a3b8d7972b5 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -17,3 +17,10 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
+
+# 'remote-endpoint' is fixed up at run-time
+DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index f2a0bd1e5119..15dc9caa128b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -691,6 +691,52 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.atomic_disable = rcar_du_crtc_atomic_disable,
};
+static struct drm_crtc_state *
+rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct rcar_du_crtc_state *state;
+ struct rcar_du_crtc_state *copy;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = to_rcar_crtc_state(crtc->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (copy == NULL)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);
+
+ return &copy->state;
+}
+
+static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_rcar_crtc_state(state));
+}
+
+static void rcar_du_crtc_reset(struct drm_crtc *crtc)
+{
+ struct rcar_du_crtc_state *state;
+
+ if (crtc->state) {
+ rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
+ crtc->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+
+ state->crc.source = VSP1_DU_CRC_NONE;
+ state->crc.index = 0;
+
+ crtc->state = &state->state;
+ crtc->state->crtc = crtc;
+}
+
static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -710,15 +756,111 @@ static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
rcrtc->vblank_enable = false;
}
-static const struct drm_crtc_funcs crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
+ const char *source_name,
+ size_t *values_cnt)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ enum vsp1_du_crc_source source;
+ unsigned int index = 0;
+ unsigned int i;
+ int ret;
+
+ /*
+ * Parse the source name. Supported values are "plane%u" to compute the
+ * CRC on an input plane (%u is the plane ID), and "auto" to compute the
+ * CRC on the composer (VSP) output.
+ */
+ if (!source_name) {
+ source = VSP1_DU_CRC_NONE;
+ } else if (!strcmp(source_name, "auto")) {
+ source = VSP1_DU_CRC_OUTPUT;
+ } else if (strstarts(source_name, "plane")) {
+ source = VSP1_DU_CRC_PLANE;
+
+ ret = kstrtouint(source_name + strlen("plane"), 10, &index);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
+ if (index == rcrtc->vsp->planes[i].plane.base.id) {
+ index = i;
+ break;
+ }
+ }
+
+ if (i >= rcrtc->vsp->num_planes)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ /* Perform an atomic commit to set the CRC source. */
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (!IS_ERR(crtc_state)) {
+ struct rcar_du_crtc_state *rcrtc_state;
+
+ rcrtc_state = to_rcar_crtc_state(crtc_state);
+ rcrtc_state->crc.source = source;
+ rcrtc_state->crc.index = index;
+
+ ret = drm_atomic_commit(state);
+ } else {
+ ret = PTR_ERR(crtc_state);
+ }
+
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+unlock:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static const struct drm_crtc_funcs crtc_funcs_gen2 = {
+ .reset = rcar_du_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
+ .enable_vblank = rcar_du_crtc_enable_vblank,
+ .disable_vblank = rcar_du_crtc_disable_vblank,
+};
+
+static const struct drm_crtc_funcs crtc_funcs_gen3 = {
+ .reset = rcar_du_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
.enable_vblank = rcar_du_crtc_enable_vblank,
.disable_vblank = rcar_du_crtc_disable_vblank,
+ .set_crc_source = rcar_du_crtc_set_crc_source,
};
/* -----------------------------------------------------------------------------
@@ -822,8 +964,10 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
else
primary = &rgrp->planes[swindex % 2].plane;
- ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary,
- NULL, &crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
+ rcdu->info->gen <= 2 ?
+ &crtc_funcs_gen2 : &crtc_funcs_gen3,
+ NULL);
if (ret < 0)
return ret;
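For context, a hedged userspace sketch of how the new .set_crc_source hook is exercised through DRM's standard CRC debugfs interface; the card/CRTC paths assume card 0 and CRTC 0 and are not taken from this patch:

/* Hedged usage sketch: select a CRC source, then read one entry back.
 * Each "data" line carries a frame counter followed by the CRC value(s). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int ctl = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	if (ctl < 0)
		return 1;
	if (write(ctl, "auto", 4) != 4) {	/* CRC on the VSP output */
		close(ctl);
		return 1;
	}
	close(ctl);

	FILE *f = fopen("/sys/kernel/debug/dri/0/crtc-0/crc/data", "r");
	if (!f)
		return 1;
	unsigned int frame, crc;
	if (fscanf(f, "0x%x 0x%x", &frame, &crc) == 2)
		printf("frame %u crc 0x%08x\n", frame, crc);
	fclose(f);
	return 0;
}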
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 84b5e23a85b1..7680cb2636c8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -21,6 +21,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <media/vsp1.h>
+
struct rcar_du_group;
struct rcar_du_vsp;
@@ -69,6 +71,19 @@ struct rcar_du_crtc {
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+/**
+ * struct rcar_du_crtc_state - Driver-specific CRTC state
+ * @state: base DRM CRTC state
+ * @crc: CRC computation configuration
+ */
+struct rcar_du_crtc_state {
+ struct drm_crtc_state state;
+
+ struct vsp1_du_crc_config crc;
+};
+
+#define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state)
+
enum rcar_du_output {
RCAR_DU_OUTPUT_DPAD0,
RCAR_DU_OUTPUT_DPAD1,
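A small fragment illustrating the subclassing pattern the struct above relies on (assumes a struct drm_crtc *crtc in scope): because the base state is embedded rather than pointed to, the driver and core views convert both ways at no cost.

/* Sketch: two views of the same allocation. */
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
struct drm_crtc_state *base = &rstate->state;	/* same object as crtc->state */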
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index c59f0cfabd33..72eebeda518e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -32,7 +32,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
-static void rcar_du_vsp_complete(void *private, bool completed)
+static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
{
struct rcar_du_crtc *crtc = private;
@@ -41,6 +41,8 @@ static void rcar_du_vsp_complete(void *private, bool completed)
if (completed)
rcar_du_crtc_finish_page_flip(crtc);
+
+ drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
}
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
@@ -103,7 +105,13 @@ void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
{
- vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe);
+ struct vsp1_du_atomic_pipe_config cfg = { { 0, } };
+ struct rcar_du_crtc_state *state;
+
+ state = to_rcar_crtc_state(crtc->crtc.state);
+ cfg.crc = state->crc;
+
+ vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
/* Keep the two tables in sync. */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3d2d3bbd1342..155ad840f3c5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
const struct drm_display_mode *panel_mode;
struct drm_crtc_state *crtc_state;
+ if (!state->crtc)
+ return 0;
+
if (list_empty(&connector->modes)) {
dev_dbg(lvds->dev, "connector: empty modes list\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index eb3042c6d1b2..3105965fc260 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -792,7 +792,6 @@ err_config_video:
int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
- u32 val;
int ret;
ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
return ret;
}
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
+ writel(0, dp->regs + SPDIF_CTRL_ADDR);
/* clean the audio config and reset */
writel(0, dp->regs + AUDIO_SRC_CNTL);
@@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
u32 val;
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
-
writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
clk_prepare_enable(dp->spdif_clk);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index d4f4118b482d..ea18cb2a76c0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -18,52 +18,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_psr.h"
-#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
-
-struct rockchip_drm_fb {
- struct drm_framebuffer fb;
- struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
-};
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
-{
- struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
-
- if (plane >= ROCKCHIP_MAX_FB_BUFFER)
- return NULL;
-
- return rk_fb->obj[plane];
-}
-
-static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- int i;
-
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
- drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
-
- drm_framebuffer_cleanup(fb);
- kfree(rockchip_fb);
-}
-
-static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-
- return drm_gem_handle_create(file_priv,
- rockchip_fb->obj[0], handle);
-}
-
static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int flags, unsigned int color,
@@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
- .destroy = rockchip_drm_fb_destroy,
- .create_handle = rockchip_drm_fb_create_handle,
- .dirty = rockchip_drm_fb_dirty,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = rockchip_drm_fb_dirty,
};
-static struct rockchip_drm_fb *
+static struct drm_framebuffer *
rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
int ret;
int i;
- rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
- if (!rockchip_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
- rockchip_fb->obj[i] = obj[i];
+ fb->obj[i] = obj[i];
- ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
- &rockchip_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize framebuffer: %d\n",
ret);
- kfree(rockchip_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return rockchip_fb;
+ return fb;
}
static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
struct drm_gem_object *obj;
unsigned int hsub;
@@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
objs[i] = obj;
}
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
- if (IS_ERR(rockchip_fb)) {
- ret = PTR_ERR(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto err_gem_object_unreference;
}
- return &rockchip_fb->fb;
+ return fb;
err_gem_object_unreference:
for (i--; i >= 0; i--)
@@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
- if (IS_ERR(rockchip_fb))
- return ERR_CAST(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(fb))
+ return ERR_CAST(fb);
- return &rockchip_fb->fb;
+ return fb;
}
void rockchip_drm_mode_config_init(struct drm_device *dev)
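The driver-local destroy/create_handle callbacks can be dropped above because, once the GEM objects live in the core's fb->obj[] array, the generic helper does the same work. Roughly what drm_gem_fb_destroy() does (see drivers/gpu/drm/drm_gem_framebuffer_helper.c; sketch, not the verbatim helper):

/* Sketch: put every fb->obj[] reference (the put helper is NULL-safe),
 * then tear down and free the framebuffer. */
#include <linux/slab.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>

static void gem_fb_destroy_sketch(struct drm_framebuffer *fb)
{
	int i;

	for (i = 0; i < 4; i++)
		drm_gem_object_put_unlocked(fb->obj[i]);

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}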
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index 2fe47f1ee98f..f1265cb1aee8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
void rockchip_drm_mode_config_init(struct drm_device *dev);
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2121345a61af..c9222119767d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -486,6 +486,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
+static int vop_core_clks_enable(struct vop *vop)
+{
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0)
+ goto err_disable_hclk;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+ return ret;
+}
+
+static void vop_core_clks_disable(struct vop *vop)
+{
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -497,17 +522,13 @@ static int vop_enable(struct drm_crtc *crtc)
return ret;
}
- ret = clk_enable(vop->hclk);
+ ret = vop_core_clks_enable(vop);
if (WARN_ON(ret < 0))
goto err_put_pm_runtime;
ret = clk_enable(vop->dclk);
if (WARN_ON(ret < 0))
- goto err_disable_hclk;
-
- ret = clk_enable(vop->aclk);
- if (WARN_ON(ret < 0))
- goto err_disable_dclk;
+ goto err_disable_core;
/*
* Slave iommu shares power, irq and clock with vop. It was associated
@@ -519,7 +540,7 @@ static int vop_enable(struct drm_crtc *crtc)
if (ret) {
DRM_DEV_ERROR(vop->dev,
"failed to attach dma mapping, %d\n", ret);
- goto err_disable_aclk;
+ goto err_disable_dclk;
}
spin_lock(&vop->reg_lock);
@@ -552,18 +573,14 @@ static int vop_enable(struct drm_crtc *crtc)
spin_unlock(&vop->reg_lock);
- enable_irq(vop->irq);
-
drm_crtc_vblank_on(crtc);
return 0;
-err_disable_aclk:
- clk_disable(vop->aclk);
err_disable_dclk:
clk_disable(vop->dclk);
-err_disable_hclk:
- clk_disable(vop->hclk);
+err_disable_core:
+ vop_core_clks_disable(vop);
err_put_pm_runtime:
pm_runtime_put_sync(vop->dev);
return ret;
@@ -599,8 +616,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_dsp_hold_valid_irq_disable(vop);
- disable_irq(vop->irq);
-
vop->is_enabled = false;
/*
@@ -609,8 +624,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
clk_disable(vop->dclk);
- clk_disable(vop->aclk);
- clk_disable(vop->hclk);
+ vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
mutex_unlock(&vop->vop_lock);
@@ -728,7 +742,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
return;
}
- obj = rockchip_fb_get_gem_obj(fb, 0);
+ obj = fb->obj[0];
rk_obj = to_rockchip_obj(obj);
actual_w = drm_rect_width(src) >> 16;
@@ -758,7 +772,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
int bpp = fb->format->cpp[1];
- uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+ uv_obj = fb->obj[1];
rk_uv_obj = to_rockchip_obj(uv_obj);
offset = (src->x1 >> 16) * bpp / hsub;
@@ -1178,6 +1192,18 @@ static irqreturn_t vop_isr(int irq, void *data)
int ret = IRQ_NONE;
/*
+ * The irq is shared with the iommu. If the runtime-pm state of the
+ * vop-device is disabled, the irq has to be targeted at the iommu.
+ */
+ if (!pm_runtime_get_if_in_use(vop->dev))
+ return IRQ_NONE;
+
+ if (vop_core_clks_enable(vop)) {
+ DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
+ goto out;
+ }
+
+ /*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
*/
@@ -1192,7 +1218,7 @@ static irqreturn_t vop_isr(int irq, void *data)
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
- return IRQ_NONE;
+ goto out_disable;
if (active_irqs & DSP_HOLD_VALID_INTR) {
complete(&vop->dsp_hold_completion);
@@ -1218,6 +1244,10 @@ static irqreturn_t vop_isr(int irq, void *data)
DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
active_irqs);
+out_disable:
+ vop_core_clks_disable(vop);
+out:
+ pm_runtime_put(vop->dev);
return ret;
}
@@ -1596,9 +1626,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
-
return 0;
err_disable_pm_runtime:
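A minimal sketch of the guard pattern the ISR change introduces, with hypothetical device and struct names: on a line shared with the IOMMU, refuse to touch registers unless the device is demonstrably powered.

/* Sketch of a shared-IRQ handler guarded by runtime PM; example_dev and
 * example_isr are hypothetical. */
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct example_dev {
	struct device *dev;
};

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *edev = data;

	/* A zero return means runtime-suspended: the interrupt belongs to
	 * the other device sharing the line, so report IRQ_NONE untouched. */
	if (!pm_runtime_get_if_in_use(edev->dev))
		return IRQ_NONE;

	/* ... safe to read/clear interrupt status here ... */

	pm_runtime_put(edev->dev);
	return IRQ_HANDLED;
}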
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index e67f4ea28c0e..b3f6f524b402 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
of_property_read_u32(endpoint, "reg", &endpoint_id);
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
&lvds->panel, &lvds->bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
if (!child_count) {
DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
@@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- lvds->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
goto err_free_encoder;
}
- encoder->bridge = lvds->bridge;
}
pm_runtime_enable(dev);
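The one-line of_node_put() fix above follows the usual OF iterator rule; a sketch (match() is a placeholder predicate, here backed by of_device_is_available()):

/* Sketch: for_each_child_of_node() drops the previous child's reference
 * on each iteration, so breaking out early leaves one reference held
 * that the loop body must release. */
#include <linux/of.h>

static bool match(struct device_node *np)
{
	return of_device_is_available(np);	/* placeholder condition */
}

static void find_child(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (match(child)) {
			of_node_put(child);	/* balance the held reference */
			break;
		}
	}
}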
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 2a5b8466d806..35dc74883f83 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -298,8 +298,9 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
(SAVAGE_DMA_PAGE_SIZE * 4);
- dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
- dev_priv->nr_dma_pages, GFP_KERNEL);
+ dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
+ sizeof(drm_savage_dma_page_t),
+ GFP_KERNEL);
if (dev_priv->dma_pages == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 54acc117550c..6b943ea1c57d 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -19,7 +19,9 @@ selftest(align64, igt_align64)
selftest(evict, igt_evict)
selftest(evict_range, igt_evict_range)
selftest(bottomup, igt_bottomup)
+selftest(lowest, igt_lowest)
selftest(topdown, igt_topdown)
+selftest(highest, igt_highest)
selftest(color, igt_color)
selftest(color_evict, igt_color_evict)
selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 7cc935d7b7aa..fbed2c90fd51 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -389,7 +389,7 @@ static int __igt_reserve(unsigned int count, u64 size)
if (!order)
goto err;
- nodes = vzalloc(sizeof(*nodes) * count);
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
if (!nodes)
goto err_order;
@@ -579,7 +579,7 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
DRM_MM_BUG_ON(!size);
ret = -ENOMEM;
- nodes = vmalloc(count * sizeof(*nodes));
+ nodes = vmalloc(array_size(count, sizeof(*nodes)));
if (!nodes)
goto err;
@@ -889,7 +889,7 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
*/
ret = -ENOMEM;
- nodes = vzalloc(count * sizeof(*nodes));
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
if (!nodes)
goto err;
@@ -1046,7 +1046,7 @@ static int igt_align(void *ignored)
* meets our requirements.
*/
- nodes = vzalloc(max_count * sizeof(*nodes));
+ nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
if (!nodes)
goto err;
@@ -1416,7 +1416,7 @@ static int igt_evict(void *ignored)
*/
ret = -ENOMEM;
- nodes = vzalloc(size * sizeof(*nodes));
+ nodes = vzalloc(array_size(size, sizeof(*nodes)));
if (!nodes)
goto err;
@@ -1526,7 +1526,7 @@ static int igt_evict_range(void *ignored)
*/
ret = -ENOMEM;
- nodes = vzalloc(size * sizeof(*nodes));
+ nodes = vzalloc(array_size(size, sizeof(*nodes)));
if (!nodes)
goto err;
@@ -1627,11 +1627,11 @@ static int igt_topdown(void *ignored)
*/
ret = -ENOMEM;
- nodes = vzalloc(count * sizeof(*nodes));
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
if (!nodes)
goto err;
- bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+ bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1741,11 +1741,11 @@ static int igt_bottomup(void *ignored)
*/
ret = -ENOMEM;
- nodes = vzalloc(count * sizeof(*nodes));
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
if (!nodes)
goto err;
- bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+ bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1825,6 +1825,77 @@ err:
return ret;
}
+static int __igt_once(unsigned int mode)
+{
+ struct drm_mm mm;
+ struct drm_mm_node rsvd_lo, rsvd_hi, node;
+ int err;
+
+ drm_mm_init(&mm, 0, 7);
+
+ memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+ rsvd_lo.start = 1;
+ rsvd_lo.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_lo);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err;
+ }
+
+ memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+ rsvd_hi.start = 5;
+ rsvd_hi.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_hi);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err_lo;
+ }
+
+ if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+ pr_err("Expected a hole after lo and high nodes!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+ memset(&node, 0, sizeof(node));
+ err = drm_mm_insert_node_generic(&mm, &node,
+ 2, 0, 0,
+ mode | DRM_MM_INSERT_ONCE);
+ if (!err) {
+ pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
+ node.start);
+ err = -EINVAL;
+ goto err_node;
+ }
+
+ err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
+ if (err) {
+ pr_err("Could not insert the node into the available hole!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+err_node:
+ drm_mm_remove_node(&node);
+err_hi:
+ drm_mm_remove_node(&rsvd_hi);
+err_lo:
+ drm_mm_remove_node(&rsvd_lo);
+err:
+ drm_mm_takedown(&mm);
+ return err;
+}
+
+static int igt_lowest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_LOW);
+}
+
+static int igt_highest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_HIGH);
+}
+
static void separate_adjacent_colors(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -2098,7 +2169,7 @@ static int igt_color_evict(void *ignored)
*/
ret = -ENOMEM;
- nodes = vzalloc(total_size * sizeof(*nodes));
+ nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
if (!nodes)
goto err;
@@ -2199,7 +2270,7 @@ static int igt_color_evict_range(void *ignored)
*/
ret = -ENOMEM;
- nodes = vzalloc(total_size * sizeof(*nodes));
+ nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
if (!nodes)
goto err;
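The vzalloc(count * sizeof(*nodes)) conversions above rely on the saturating helper from <linux/overflow.h>; a sketch of the property they buy:

/* Sketch: array_size() saturates to SIZE_MAX on overflow, and the
 * allocators refuse a SIZE_MAX request, so a wrapped multiply can no
 * longer turn into a too-small allocation. */
#include <linux/overflow.h>
#include <linux/vmalloc.h>

static void *alloc_nodes(size_t count, size_t elem_size)
{
	return vzalloc(array_size(count, elem_size));
}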
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index c987c826daa3..0426d66660d1 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -2,7 +2,6 @@ config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
depends on DRM && ARM
depends on ARCH_SHMOBILE || COMPILE_TEST
- depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e7738939a86d..40df8887fc17 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -21,8 +21,6 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
-#include <video/sh_mobile_meram.h>
-
#include "shmob_drm_backlight.h"
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
@@ -47,20 +45,12 @@ static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
if (ret < 0)
return ret;
}
-#if 0
- if (sdev->meram_dev && sdev->meram_dev->pdev)
- pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
-#endif
return 0;
}
static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
{
-#if 0
- if (sdev->meram_dev && sdev->meram_dev->pdev)
- pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
-#endif
if (sdev->clock)
clk_disable_unprepare(sdev->clock);
}
@@ -269,12 +259,6 @@ static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
if (!scrtc->started)
return;
- /* Disable the MERAM cache. */
- if (scrtc->cache) {
- sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
- scrtc->cache = NULL;
- }
-
/* Stop the LCDC. */
shmob_drm_crtc_start_stop(scrtc, false);
@@ -305,7 +289,6 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
{
struct drm_crtc *crtc = &scrtc->crtc;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct shmob_drm_device *sdev = crtc->dev->dev_private;
struct drm_gem_cma_object *gem;
unsigned int bpp;
@@ -321,11 +304,6 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
-
- if (scrtc->cache)
- sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
- scrtc->dma[0], scrtc->dma[1],
- &scrtc->dma[0], &scrtc->dma[1]);
}
static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
@@ -372,9 +350,7 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct shmob_drm_device *sdev = crtc->dev->dev_private;
- const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
const struct shmob_drm_format_info *format;
- void *cache;
format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (format == NULL) {
@@ -386,24 +362,6 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
scrtc->format = format;
scrtc->line_size = crtc->primary->fb->pitches[0];
- if (sdev->meram) {
- /* Enable MERAM cache if configured. We need to de-init
- * configured ICBs before we can re-initialize them.
- */
- if (scrtc->cache) {
- sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
- scrtc->cache = NULL;
- }
-
- cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
- crtc->primary->fb->pitches[0],
- adjusted_mode->vdisplay,
- format->meram,
- &scrtc->line_size);
- if (!IS_ERR(cache))
- scrtc->cache = cache;
- }
-
shmob_drm_crtc_compute_base(scrtc, x, y);
return 0;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index f152973df11c..c11f421737dc 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -28,7 +28,6 @@ struct shmob_drm_crtc {
int dpms;
const struct shmob_drm_format_info *format;
- void *cache;
unsigned long dma[2];
unsigned int line_size;
bool started;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
index 02ea315ba69a..088a6e55fa29 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -23,7 +23,6 @@
struct clk;
struct device;
struct drm_device;
-struct sh_mobile_meram_info;
struct shmob_drm_device {
struct device *dev;
@@ -31,7 +30,6 @@ struct shmob_drm_device {
void __iomem *mmio;
struct clk *clock;
- struct sh_mobile_meram_info *meram;
u32 lddckr;
u32 ldmt1r;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index d36919b14da7..447638581c08 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -18,8 +18,6 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <video/sh_mobile_meram.h>
-
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
@@ -35,55 +33,46 @@ static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
.bpp = 16,
.yuv = false,
.lddfr = LDDFR_PKF_RGB16,
- .meram = SH_MOBILE_MERAM_PF_RGB,
}, {
.fourcc = DRM_FORMAT_RGB888,
.bpp = 24,
.yuv = false,
.lddfr = LDDFR_PKF_RGB24,
- .meram = SH_MOBILE_MERAM_PF_RGB,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
.bpp = 32,
.yuv = false,
.lddfr = LDDFR_PKF_ARGB32,
- .meram = SH_MOBILE_MERAM_PF_RGB,
}, {
.fourcc = DRM_FORMAT_NV12,
.bpp = 12,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_420,
- .meram = SH_MOBILE_MERAM_PF_NV,
}, {
.fourcc = DRM_FORMAT_NV21,
.bpp = 12,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_420,
- .meram = SH_MOBILE_MERAM_PF_NV,
}, {
.fourcc = DRM_FORMAT_NV16,
.bpp = 16,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_422,
- .meram = SH_MOBILE_MERAM_PF_NV,
}, {
.fourcc = DRM_FORMAT_NV61,
.bpp = 16,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_422,
- .meram = SH_MOBILE_MERAM_PF_NV,
}, {
.fourcc = DRM_FORMAT_NV24,
.bpp = 24,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_444,
- .meram = SH_MOBILE_MERAM_PF_NV24,
}, {
.fourcc = DRM_FORMAT_NV42,
.bpp = 24,
.yuv = true,
.lddfr = LDDFR_CC | LDDFR_YF_444,
- .meram = SH_MOBILE_MERAM_PF_NV24,
},
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
index 06d5b7caa026..753e2817dc2c 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -24,7 +24,6 @@ struct shmob_drm_format_info {
unsigned int bpp;
bool yuv;
u32 lddfr;
- unsigned int meram;
};
const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 97f6e4a3eb0d..1d0359f713ca 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -17,8 +17,6 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <video/sh_mobile_meram.h>
-
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_plane.h"
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 9b2c47051b51..49813d34bdf0 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
- struct drm_crtc *crtc = drm_plane->crtc;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock(&drm_plane->mutex, NULL);
+ crtc = drm_plane->state->crtc;
+ drm_modeset_unlock(&drm_plane->mutex);
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
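The debugfs fix above illustrates the atomic-era rule for readers outside a commit; a sketch of the same pattern (read_bound_crtc() is hypothetical):

/* Sketch: plane->state is protected by the per-plane modeset lock, so a
 * debugfs reader takes it briefly instead of trusting legacy plane->crtc,
 * which the atomic helpers no longer keep up to date. */
#include <drm/drm_modeset_lock.h>
#include <drm/drm_plane.h>

static struct drm_crtc *read_bound_crtc(struct drm_plane *plane)
{
	struct drm_crtc *crtc;

	drm_modeset_lock(&plane->mutex, NULL);
	crtc = plane->state ? plane->state->crtc : NULL;
	drm_modeset_unlock(&plane->mutex);

	return crtc;
}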
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index bfbf761f0c1d..d4e7d16a2514 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1040,7 +1040,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int sun6i_dsi_runtime_resume(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@@ -1069,7 +1069,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
return 0;
}
-static int sun6i_dsi_runtime_suspend(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
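For reference, a sketch of why __maybe_unused is the right annotation here (driver names hypothetical): with CONFIG_PM disabled, SET_RUNTIME_PM_OPS() expands to nothing and the callbacks become unreferenced.

/* Sketch: without CONFIG_PM the ops table drops the references, and
 * __maybe_unused silences the resulting -Wunused-function warning
 * without wrapping the callbacks in #ifdefs. */
#include <linux/pm.h>

static int __maybe_unused example_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused example_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};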
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 00a5c9f32254..4f80100ff5f3 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
- unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
- unsigned long page,
- void *addr)
-{
-}
-
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
return NULL;
@@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map_atomic = tegra_gem_prime_kmap_atomic,
- .unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
.unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 1ee6855212a0..50a1d4216ce7 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -548,7 +548,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
- buf = kmalloc(fb->width * fb->height, GFP_KERNEL);
+ buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 06c94e3a5f15..6e2d1300b457 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -348,8 +348,9 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
if (use_static)
pages_to_free = static_buf;
else
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc_array(npages_to_free,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
@@ -547,7 +548,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+ caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
+ GFP_KERNEL);
if (!caching_array) {
pr_debug("Unable to allocate table for new pages\n");
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index f63d99c302e4..3f14c1cc0789 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -463,8 +463,9 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
if (use_static)
pages_to_free = static_buf;
else
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc_array(npages_to_free,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_debug("%s: Failed to allocate memory for pool free operation\n",
@@ -753,7 +754,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
/* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+ caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
+ GFP_KERNEL);
if (!caching_array) {
pr_debug("%s: Unable to allocate table for new pages\n",
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 0a20695eb120..556f62662aa9 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
};
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
- struct device *dev,
struct dma_buf_attachment *attach)
{
struct udl_drm_dmabuf_attachment *udl_attach;
@@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
return NULL;
}
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
@@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
.map = udl_dmabuf_kmap,
- .map_atomic = udl_dmabuf_kmap_atomic,
.unmap = udl_dmabuf_kunmap,
- .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 55c0cc309198..072582570a4f 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -16,6 +16,7 @@
#include <linux/usb.h>
#include <drm/drm_gem.h>
+#include <linux/mm_types.h>
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_fault *vmf);
+vm_fault_t udl_gem_fault(struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 9a15cce22cce..d5a23295dd80 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-int udl_gem_fault(struct vm_fault *vmf)
+vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
- int ret = 0;
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
@@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, vmf->address, page);
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int udl_gem_get_pages(struct udl_gem_object *obj)
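A compact sketch of what the typed fault API buys here (lookup_page() is a placeholder): vmf_insert_page() maps errnos to VM_FAULT_* codes itself, replacing the switch statement deleted above.

/* Sketch: with vm_fault_t the handler returns fault codes directly;
 * vmf_insert_page() does the errno-to-VM_FAULT_* translation that the
 * old code open-coded.  lookup_page() is a placeholder. */
#include <linux/mm.h>

static struct page *lookup_page(struct vm_fault *vmf);	/* placeholder */

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page = lookup_page(vmf);

	if (!page)
		return VM_FAULT_SIGBUS;
	return vmf_insert_page(vmf->vma, vmf->address, page);
}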
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index b07bece9417d..808bc901f567 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, q);
- if (!fence)
- return fence;
+ if (IS_ERR(fence))
+ return NULL;
if (job->done_fence)
dma_fence_put(job->done_fence);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index c8650bbcbcb3..dcadf793ee80 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -862,7 +862,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
* is released.
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
- plane->fb = fb;
vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
vc4_async_page_flip_complete);
@@ -1057,7 +1056,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
- primary_plane->crtc = crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@@ -1093,7 +1091,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
if (!IS_ERR(cursor_plane)) {
cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
- cursor_plane->crtc = crtc;
crtc->cursor = cursor_plane;
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 71d44c357d35..8604fd2e7c5a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -209,7 +209,7 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
- u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);
+ u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);
if (!new_dlist)
return;
@@ -467,12 +467,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
+ u32 hvs_format = format->hvs;
int ret, i;
ret = vc4_plane_setup_clipping_and_scaling(state);
@@ -512,7 +514,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
- switch (fb->modifier) {
+ switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@@ -535,6 +537,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
break;
}
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+
+ /* Column-based NV12 or RGBA. */
+ if (fb->format->num_planes > 1) {
+ if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+ DRM_DEBUG_KMS("SAND format only valid for NV12/21");
+ return -EINVAL;
+ }
+ hvs_format = HVS_PIXEL_FORMAT_H264;
+ } else {
+ if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ DRM_DEBUG_KMS("SAND256 format only valid for H.264");
+ return -EINVAL;
+ }
+ }
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tiling = SCALER_CTL0_TILING_64B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER_CTL0_TILING_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER_CTL0_TILING_256B_OR_T;
+ break;
+ default:
+ break;
+ }
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+ return -EINVAL;
+ }
+
+ pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+ break;
+ }
+
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
@@ -544,8 +589,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
+ VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
- (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@@ -607,8 +653,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+ if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER_SRC_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
}
/* Colorspace conversion words */
@@ -810,18 +861,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct dma_fence *fence;
int ret;
- if ((plane->state->fb == state->fb) || !state->fb)
+ if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ fence = reservation_object_get_excl_rcu(bo->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+
+ if (plane->state->fb == state->fb)
+ return 0;
+
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
- fence = reservation_object_get_excl_rcu(bo->resv);
- drm_atomic_set_fence_for_plane(state, fence);
-
return 0;
}
@@ -866,13 +920,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGR565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- return true;
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ return true;
+ default:
+ return false;
+ }
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ return true;
+ default:
+ return false;
+ }
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
default:
return (modifier == DRM_FORMAT_MOD_LINEAR);
}
@@ -900,6 +973,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+ DRM_FORMAT_MOD_BROADCOM_SAND128,
+ DRM_FORMAT_MOD_BROADCOM_SAND64,
+ DRM_FORMAT_MOD_BROADCOM_SAND256,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
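A sketch of the modifier split the SAND handling depends on, using the real helpers from the drm_fourcc.h uapi header: the column height travels in the modifier's parameter field, so drivers compare only the base modifier and read the height separately.

/* Sketch: split a Broadcom SAND modifier into its base kind and its
 * parameter (the column height); sand128_col_height() is hypothetical. */
#include <drm/drm_fourcc.h>

static bool sand128_col_height(u64 modifier, u32 *height)
{
	if (fourcc_mod_broadcom_mod(modifier) !=
	    DRM_FORMAT_MOD_BROADCOM_SAND128)
		return false;

	*height = fourcc_mod_broadcom_param(modifier);
	return true;
}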
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d1fb6fec46eb..d6864fa4bd14 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
#define SCALER_SRC_PITCH_SHIFT 0
+/* PITCH0/1/2 fields for tiled (SAND). */
+#define SCALER_TILE_SKIP_0_MASK VC4_MASK(18, 16)
+#define SCALER_TILE_SKIP_0_SHIFT 16
+#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
+#define SCALER_TILE_HEIGHT_SHIFT 0
+
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2524ff116f00..c64a85950c82 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -61,13 +61,13 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
kfree(vgem_obj);
}
-static int vgem_gem_fault(struct vm_fault *vmf)
+static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
@@ -77,7 +77,6 @@ static int vgem_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
- ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index d6e84a589ef1..345bda4494e1 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,7 +235,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
- vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+ vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
if (NULL == vsg->pages)
return -ENOMEM;
ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a5edd86603d9..ff9933e79416 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -28,6 +28,7 @@
#include "virtgpu_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb
- = to_virtio_gpu_framebuffer(fb);
-
- drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(virtio_gpu_fb);
-}
-
static int
virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
}
-static int
-virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb =
- to_virtio_gpu_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
-}
-
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
- .create_handle = virtio_gpu_framebuffer_create_handle,
- .destroy = virtio_gpu_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
.dirty = virtio_gpu_framebuffer_surface_dirty,
};
@@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
int ret;
struct virtio_gpu_object *bo;
- vgfb->obj = obj;
+ vgfb->base.obj[0] = obj;
bo = gem_to_virtio_gpu_obj(obj);
@@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
- vgfb->obj = NULL;
+ vgfb->base.obj[0] = NULL;
return ret;
}
@@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- primary->crtc = crtc;
- cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d25c8ca224aa..65605e207bbe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -124,7 +124,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 8af69ab58b89..a121b1c79522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -46,7 +46,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
int bpp = fb->base.format->cpp[0];
int x2, y2;
unsigned long flags;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
if ((width <= 0) ||
(x + width > fb->base.width) ||
@@ -121,7 +121,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
unsigned int num_clips)
{
struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
struct drm_clip_rect norect;
struct drm_clip_rect *clips_ptr;
int left, right, top, bottom;
@@ -305,8 +305,8 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&vgfbdev->helper);
- if (vgfb->obj)
- vgfb->obj = NULL;
+ if (vgfb->base.obj[0])
+ vgfb->base.obj[0] = NULL;
drm_fb_helper_fini(&vgfbdev->helper);
drm_framebuffer_cleanup(&vgfb->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 71ba455af915..dc5b5b2b7aab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -154,7 +154,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
@@ -208,7 +208,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
handle = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 97f37c3c16f2..09cc721160c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
-
- vmw_fb_refresh(dev_priv);
}
/**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_kms_resume(dev);
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
- vmw_fb_refresh(dev_priv);
return -EBUSY;
}
@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
- vmw_fb_refresh(dev_priv);
-
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index f34f368c1a2e..5fcbe1620d50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
-void vmw_fb_refresh(struct vmw_private *vmw_priv);
/**
* Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ba0cdb743c3e..9b7e0aca5f84 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
struct drm_crtc *crtc = set->crtc;
- struct drm_framebuffer *fb;
- struct drm_crtc *tmp;
- struct drm_device *dev = set->crtc->dev;
struct drm_modeset_acquire_ctx ctx;
int ret;
drm_modeset_acquire_init(&ctx, 0);
restart:
- /*
- * NOTE: ->set_config can also disable other crtcs (if we steal all
- * connectors from it), hence we need to refcount the fbs across all
- * crtcs. Atomic modeset will have saner semantics ...
- */
- drm_for_each_crtc(tmp, dev)
- tmp->primary->old_fb = tmp->primary->fb;
-
- fb = set->fb;
-
ret = crtc->funcs->set_config(set, &ctx);
- if (ret == 0) {
- crtc->primary->crtc = crtc;
- crtc->primary->fb = fb;
- }
-
- drm_for_each_crtc(tmp, dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
- }
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -866,21 +841,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
-
- return 0;
-}
-/**
- * vmw_fb_refresh - Refresh fb display
- *
- * @vmw_priv: Pointer to device private
- *
- * Call into kms to show the fbdev display(s).
- */
-void vmw_fb_refresh(struct vmw_private *vmw_priv)
-{
- if (!vmw_priv->fb_info)
- return;
+ /*
+ * Need to reschedule a dirty update, because otherwise that's
+ * only done in dirty_mark() if the previous coalesced
+ * dirty region was empty.
+ */
+ schedule_delayed_work(&par->local_work, 0);
- vmw_fb_set_par(vmw_priv->fb_info);
+ return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 01f2dc9e6f52..ef96ba7432ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1536,9 +1536,13 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
unsigned long requested_bb_mem = 0;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
- if (crtc->primary->fb) {
- int cpp = crtc->primary->fb->pitches[0] /
- crtc->primary->fb->width;
+ struct drm_plane *plane = crtc->primary;
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (plane_state && plane_state->fb) {
+ int cpp = plane_state->fb->format->cpp[0];
requested_bb_mem += crtc->mode.hdisplay * cpp *
crtc->mode.vdisplay;
@@ -2322,9 +2326,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
} else {
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
+
+ if (plane->state->fb == &framebuffer->base)
+ units[num_units++] = vmw_crtc_to_du(crtc);
}
}
@@ -2806,6 +2811,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
struct vmw_framebuffer *vfb;
mutex_lock(&dev_priv->global_kms_state_mutex);
@@ -2813,7 +2819,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
if (!du->is_implicit)
goto out_unlock;
- vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+ vfb = vmw_framebuffer_to_vfb(plane->state->fb);
WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
dev_priv->implicit_fb != vfb);
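A sketch of the check-time rule the vmwgfx change above follows (fb_bandwidth() is hypothetical): validate against the state being committed, fetched from the drm_atomic_state under validation, not against the currently bound plane->fb.

/* Sketch: in atomic_check, look at the candidate plane state; it is NULL
 * when the plane is not part of this atomic update. */
#include <drm/drm_atomic.h>

static u64 fb_bandwidth(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_plane_state *ps =
		drm_atomic_get_new_plane_state(state, crtc->primary);

	if (!ps || !ps->fb)
		return 0;

	return (u64)ps->fb->format->cpp[0] *
	       crtc->mode.hdisplay * crtc->mode.vdisplay;
}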
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index cdff99211602..21d746bdc922 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
struct rpc_channel channel;
char *msg, *reply = NULL;
size_t reply_len = 0;
- int ret = 0;
-
if (!vmw_msg_enabled)
return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
return -ENOMEM;
}
- if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
- vmw_send_msg(&channel, msg) ||
- vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
- vmw_close_channel(&channel)) {
- DRM_ERROR("Failed to get %s", guest_info_param);
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+ goto out_open;
- ret = -EINVAL;
- }
+ if (vmw_send_msg(&channel, msg) ||
+ vmw_recv_msg(&channel, (void *) &reply, &reply_len))
+ goto out_msg;
+ vmw_close_channel(&channel);
if (buffer && reply && reply_len > 0) {
/* Remove reply code, which is the first 2 characters of
* the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
kfree(reply);
kfree(msg);
- return ret;
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+ kfree(reply);
+out_open:
+ *length = 0;
+ kfree(msg);
+ DRM_ERROR("Failed to get %s", guest_info_param);
+
+ return -EINVAL;
}
@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
return -ENOMEM;
}
- if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
- vmw_send_msg(&channel, msg) ||
- vmw_close_channel(&channel)) {
- DRM_ERROR("Failed to send log\n");
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+ goto out_open;
- ret = -EINVAL;
- }
+ if (vmw_send_msg(&channel, msg))
+ goto out_msg;
+ vmw_close_channel(&channel);
kfree(msg);
- return ret;
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+out_open:
+ kfree(msg);
+ DRM_ERROR("Failed to send log\n");
+
+ return -EINVAL;
}
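Both rewrites in this file replace one OR-chained condition with the kernel's conventional goto-based unwind, so each failure path releases exactly what has been acquired so far and the error message is emitted in a single place. A self-contained sketch of the pattern; the channel type and helpers are stand-ins, not the driver's real vmw_*_channel API:

	#include <errno.h>

	struct channel { int fd; };

	/* stand-ins for vmw_open_channel()/vmw_send_msg()/vmw_close_channel() */
	static int open_channel(struct channel *ch) { ch->fd = 0; return 0; }
	static int send_msg(struct channel *ch, const char *msg) { (void)ch; (void)msg; return 0; }
	static void close_channel(struct channel *ch) { (void)ch; }

	static int send_one_message(const char *msg)
	{
		struct channel ch;

		if (open_channel(&ch))
			goto out_open;	/* nothing acquired yet */

		if (send_msg(&ch, msg))
			goto out_msg;	/* channel is open: close it */

		close_channel(&ch);
		return 0;

	out_msg:
		close_channel(&ch);
	out_open:
		return -EINVAL;
	}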
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033fb610..8545488aa0cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
#else
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory
+ * location because we've run out of registers.
+ * We can't reference that memory location once %esp or %ebp has
+ * been modified, so we first push it onto the stack, just before
+ * pushing %ebp, and then read it back from the stack at the point
+ * where we need it.
*/
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
port_num, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%ebp;" \
- "mov %12, %%ebp;" \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
"rep outsb;" \
- "pop %%ebp;" : \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@@ -167,10 +174,12 @@
port_num, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%ebp;" \
- "mov %12, %%ebp;" \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
"rep insb;" \
- "pop %%ebp" : \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 0d42a46521fc..373bc6da2f84 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -40,7 +40,6 @@
*/
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;
@@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.map = vmw_prime_dmabuf_kmap,
- .map_atomic = vmw_prime_dmabuf_kmap_atomic,
.unmap = vmw_prime_dmabuf_kunmap,
- .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 648f8127f65a..9798640cbfcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return ret;
}
+ vps->dmabuf_size = size;
+
/*
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
@@ -525,8 +527,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
*/
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
/*
* When disabling a plane, CRTC and FB should always be NULL
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 67331f01ef32..152e96cb1c01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -414,6 +414,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_plane_state *plane_state = crtc->primary->state;
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
struct vmw_framebuffer *vfb;
@@ -422,7 +423,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- fb = crtc->primary->fb;
+ fb = plane_state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -1285,8 +1286,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1, 1, NULL, crtc);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
crtc = old_state->crtc;
stdu = vmw_crtc_to_stdu(crtc);
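The crtc->primary->fb assignments deleted from both atomic_update hooks (here and in vmwgfx_scrn.c above) are legacy bookkeeping: under atomic modesetting the core owns plane->state->fb and its reference, so a hand-written copy of the pointer can only go stale. A minimal sketch of the state-based lookup the STDU enable path now performs (helper name is invented):

	#include <drm/drm_crtc.h>

	/* read the primary plane's scanout fb through its atomic state */
	static struct drm_framebuffer *primary_fb(struct drm_crtc *crtc)
	{
		struct drm_plane_state *state = crtc->primary->state;

		return state ? state->fb : NULL;
	}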
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 3345ac71b391..6b6d5ab82ec3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -623,7 +623,7 @@ static int displback_initwait(struct xen_drm_front_info *front_info)
if (ret < 0)
return ret;
- DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
+ DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
/* Create event channels for all connectors and publish */
ret = xen_drm_front_evtchnl_create_all(front_info);
if (ret < 0)
@@ -737,9 +737,8 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
* is not correct: to fix this call of_dma_configure() with a NULL
* node to set default DMA ops.
*/
- dev->bus->force_dma = true;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- ret = of_dma_configure(dev, NULL);
+ ret = of_dma_configure(dev, NULL, true);
if (ret < 0) {
DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
return ret;
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 2c2479b571ae..5693b4a4b02b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -126,12 +126,12 @@ struct xen_drm_front_drm_info {
static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
- return (u64)fb;
+ return (uintptr_t)fb;
}
static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
- return (u64)gem_obj;
+ return (uintptr_t)gem_obj;
}
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
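The cookie helpers change because (u64)fb casts a pointer to an integer of a different width on 32-bit targets, which gcc flags with -Wpointer-to-int-cast; the xen_page_to_vaddr() macro in the next file similarly switches to uintptr_t for a virtual address. Casting through uintptr_t, which matches the pointer width on every target, makes the widening to 64 bits an ordinary integer conversion. A small stand-alone illustration (function name is made up):

	#include <stdint.h>

	static inline uint64_t ptr_to_cookie(const void *p)
	{
		/* uintptr_t is exactly pointer-sized, so the widening to
		 * uint64_t is a plain value-preserving integer cast */
		return (uintptr_t)p;
	}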
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
index 8099cb343ae3..d333b67cc1a0 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -122,7 +122,7 @@ static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
}
#define xen_page_to_vaddr(page) \
- ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+ ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 88a3558b7916..815bdb42e3f0 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -314,6 +314,11 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv)
return strcmp(dev_name(dev), drv->name) == 0;
}
+static int host1x_dma_configure(struct device *dev)
+{
+ return of_dma_configure(dev, dev->of_node, true);
+}
+
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -326,8 +331,8 @@ static const struct dev_pm_ops host1x_device_pm_ops = {
struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
+ .dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
- .force_dma = true,
};
static void __host1x_device_del(struct host1x_device *device)
@@ -416,7 +421,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
- of_dma_configure(&device->dev, host1x->dev->of_node);
+ of_dma_configure(&device->dev, host1x->dev->of_node, true);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
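The host1x changes track a core API shift: the blanket bus_type.force_dma flag is gone, replaced by a per-bus .dma_configure() callback, and of_dma_configure() gained a third parameter asking for default DMA ops even when the device tree supplies no DMA description. A hedged sketch of a bus opting in under the new scheme (bus name is invented):

	#include <linux/device.h>
	#include <linux/of_device.h>

	static int mybus_dma_configure(struct device *dev)
	{
		/* 'true' requests default DMA ops when DT is silent */
		return of_dma_configure(dev, dev->of_node, true);
	}

	static struct bus_type mybus_type = {
		.name		= "mybus",
		.dma_configure	= mybus_dma_configure,
	};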
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 29437eabe095..b677e5d524e6 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -6,7 +6,7 @@ config VGA_ARB
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices
are accessed at the same time they need some kind of coordination. Please
- see Documentation/vgaarbiter.txt for more details. Select this to
+ see Documentation/gpu/vgaarbiter.rst for more details. Select this to
enable VGA arbiter.
config VGA_ARB_MAX_GPUS
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1c5e74cb9279..c61b04555779 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1,6 +1,6 @@
/*
* vgaarb.c: Implements the VGA arbitration. For details refer to
- * Documentation/vgaarbiter.txt
+ * Documentation/gpu/vgaarbiter.rst
*
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>